From 55c742ea5bc9970db05c6c17d969992b4e02f527 Mon Sep 17 00:00:00 2001
From: Tonis Tiigi
Date: Sat, 15 Nov 2025 15:54:03 -0800
Subject: [PATCH 01/11] add rego integration to source policies

Signed-off-by: Tonis Tiigi
---
 build/build.go | 3 +
 build/opt.go | 30 +
 go.mod | 38 +-
 go.sum | 86 +-
 policy/types.go | 137 +
 policy/validate.go | 501 ++
 .../agnivade/levenshtein/.gitignore | 5 +
 .../agnivade/levenshtein/License.txt | 21 +
 .../github.com/agnivade/levenshtein/Makefile | 13 +
 .../github.com/agnivade/levenshtein/README.md | 80 +
 .../agnivade/levenshtein/levenshtein.go | 101 +
 vendor/github.com/beorn7/perks/LICENSE | 20 +
 .../beorn7/perks/quantile/exampledata.txt | 2388 +++++++
 .../beorn7/perks/quantile/stream.go | 316 +
 .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 +
 vendor/github.com/cespare/xxhash/v2/README.md | 74 +
 .../github.com/cespare/xxhash/v2/testall.sh | 10 +
 vendor/github.com/cespare/xxhash/v2/xxhash.go | 243 +
 .../cespare/xxhash/v2/xxhash_amd64.s | 209 +
 .../cespare/xxhash/v2/xxhash_arm64.s | 183 +
 .../cespare/xxhash/v2/xxhash_asm.go | 15 +
 .../cespare/xxhash/v2/xxhash_other.go | 76 +
 .../cespare/xxhash/v2/xxhash_safe.go | 16 +
 .../cespare/xxhash/v2/xxhash_unsafe.go | 58 +
 .../decred/dcrd/dcrec/secp256k1/v4/LICENSE | 17 +
 .../decred/dcrd/dcrec/secp256k1/v4/README.md | 72 +
 .../secp256k1/v4/compressedbytepoints.go | 18 +
 .../decred/dcrd/dcrec/secp256k1/v4/curve.go | 1310 ++++
 .../dcrd/dcrec/secp256k1/v4/curve_embedded.go | 14 +
 .../dcrec/secp256k1/v4/curve_precompute.go | 14 +
 .../decred/dcrd/dcrec/secp256k1/v4/doc.go | 59 +
 .../decred/dcrd/dcrec/secp256k1/v4/ecdh.go | 21 +
 .../dcrec/secp256k1/v4/ellipticadaptor.go | 255 +
 .../decred/dcrd/dcrec/secp256k1/v4/error.go | 67 +
 .../decred/dcrd/dcrec/secp256k1/v4/field.go | 1696 +++++
 .../dcrec/secp256k1/v4/loadprecomputed.go | 91 +
 .../dcrd/dcrec/secp256k1/v4/modnscalar.go | 1105 +++
 .../decred/dcrd/dcrec/secp256k1/v4/nonce.go | 263 +
 .../decred/dcrd/dcrec/secp256k1/v4/privkey.go | 111 +
 .../decred/dcrd/dcrec/secp256k1/v4/pubkey.go | 236 +
 vendor/github.com/go-ini/ini/.editorconfig | 12 +
 vendor/github.com/go-ini/ini/.gitignore | 7 +
 vendor/github.com/go-ini/ini/.golangci.yml | 27 +
 vendor/github.com/go-ini/ini/LICENSE | 191 +
 vendor/github.com/go-ini/ini/Makefile | 15 +
 vendor/github.com/go-ini/ini/README.md | 43 +
 vendor/github.com/go-ini/ini/codecov.yml | 16 +
 vendor/github.com/go-ini/ini/data_source.go | 76 +
 vendor/github.com/go-ini/ini/deprecated.go | 22 +
 vendor/github.com/go-ini/ini/error.go | 49 +
 vendor/github.com/go-ini/ini/file.go | 541 ++
 vendor/github.com/go-ini/ini/helper.go | 24 +
 vendor/github.com/go-ini/ini/ini.go | 176 +
 vendor/github.com/go-ini/ini/key.go | 837 +++
 vendor/github.com/go-ini/ini/parser.go | 520 ++
 vendor/github.com/go-ini/ini/section.go | 256 +
 vendor/github.com/go-ini/ini/struct.go | 747 ++
 vendor/github.com/gobwas/glob/.gitignore | 8 +
 vendor/github.com/gobwas/glob/.travis.yml | 9 +
 vendor/github.com/gobwas/glob/LICENSE | 21 +
 vendor/github.com/gobwas/glob/bench.sh | 26 +
 .../gobwas/glob/compiler/compiler.go | 525 ++
 vendor/github.com/gobwas/glob/glob.go | 80 +
 vendor/github.com/gobwas/glob/match/any.go | 45 +
 vendor/github.com/gobwas/glob/match/any_of.go | 82 +
 vendor/github.com/gobwas/glob/match/btree.go | 146 +
 .../github.com/gobwas/glob/match/contains.go | 58 +
 .../github.com/gobwas/glob/match/every_of.go | 99 +
 vendor/github.com/gobwas/glob/match/list.go | 49 +
 vendor/github.com/gobwas/glob/match/match.go | 81 +
 vendor/github.com/gobwas/glob/match/max.go | 49 +
vendor/github.com/gobwas/glob/match/min.go | 57 + .../github.com/gobwas/glob/match/nothing.go | 27 + vendor/github.com/gobwas/glob/match/prefix.go | 50 + .../gobwas/glob/match/prefix_any.go | 55 + .../gobwas/glob/match/prefix_suffix.go | 62 + vendor/github.com/gobwas/glob/match/range.go | 48 + vendor/github.com/gobwas/glob/match/row.go | 77 + .../github.com/gobwas/glob/match/segments.go | 91 + vendor/github.com/gobwas/glob/match/single.go | 43 + vendor/github.com/gobwas/glob/match/suffix.go | 35 + .../gobwas/glob/match/suffix_any.go | 43 + vendor/github.com/gobwas/glob/match/super.go | 33 + vendor/github.com/gobwas/glob/match/text.go | 45 + vendor/github.com/gobwas/glob/readme.md | 148 + .../github.com/gobwas/glob/syntax/ast/ast.go | 122 + .../gobwas/glob/syntax/ast/parser.go | 157 + .../gobwas/glob/syntax/lexer/lexer.go | 273 + .../gobwas/glob/syntax/lexer/token.go | 88 + .../github.com/gobwas/glob/syntax/syntax.go | 14 + .../gobwas/glob/util/runes/runes.go | 154 + .../gobwas/glob/util/strings/strings.go | 39 + vendor/github.com/goccy/go-json/.codecov.yml | 32 + vendor/github.com/goccy/go-json/.gitignore | 2 + vendor/github.com/goccy/go-json/.golangci.yml | 86 + vendor/github.com/goccy/go-json/CHANGELOG.md | 425 ++ vendor/github.com/goccy/go-json/LICENSE | 21 + vendor/github.com/goccy/go-json/Makefile | 39 + vendor/github.com/goccy/go-json/README.md | 529 ++ vendor/github.com/goccy/go-json/color.go | 68 + vendor/github.com/goccy/go-json/decode.go | 263 + .../goccy/go-json/docker-compose.yml | 13 + vendor/github.com/goccy/go-json/encode.go | 326 + vendor/github.com/goccy/go-json/error.go | 41 + .../internal/decoder/anonymous_field.go | 41 + .../goccy/go-json/internal/decoder/array.go | 176 + .../goccy/go-json/internal/decoder/assign.go | 438 ++ .../goccy/go-json/internal/decoder/bool.go | 83 + .../goccy/go-json/internal/decoder/bytes.go | 118 + .../goccy/go-json/internal/decoder/compile.go | 493 ++ .../internal/decoder/compile_norace.go | 30 + .../go-json/internal/decoder/compile_race.go | 38 + .../goccy/go-json/internal/decoder/context.go | 254 + .../goccy/go-json/internal/decoder/float.go | 170 + .../goccy/go-json/internal/decoder/func.go | 146 + .../goccy/go-json/internal/decoder/int.go | 246 + .../go-json/internal/decoder/interface.go | 528 ++ .../goccy/go-json/internal/decoder/invalid.go | 55 + .../goccy/go-json/internal/decoder/map.go | 280 + .../goccy/go-json/internal/decoder/number.go | 123 + .../goccy/go-json/internal/decoder/option.go | 17 + .../goccy/go-json/internal/decoder/path.go | 670 ++ .../goccy/go-json/internal/decoder/ptr.go | 97 + .../goccy/go-json/internal/decoder/slice.go | 380 + .../goccy/go-json/internal/decoder/stream.go | 556 ++ .../goccy/go-json/internal/decoder/string.go | 452 ++ .../goccy/go-json/internal/decoder/struct.go | 845 +++ .../goccy/go-json/internal/decoder/type.go | 30 + .../goccy/go-json/internal/decoder/uint.go | 194 + .../internal/decoder/unmarshal_json.go | 104 + .../internal/decoder/unmarshal_text.go | 285 + .../internal/decoder/wrapped_string.go | 73 + .../goccy/go-json/internal/encoder/code.go | 1023 +++ .../goccy/go-json/internal/encoder/compact.go | 286 + .../go-json/internal/encoder/compiler.go | 939 +++ .../internal/encoder/compiler_norace.go | 33 + .../go-json/internal/encoder/compiler_race.go | 46 + .../goccy/go-json/internal/encoder/context.go | 105 + .../go-json/internal/encoder/decode_rune.go | 126 + .../goccy/go-json/internal/encoder/encoder.go | 601 ++ .../goccy/go-json/internal/encoder/indent.go | 211 + 
.../goccy/go-json/internal/encoder/int.go | 176 + .../goccy/go-json/internal/encoder/map112.go | 9 + .../goccy/go-json/internal/encoder/map113.go | 9 + .../goccy/go-json/internal/encoder/opcode.go | 752 ++ .../goccy/go-json/internal/encoder/option.go | 48 + .../goccy/go-json/internal/encoder/optype.go | 932 +++ .../goccy/go-json/internal/encoder/query.go | 135 + .../goccy/go-json/internal/encoder/string.go | 483 ++ .../go-json/internal/encoder/string_table.go | 415 ++ .../go-json/internal/encoder/vm/debug_vm.go | 41 + .../goccy/go-json/internal/encoder/vm/hack.go | 9 + .../goccy/go-json/internal/encoder/vm/util.go | 207 + .../goccy/go-json/internal/encoder/vm/vm.go | 4859 +++++++++++++ .../internal/encoder/vm_color/debug_vm.go | 35 + .../go-json/internal/encoder/vm_color/hack.go | 9 + .../go-json/internal/encoder/vm_color/util.go | 274 + .../go-json/internal/encoder/vm_color/vm.go | 4859 +++++++++++++ .../encoder/vm_color_indent/debug_vm.go | 35 + .../internal/encoder/vm_color_indent/util.go | 297 + .../internal/encoder/vm_color_indent/vm.go | 4859 +++++++++++++ .../internal/encoder/vm_indent/debug_vm.go | 35 + .../internal/encoder/vm_indent/hack.go | 9 + .../internal/encoder/vm_indent/util.go | 230 + .../go-json/internal/encoder/vm_indent/vm.go | 4859 +++++++++++++ .../goccy/go-json/internal/errors/error.go | 183 + .../goccy/go-json/internal/runtime/rtype.go | 262 + .../go-json/internal/runtime/struct_field.go | 91 + .../goccy/go-json/internal/runtime/type.go | 98 + vendor/github.com/goccy/go-json/json.go | 368 + vendor/github.com/goccy/go-json/option.go | 79 + vendor/github.com/goccy/go-json/path.go | 84 + vendor/github.com/goccy/go-json/query.go | 47 + .../lestrrat-go/blackmagic/.gitignore | 15 + .../github.com/lestrrat-go/blackmagic/LICENSE | 21 + .../lestrrat-go/blackmagic/README.md | 3 + .../lestrrat-go/blackmagic/blackmagic.go | 125 + .../lestrrat-go/dsig-secp256k1/.gitignore | 32 + .../lestrrat-go/dsig-secp256k1/Changes | 5 + .../lestrrat-go/dsig-secp256k1/LICENSE | 21 + .../lestrrat-go/dsig-secp256k1/secp256k1.go | 29 + vendor/github.com/lestrrat-go/dsig/.gitignore | 32 + vendor/github.com/lestrrat-go/dsig/Changes | 5 + vendor/github.com/lestrrat-go/dsig/LICENSE | 21 + vendor/github.com/lestrrat-go/dsig/README.md | 163 + .../github.com/lestrrat-go/dsig/algorithms.go | 37 + .../lestrrat-go/dsig/crypto_signer.go | 45 + vendor/github.com/lestrrat-go/dsig/dsig.go | 224 + vendor/github.com/lestrrat-go/dsig/ecdsa.go | 200 + vendor/github.com/lestrrat-go/dsig/eddsa.go | 44 + vendor/github.com/lestrrat-go/dsig/hmac.go | 45 + .../dsig/internal/ecutil/ecutil.go | 76 + vendor/github.com/lestrrat-go/dsig/rsa.go | 63 + vendor/github.com/lestrrat-go/dsig/sign.go | 100 + .../github.com/lestrrat-go/dsig/validation.go | 66 + vendor/github.com/lestrrat-go/dsig/verify.go | 134 + .../github.com/lestrrat-go/httpcc/.gitignore | 15 + vendor/github.com/lestrrat-go/httpcc/LICENSE | 21 + .../github.com/lestrrat-go/httpcc/README.md | 35 + .../lestrrat-go/httpcc/directives.go | 117 + .../github.com/lestrrat-go/httpcc/httpcc.go | 310 + .../lestrrat-go/httprc/v3/.gitignore | 15 + .../lestrrat-go/httprc/v3/.golangci.yml | 95 + .../github.com/lestrrat-go/httprc/v3/Changes | 30 + .../github.com/lestrrat-go/httprc/v3/LICENSE | 21 + .../lestrrat-go/httprc/v3/README.md | 172 + .../lestrrat-go/httprc/v3/backend.go | 235 + .../lestrrat-go/httprc/v3/client.go | 183 + .../lestrrat-go/httprc/v3/controller.go | 186 + .../lestrrat-go/httprc/v3/errors.go | 57 + .../lestrrat-go/httprc/v3/errsink/errsink.go | 59 + 
.../lestrrat-go/httprc/v3/httprc.go | 90 + .../lestrrat-go/httprc/v3/options.go | 144 + .../httprc/v3/proxysink/proxysink.go | 135 + .../lestrrat-go/httprc/v3/resource.go | 359 + .../httprc/v3/tracesink/tracesink.go | 52 + .../lestrrat-go/httprc/v3/transformer.go | 37 + .../lestrrat-go/httprc/v3/whitelist.go | 113 + .../lestrrat-go/httprc/v3/worker.go | 62 + .../lestrrat-go/jwx/v3/.bazelignore | 4 + vendor/github.com/lestrrat-go/jwx/v3/.bazelrc | 1 + .../lestrrat-go/jwx/v3/.bazelversion | 1 + .../github.com/lestrrat-go/jwx/v3/.gitignore | 39 + .../lestrrat-go/jwx/v3/.golangci.yml | 125 + vendor/github.com/lestrrat-go/jwx/v3/BUILD | 47 + vendor/github.com/lestrrat-go/jwx/v3/Changes | 222 + .../lestrrat-go/jwx/v3/Changes-v2.md | 390 ++ .../lestrrat-go/jwx/v3/Changes-v3.md | 140 + vendor/github.com/lestrrat-go/jwx/v3/LICENSE | 22 + .../lestrrat-go/jwx/v3/MODULE.bazel | 34 + .../lestrrat-go/jwx/v3/MODULE.bazel.lock | 230 + vendor/github.com/lestrrat-go/jwx/v3/Makefile | 98 + .../github.com/lestrrat-go/jwx/v3/README.md | 263 + .../github.com/lestrrat-go/jwx/v3/SECURITY.md | 18 + .../github.com/lestrrat-go/jwx/v3/WORKSPACE | 2 + .../lestrrat-go/jwx/v3/cert/BUILD.bazel | 34 + .../lestrrat-go/jwx/v3/cert/cert.go | 48 + .../lestrrat-go/jwx/v3/cert/chain.go | 80 + .../github.com/lestrrat-go/jwx/v3/codecov.yml | 2 + .../github.com/lestrrat-go/jwx/v3/format.go | 104 + .../jwx/v3/formatkind_string_gen.go | 29 + .../jwx/v3/internal/base64/BUILD.bazel | 21 + .../jwx/v3/internal/base64/asmbase64.go | 51 + .../jwx/v3/internal/base64/base64.go | 139 + .../jwx/v3/internal/ecutil/BUILD.bazel | 14 + .../jwx/v3/internal/ecutil/ecutil.go | 76 + .../jwx/v3/internal/json/BUILD.bazel | 19 + .../lestrrat-go/jwx/v3/internal/json/goccy.go | 49 + .../lestrrat-go/jwx/v3/internal/json/json.go | 127 + .../jwx/v3/internal/json/registry.go | 90 + .../jwx/v3/internal/json/stdlib.go | 47 + .../jwx/v3/internal/jwxio/BUILD.bazel | 8 + .../jwx/v3/internal/jwxio/jwxio.go | 29 + .../jwx/v3/internal/keyconv/BUILD.bazel | 31 + .../jwx/v3/internal/keyconv/keyconv.go | 354 + .../jwx/v3/internal/pool/BUILD.bazel | 32 + .../jwx/v3/internal/pool/big_int.go | 19 + .../jwx/v3/internal/pool/byte_slice.go | 19 + .../jwx/v3/internal/pool/bytes_buffer.go | 18 + .../jwx/v3/internal/pool/error_slice.go | 16 + .../jwx/v3/internal/pool/key_to_error_map.go | 19 + .../lestrrat-go/jwx/v3/internal/pool/pool.go | 70 + .../jwx/v3/internal/tokens/BUILD.bazel | 17 + .../jwx/v3/internal/tokens/jwe_tokens.go | 48 + .../jwx/v3/internal/tokens/tokens.go | 51 + .../lestrrat-go/jwx/v3/jwa/BUILD.bazel | 45 + .../lestrrat-go/jwx/v3/jwa/README.md | 3 + .../lestrrat-go/jwx/v3/jwa/compression_gen.go | 153 + .../jwx/v3/jwa/content_encryption_gen.go | 179 + .../lestrrat-go/jwx/v3/jwa/elliptic_gen.go | 190 + .../github.com/lestrrat-go/jwx/v3/jwa/jwa.go | 68 + .../jwx/v3/jwa/key_encryption_gen.go | 268 + .../lestrrat-go/jwx/v3/jwa/key_type_gen.go | 172 + .../lestrrat-go/jwx/v3/jwa/options.yaml | 41 + .../lestrrat-go/jwx/v3/jwa/options_gen.go | 91 + .../lestrrat-go/jwx/v3/jwa/secp2561k.go | 15 + .../lestrrat-go/jwx/v3/jwa/signature_gen.go | 242 + .../lestrrat-go/jwx/v3/jwe/BUILD.bazel | 70 + .../lestrrat-go/jwx/v3/jwe/README.md | 94 + .../lestrrat-go/jwx/v3/jwe/compress.go | 62 + .../lestrrat-go/jwx/v3/jwe/decrypt.go | 227 + .../lestrrat-go/jwx/v3/jwe/encrypt.go | 193 + .../lestrrat-go/jwx/v3/jwe/errors.go | 90 + .../lestrrat-go/jwx/v3/jwe/filter.go | 36 + .../lestrrat-go/jwx/v3/jwe/headers.go | 95 + .../lestrrat-go/jwx/v3/jwe/headers_gen.go | 899 +++ 
.../lestrrat-go/jwx/v3/jwe/interface.go | 207 + .../jwx/v3/jwe/internal/aescbc/BUILD.bazel | 22 + .../jwx/v3/jwe/internal/aescbc/aescbc.go | 270 + .../jwx/v3/jwe/internal/cipher/BUILD.bazel | 34 + .../jwx/v3/jwe/internal/cipher/cipher.go | 169 + .../jwx/v3/jwe/internal/cipher/interface.go | 32 + .../jwx/v3/jwe/internal/concatkdf/BUILD.bazel | 24 + .../v3/jwe/internal/concatkdf/concatkdf.go | 66 + .../v3/jwe/internal/content_crypt/BUILD.bazel | 21 + .../internal/content_crypt/content_crypt.go | 43 + .../jwe/internal/content_crypt/interface.go | 20 + .../jwx/v3/jwe/internal/keygen/BUILD.bazel | 24 + .../jwx/v3/jwe/internal/keygen/interface.go | 40 + .../jwx/v3/jwe/internal/keygen/keygen.go | 139 + .../github.com/lestrrat-go/jwx/v3/jwe/io.go | 36 + .../github.com/lestrrat-go/jwx/v3/jwe/jwe.go | 1036 +++ .../lestrrat-go/jwx/v3/jwe/jwebb/BUILD.bazel | 43 + .../jwx/v3/jwe/jwebb/content_cipher.go | 34 + .../lestrrat-go/jwx/v3/jwe/jwebb/jwebb.go | 15 + .../v3/jwe/jwebb/key_decrypt_asymmetric.go | 177 + .../jwx/v3/jwe/jwebb/key_decrypt_symmetric.go | 91 + .../v3/jwe/jwebb/key_encrypt_asymmetric.go | 147 + .../jwx/v3/jwe/jwebb/key_encrypt_symmetric.go | 115 + .../jwx/v3/jwe/jwebb/key_encryption.go | 70 + .../lestrrat-go/jwx/v3/jwe/jwebb/keywrap.go | 110 + .../lestrrat-go/jwx/v3/jwe/key_provider.go | 163 + .../lestrrat-go/jwx/v3/jwe/message.go | 546 ++ .../lestrrat-go/jwx/v3/jwe/options.go | 108 + .../lestrrat-go/jwx/v3/jwe/options.yaml | 172 + .../lestrrat-go/jwx/v3/jwe/options_gen.go | 350 + .../lestrrat-go/jwx/v3/jwk/BUILD.bazel | 87 + .../lestrrat-go/jwx/v3/jwk/README.md | 215 + .../lestrrat-go/jwx/v3/jwk/cache.go | 362 + .../lestrrat-go/jwx/v3/jwk/convert.go | 399 ++ .../github.com/lestrrat-go/jwx/v3/jwk/doc.go | 294 + .../lestrrat-go/jwx/v3/jwk/ecdsa.go | 402 ++ .../lestrrat-go/jwx/v3/jwk/ecdsa/BUILD.bazel | 15 + .../lestrrat-go/jwx/v3/jwk/ecdsa/ecdsa.go | 76 + .../lestrrat-go/jwx/v3/jwk/ecdsa_gen.go | 1432 ++++ .../lestrrat-go/jwx/v3/jwk/errors.go | 79 + .../lestrrat-go/jwx/v3/jwk/es256k.go | 14 + .../lestrrat-go/jwx/v3/jwk/fetch.go | 117 + .../lestrrat-go/jwx/v3/jwk/filter.go | 28 + .../lestrrat-go/jwx/v3/jwk/interface.go | 143 + .../lestrrat-go/jwx/v3/jwk/interface_gen.go | 109 + .../github.com/lestrrat-go/jwx/v3/jwk/io.go | 42 + .../github.com/lestrrat-go/jwx/v3/jwk/jwk.go | 709 ++ .../lestrrat-go/jwx/v3/jwk/jwkbb/BUILD.bazel | 17 + .../lestrrat-go/jwx/v3/jwk/jwkbb/x509.go | 111 + .../lestrrat-go/jwx/v3/jwk/key_ops.go | 58 + .../github.com/lestrrat-go/jwx/v3/jwk/okp.go | 321 + .../lestrrat-go/jwx/v3/jwk/okp_gen.go | 1347 ++++ .../lestrrat-go/jwx/v3/jwk/options.go | 76 + .../lestrrat-go/jwx/v3/jwk/options.yaml | 143 + .../lestrrat-go/jwx/v3/jwk/options_gen.go | 297 + .../lestrrat-go/jwx/v3/jwk/parser.go | 244 + .../github.com/lestrrat-go/jwx/v3/jwk/rsa.go | 360 + .../lestrrat-go/jwx/v3/jwk/rsa_gen.go | 1543 +++++ .../github.com/lestrrat-go/jwx/v3/jwk/set.go | 311 + .../lestrrat-go/jwx/v3/jwk/symmetric.go | 105 + .../lestrrat-go/jwx/v3/jwk/symmetric_gen.go | 620 ++ .../lestrrat-go/jwx/v3/jwk/usage.go | 74 + .../lestrrat-go/jwx/v3/jwk/whitelist.go | 38 + .../github.com/lestrrat-go/jwx/v3/jwk/x509.go | 249 + .../lestrrat-go/jwx/v3/jws/BUILD.bazel | 76 + .../lestrrat-go/jwx/v3/jws/README.md | 111 + .../lestrrat-go/jwx/v3/jws/errors.go | 112 + .../lestrrat-go/jwx/v3/jws/es256k.go | 13 + .../lestrrat-go/jwx/v3/jws/filter.go | 36 + .../lestrrat-go/jwx/v3/jws/headers.go | 52 + .../lestrrat-go/jwx/v3/jws/headers_gen.go | 704 ++ .../lestrrat-go/jwx/v3/jws/interface.go | 80 + 
.../jwx/v3/jws/internal/keytype/BUILD.bazel | 11 + .../jwx/v3/jws/internal/keytype/keytype.go | 57 + .../github.com/lestrrat-go/jwx/v3/jws/io.go | 36 + .../github.com/lestrrat-go/jwx/v3/jws/jws.go | 662 ++ .../lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel | 38 + .../jwx/v3/jws/jwsbb/crypto_signer.go | 45 + .../lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go | 179 + .../lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go | 30 + .../lestrrat-go/jwx/v3/jws/jwsbb/es256k.go | 14 + .../lestrrat-go/jwx/v3/jws/jwsbb/format.go | 235 + .../lestrrat-go/jwx/v3/jws/jwsbb/header.go | 222 + .../lestrrat-go/jwx/v3/jws/jwsbb/hmac.go | 52 + .../lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go | 94 + .../lestrrat-go/jwx/v3/jws/jwsbb/rsa.go | 71 + .../lestrrat-go/jwx/v3/jws/jwsbb/sign.go | 110 + .../lestrrat-go/jwx/v3/jws/jwsbb/verify.go | 105 + .../lestrrat-go/jwx/v3/jws/key_provider.go | 291 + .../lestrrat-go/jwx/v3/jws/legacy.go | 88 + .../lestrrat-go/jwx/v3/jws/legacy/BUILD.bazel | 21 + .../lestrrat-go/jwx/v3/jws/legacy/ecdsa.go | 204 + .../lestrrat-go/jwx/v3/jws/legacy/eddsa.go | 79 + .../lestrrat-go/jwx/v3/jws/legacy/hmac.go | 90 + .../lestrrat-go/jwx/v3/jws/legacy/legacy.go | 36 + .../lestrrat-go/jwx/v3/jws/legacy/rsa.go | 145 + .../lestrrat-go/jwx/v3/jws/message.go | 550 ++ .../lestrrat-go/jwx/v3/jws/options.go | 259 + .../lestrrat-go/jwx/v3/jws/options.yaml | 234 + .../lestrrat-go/jwx/v3/jws/options_gen.go | 449 ++ .../lestrrat-go/jwx/v3/jws/sign_context.go | 141 + .../jwx/v3/jws/signature_builder.go | 118 + .../lestrrat-go/jwx/v3/jws/signer.go | 158 + .../lestrrat-go/jwx/v3/jws/verifier.go | 154 + .../lestrrat-go/jwx/v3/jws/verify_context.go | 211 + .../lestrrat-go/jwx/v3/jwt/BUILD.bazel | 72 + .../lestrrat-go/jwx/v3/jwt/README.md | 224 + .../lestrrat-go/jwx/v3/jwt/builder_gen.go | 88 + .../lestrrat-go/jwx/v3/jwt/errors.go | 95 + .../lestrrat-go/jwx/v3/jwt/fastpath.go | 71 + .../lestrrat-go/jwx/v3/jwt/filter.go | 34 + .../github.com/lestrrat-go/jwx/v3/jwt/http.go | 295 + .../lestrrat-go/jwx/v3/jwt/interface.go | 8 + .../jwx/v3/jwt/internal/errors/BUILD.bazel | 16 + .../jwx/v3/jwt/internal/errors/errors.go | 183 + .../jwx/v3/jwt/internal/types/BUILD.bazel | 35 + .../jwx/v3/jwt/internal/types/date.go | 192 + .../jwx/v3/jwt/internal/types/string.go | 43 + .../github.com/lestrrat-go/jwx/v3/jwt/io.go | 42 + .../github.com/lestrrat-go/jwx/v3/jwt/jwt.go | 592 ++ .../lestrrat-go/jwx/v3/jwt/options.go | 322 + .../lestrrat-go/jwx/v3/jwt/options.yaml | 274 + .../lestrrat-go/jwx/v3/jwt/options_gen.go | 495 ++ .../lestrrat-go/jwx/v3/jwt/serialize.go | 264 + .../lestrrat-go/jwx/v3/jwt/token_gen.go | 635 ++ .../lestrrat-go/jwx/v3/jwt/token_options.go | 78 + .../jwx/v3/jwt/token_options_gen.go | 25 + .../lestrrat-go/jwx/v3/jwt/validate.go | 418 ++ vendor/github.com/lestrrat-go/jwx/v3/jwx.go | 45 + .../github.com/lestrrat-go/jwx/v3/options.go | 30 + .../lestrrat-go/jwx/v3/transform/BUILD.bazel | 32 + .../lestrrat-go/jwx/v3/transform/filter.go | 115 + .../lestrrat-go/jwx/v3/transform/map.go | 46 + .../github.com/lestrrat-go/option/.gitignore | 15 + vendor/github.com/lestrrat-go/option/LICENSE | 21 + .../github.com/lestrrat-go/option/README.md | 245 + .../github.com/lestrrat-go/option/option.go | 38 + .../lestrrat-go/option/v2/.gitignore | 15 + .../github.com/lestrrat-go/option/v2/LICENSE | 21 + .../lestrrat-go/option/v2/README.md | 245 + .../lestrrat-go/option/v2/option.go | 47 + .../github.com/lestrrat-go/option/v2/set.go | 92 + .../policysession/policysession.pb.go | 367 + .../policysession/policysession.proto | 36 + 
.../policysession/policysession_grpc.pb.go | 119 + .../policysession/policysession_vtproto.pb.go | 1261 ++++ .../sourcepolicy/policysession/provider.go | 46 + .../sourcepolicy/policysession/verifier.go | 30 + .../buildkit/util/gitutil/gitobject/parse.go | 261 + .../github.com/open-policy-agent/opa/LICENSE | 202 + .../opa/capabilities/capabilities.go | 19 + .../open-policy-agent/opa/capabilities/doc.go | 8 + .../opa/capabilities/v0.17.0.json | 2392 +++++++ .../opa/capabilities/v0.17.1.json | 2392 +++++++ .../opa/capabilities/v0.17.2.json | 2525 +++++++ .../opa/capabilities/v0.17.3.json | 2525 +++++++ .../opa/capabilities/v0.18.0.json | 2685 ++++++++ .../opa/capabilities/v0.19.0-rc1.json | 2835 ++++++++ .../opa/capabilities/v0.19.0.json | 2858 ++++++++ .../opa/capabilities/v0.19.1.json | 2858 ++++++++ .../opa/capabilities/v0.19.2.json | 2858 ++++++++ .../opa/capabilities/v0.20.0.json | 3064 ++++++++ .../opa/capabilities/v0.20.1.json | 3064 ++++++++ .../opa/capabilities/v0.20.2.json | 3064 ++++++++ .../opa/capabilities/v0.20.3.json | 3064 ++++++++ .../opa/capabilities/v0.20.4.json | 3064 ++++++++ .../opa/capabilities/v0.20.5.json | 3064 ++++++++ .../opa/capabilities/v0.21.0.json | 3086 +++++++++ .../opa/capabilities/v0.21.1.json | 3086 +++++++++ .../opa/capabilities/v0.22.0.json | 3137 +++++++++ .../opa/capabilities/v0.23.0.json | 3168 +++++++++ .../opa/capabilities/v0.23.1.json | 3168 +++++++++ .../opa/capabilities/v0.23.2.json | 3168 +++++++++ .../opa/capabilities/v0.24.0.json | 3243 +++++++++ .../opa/capabilities/v0.25.0-rc1.json | 3271 +++++++++ .../opa/capabilities/v0.25.0-rc2.json | 3313 +++++++++ .../opa/capabilities/v0.25.0-rc3.json | 3313 +++++++++ .../opa/capabilities/v0.25.0-rc4.json | 3313 +++++++++ .../opa/capabilities/v0.25.0.json | 3355 +++++++++ .../opa/capabilities/v0.25.1.json | 3355 +++++++++ .../opa/capabilities/v0.25.2.json | 3355 +++++++++ .../opa/capabilities/v0.26.0.json | 3383 +++++++++ .../opa/capabilities/v0.27.0.json | 3389 +++++++++ .../opa/capabilities/v0.27.1.json | 3389 +++++++++ .../opa/capabilities/v0.28.0.json | 3458 ++++++++++ .../opa/capabilities/v0.29.0.json | 3458 ++++++++++ .../opa/capabilities/v0.29.1.json | 3458 ++++++++++ .../opa/capabilities/v0.29.2.json | 3458 ++++++++++ .../opa/capabilities/v0.29.3.json | 3458 ++++++++++ .../opa/capabilities/v0.29.4.json | 3458 ++++++++++ .../opa/capabilities/v0.30.0.json | 3458 ++++++++++ .../opa/capabilities/v0.30.1.json | 3458 ++++++++++ .../opa/capabilities/v0.30.2.json | 3458 ++++++++++ .../opa/capabilities/v0.31.0.json | 3512 ++++++++++ .../opa/capabilities/v0.32.0.json | 3512 ++++++++++ .../opa/capabilities/v0.32.1.json | 3512 ++++++++++ .../opa/capabilities/v0.33.0.json | 3534 ++++++++++ .../opa/capabilities/v0.33.1.json | 3534 ++++++++++ .../opa/capabilities/v0.34.0.json | 3602 ++++++++++ .../opa/capabilities/v0.34.1.json | 3602 ++++++++++ .../opa/capabilities/v0.34.2.json | 3602 ++++++++++ .../opa/capabilities/v0.35.0.json | 3619 ++++++++++ .../opa/capabilities/v0.36.0.json | 3721 ++++++++++ .../opa/capabilities/v0.36.1.json | 3721 ++++++++++ .../opa/capabilities/v0.37.0.json | 3825 ++++++++++ .../opa/capabilities/v0.37.1.json | 3825 ++++++++++ .../opa/capabilities/v0.37.2.json | 3825 ++++++++++ .../opa/capabilities/v0.38.0.json | 3826 ++++++++++ .../opa/capabilities/v0.38.1.json | 3826 ++++++++++ .../opa/capabilities/v0.39.0.json | 3826 ++++++++++ .../opa/capabilities/v0.40.0.json | 3847 +++++++++++ .../opa/capabilities/v0.41.0.json | 4007 +++++++++++ .../opa/capabilities/v0.42.0.json | 4076 +++++++++++ 
.../opa/capabilities/v0.42.1.json | 4076 +++++++++++ .../opa/capabilities/v0.42.2.json | 4076 +++++++++++ .../opa/capabilities/v0.43.0.json | 4079 +++++++++++ .../opa/capabilities/v0.43.1.json | 4079 +++++++++++ .../opa/capabilities/v0.44.0.json | 4190 +++++++++++ .../opa/capabilities/v0.45.0.json | 4306 ++++++++++++ .../opa/capabilities/v0.46.0.json | 4353 ++++++++++++ .../opa/capabilities/v0.46.1.json | 4353 ++++++++++++ .../opa/capabilities/v0.46.2.json | 4353 ++++++++++++ .../opa/capabilities/v0.46.3.json | 4353 ++++++++++++ .../opa/capabilities/v0.47.0.json | 4422 ++++++++++++ .../opa/capabilities/v0.47.1.json | 4422 ++++++++++++ .../opa/capabilities/v0.47.2.json | 4422 ++++++++++++ .../opa/capabilities/v0.47.3.json | 4422 ++++++++++++ .../opa/capabilities/v0.47.4.json | 4422 ++++++++++++ .../opa/capabilities/v0.48.0.json | 4466 ++++++++++++ .../opa/capabilities/v0.49.0.json | 4466 ++++++++++++ .../opa/capabilities/v0.49.1.json | 4466 ++++++++++++ .../opa/capabilities/v0.49.2.json | 4466 ++++++++++++ .../opa/capabilities/v0.50.0.json | 4598 +++++++++++++ .../opa/capabilities/v0.50.1.json | 4598 +++++++++++++ .../opa/capabilities/v0.50.2.json | 4598 +++++++++++++ .../opa/capabilities/v0.51.0.json | 4598 +++++++++++++ .../opa/capabilities/v0.52.0.json | 4615 +++++++++++++ .../opa/capabilities/v0.53.0.json | 4640 +++++++++++++ .../opa/capabilities/v0.53.1.json | 4640 +++++++++++++ .../opa/capabilities/v0.54.0.json | 4640 +++++++++++++ .../opa/capabilities/v0.55.0.json | 4665 +++++++++++++ .../opa/capabilities/v0.56.0.json | 4688 +++++++++++++ .../opa/capabilities/v0.57.0.json | 4710 +++++++++++++ .../opa/capabilities/v0.57.1.json | 4710 +++++++++++++ .../opa/capabilities/v0.58.0.json | 4710 +++++++++++++ .../opa/capabilities/v0.59.0.json | 4737 +++++++++++++ .../opa/capabilities/v0.60.0.json | 4737 +++++++++++++ .../opa/capabilities/v0.61.0.json | 4737 +++++++++++++ .../opa/capabilities/v0.62.0.json | 4737 +++++++++++++ .../opa/capabilities/v0.62.1.json | 4737 +++++++++++++ .../opa/capabilities/v0.63.0.json | 4781 +++++++++++++ .../opa/capabilities/v0.64.0.json | 4826 +++++++++++++ .../opa/capabilities/v0.64.1.json | 4826 +++++++++++++ .../opa/capabilities/v0.65.0.json | 4826 +++++++++++++ .../opa/capabilities/v0.66.0.json | 4826 +++++++++++++ .../opa/capabilities/v0.67.0.json | 4843 +++++++++++++ .../opa/capabilities/v0.67.1.json | 4843 +++++++++++++ .../opa/capabilities/v0.68.0.json | 4843 +++++++++++++ .../opa/capabilities/v0.69.0.json | 4843 +++++++++++++ .../opa/capabilities/v0.70.0.json | 4843 +++++++++++++ .../opa/capabilities/v1.0.0.json | 4835 +++++++++++++ .../opa/capabilities/v1.0.1.json | 4835 +++++++++++++ .../opa/capabilities/v1.1.0.json | 4835 +++++++++++++ .../opa/capabilities/v1.10.0.json | 4867 +++++++++++++ .../opa/capabilities/v1.10.1.json | 4867 +++++++++++++ .../opa/capabilities/v1.2.0.json | 4849 +++++++++++++ .../opa/capabilities/v1.3.0.json | 4849 +++++++++++++ .../opa/capabilities/v1.4.0.json | 4849 +++++++++++++ .../opa/capabilities/v1.4.1.json | 4849 +++++++++++++ .../opa/capabilities/v1.4.2.json | 4849 +++++++++++++ .../opa/capabilities/v1.5.0.json | 4849 +++++++++++++ .../opa/capabilities/v1.5.1.json | 4849 +++++++++++++ .../opa/capabilities/v1.6.0.json | 4850 +++++++++++++ .../opa/capabilities/v1.7.0.json | 4850 +++++++++++++ .../opa/capabilities/v1.8.0.json | 4867 +++++++++++++ .../opa/capabilities/v1.9.0.json | 4867 +++++++++++++ .../opa/internal/bundle/utils.go | 147 + .../opa/internal/cidr/merge/merge.go | 367 + .../opa/internal/compiler/utils.go | 95 + 
.../internal/compiler/wasm/opa/callgraph.csv | 2461 +++++++ .../opa/internal/compiler/wasm/opa/opa.go | 27 + .../opa/internal/compiler/wasm/opa/opa.wasm | Bin 0 -> 436729 bytes .../internal/compiler/wasm/optimizations.go | 271 + .../opa/internal/compiler/wasm/wasm.go | 1786 +++++ .../opa/internal/config/config.go | 176 + .../opa/internal/debug/debug.go | 35 + .../opa/internal/deepcopy/deepcopy.go | 31 + .../opa/internal/edittree/bitvector/README.md | 15 + .../internal/edittree/bitvector/bitvector.go | 206 + .../internal/edittree/bitvector/license.txt | 27 + .../opa/internal/edittree/edittree.go | 1109 +++ .../opa/internal/file/archive/tarball.go | 42 + .../opa/internal/file/url/url.go | 42 + .../opa/internal/future/filter_imports.go | 49 + .../opa/internal/future/parser_opts.go | 43 + .../gojsonschema/LICENSE-APACHE-2.0.txt | 202 + .../opa/internal/gojsonschema/README.md | 482 ++ .../opa/internal/gojsonschema/draft.go | 122 + .../opa/internal/gojsonschema/errors.go | 366 + .../internal/gojsonschema/format_checkers.go | 368 + .../opa/internal/gojsonschema/internalLog.go | 37 + .../opa/internal/gojsonschema/jsonContext.go | 73 + .../opa/internal/gojsonschema/jsonLoader.go | 410 ++ .../opa/internal/gojsonschema/locales.go | 472 ++ .../opa/internal/gojsonschema/result.go | 220 + .../opa/internal/gojsonschema/schema.go | 957 +++ .../opa/internal/gojsonschema/schemaLoader.go | 206 + .../opa/internal/gojsonschema/schemaPool.go | 230 + .../gojsonschema/schemaReferencePool.go | 64 + .../opa/internal/gojsonschema/schemaType.go | 78 + .../opa/internal/gojsonschema/subSchema.go | 151 + .../opa/internal/gojsonschema/types.go | 62 + .../opa/internal/gojsonschema/utils.go | 161 + .../opa/internal/gojsonschema/validation.go | 837 +++ .../opa/internal/json/patch/patch.go | 45 + .../opa/internal/lcss/README.md | 3 + .../opa/internal/lcss/lcss.go | 197 + .../opa/internal/lcss/qsufsort.go | 169 + .../opa/internal/leb128/leb128.go | 170 + .../opa/internal/merge/merge.go | 64 + .../opa/internal/planner/planner.go | 2600 +++++++ .../opa/internal/planner/rules.go | 337 + .../opa/internal/planner/varstack.go | 71 + .../opa/internal/providers/aws/NOTICE.txt | 3 + .../internal/providers/aws/crypto/compare.go | 30 + .../opa/internal/providers/aws/crypto/ecc.go | 114 + .../opa/internal/providers/aws/ecr.go | 148 + .../opa/internal/providers/aws/kms.go | 106 + .../opa/internal/providers/aws/signing_v4.go | 204 + .../opa/internal/providers/aws/signing_v4a.go | 410 ++ .../opa/internal/providers/aws/util.go | 39 + .../opa/internal/providers/aws/v4/const.go | 36 + .../internal/providers/aws/v4/header_rules.go | 87 + .../opa/internal/providers/aws/v4/headers.go | 67 + .../opa/internal/providers/aws/v4/host.go | 75 + .../opa/internal/providers/aws/v4/util.go | 59 + .../open-policy-agent/opa/internal/ref/ref.go | 36 + .../opa/internal/rego/opa/engine.go | 65 + .../opa/internal/rego/opa/options.go | 31 + .../opa/internal/report/report.go | 218 + .../opa/internal/runtime/init/init.go | 261 + .../opa/internal/semver/LICENSE | 202 + .../opa/internal/semver/semver.go | 250 + .../opa/internal/strings/strings.go | 82 + .../opa/internal/strvals/doc.go | 33 + .../opa/internal/strvals/parser.go | 429 ++ .../opa/internal/uuid/uuid.go | 115 + .../opa/internal/version/version.go | 36 + .../opa/internal/wasm/constant/constant.go | 77 + .../opa/internal/wasm/encoding/doc.go | 6 + .../opa/internal/wasm/encoding/reader.go | 965 +++ .../opa/internal/wasm/encoding/writer.go | 778 +++ .../opa/internal/wasm/instruction/control.go | 183 + 
.../internal/wasm/instruction/instruction.go | 33 + .../opa/internal/wasm/instruction/memory.go | 39 + .../opa/internal/wasm/instruction/numeric.go | 199 + .../internal/wasm/instruction/parametric.go | 29 + .../opa/internal/wasm/instruction/variable.go | 54 + .../opa/internal/wasm/module/module.go | 385 ++ .../opa/internal/wasm/module/pretty.go | 84 + .../opa/internal/wasm/opcode/opcode.go | 218 + .../wasm/sdk/opa/capabilities/capabilities.go | 12 + .../opa/capabilities/capabilities_nowasm.go | 14 + .../opa/internal/wasm/types/types.go | 36 + .../opa/internal/wasm/util/util.go | 18 + .../opa/v1/ast/annotations.go | 1034 +++ .../open-policy-agent/opa/v1/ast/builtins.go | 3637 ++++++++++ .../opa/v1/ast/capabilities.go | 290 + .../open-policy-agent/opa/v1/ast/check.go | 1329 ++++ .../open-policy-agent/opa/v1/ast/compare.go | 440 ++ .../open-policy-agent/opa/v1/ast/compile.go | 6129 +++++++++++++++++ .../opa/v1/ast/compilehelper.go | 62 + .../opa/v1/ast/compilemetrics.go | 9 + .../open-policy-agent/opa/v1/ast/conflicts.go | 79 + .../opa/v1/ast/default_module_loader.go | 14 + .../open-policy-agent/opa/v1/ast/doc.go | 36 + .../open-policy-agent/opa/v1/ast/env.go | 528 ++ .../open-policy-agent/opa/v1/ast/errors.go | 124 + .../open-policy-agent/opa/v1/ast/index.go | 980 +++ .../opa/v1/ast/internal/scanner/scanner.go | 478 ++ .../opa/v1/ast/internal/tokens/tokens.go | 149 + .../open-policy-agent/opa/v1/ast/interning.go | 1838 +++++ .../open-policy-agent/opa/v1/ast/json/json.go | 106 + .../opa/v1/ast/location/location.go | 132 + .../open-policy-agent/opa/v1/ast/map.go | 108 + .../open-policy-agent/opa/v1/ast/parser.go | 3088 +++++++++ .../opa/v1/ast/parser_ext.go | 814 +++ .../opa/v1/ast/performance.go | 85 + .../open-policy-agent/opa/v1/ast/policy.go | 2005 ++++++ .../open-policy-agent/opa/v1/ast/pretty.go | 82 + .../opa/v1/ast/rego_compiler.go | 17 + .../open-policy-agent/opa/v1/ast/rego_v1.go | 208 + .../open-policy-agent/opa/v1/ast/schema.go | 54 + .../open-policy-agent/opa/v1/ast/strings.go | 54 + .../open-policy-agent/opa/v1/ast/syncpools.go | 92 + .../open-policy-agent/opa/v1/ast/term.go | 3106 +++++++++ .../open-policy-agent/opa/v1/ast/transform.go | 431 ++ .../open-policy-agent/opa/v1/ast/unify.go | 240 + .../open-policy-agent/opa/v1/ast/varset.go | 121 + .../opa/v1/ast/version_index.json | 1478 ++++ .../open-policy-agent/opa/v1/ast/visit.go | 832 +++ .../open-policy-agent/opa/v1/bundle/bundle.go | 1845 +++++ .../open-policy-agent/opa/v1/bundle/file.go | 517 ++ .../open-policy-agent/opa/v1/bundle/filefs.go | 143 + .../open-policy-agent/opa/v1/bundle/hash.go | 136 + .../open-policy-agent/opa/v1/bundle/keys.go | 173 + .../open-policy-agent/opa/v1/bundle/sign.go | 130 + .../open-policy-agent/opa/v1/bundle/store.go | 1251 ++++ .../open-policy-agent/opa/v1/bundle/verify.go | 290 + .../opa/v1/capabilities/capabilities.go | 18 + .../open-policy-agent/opa/v1/config/config.go | 393 ++ .../open-policy-agent/opa/v1/format/format.go | 2291 ++++++ .../open-policy-agent/opa/v1/hooks/hooks.go | 97 + .../open-policy-agent/opa/v1/ir/ir.go | 486 ++ .../open-policy-agent/opa/v1/ir/marshal.go | 147 + .../open-policy-agent/opa/v1/ir/pretty.go | 44 + .../open-policy-agent/opa/v1/ir/walk.go | 93 + .../open-policy-agent/opa/v1/keys/keys.go | 99 + .../open-policy-agent/opa/v1/loader/errors.go | 62 + .../opa/v1/loader/extension/extension.go | 40 + .../opa/v1/loader/filter/filter.go | 5 + .../open-policy-agent/opa/v1/loader/loader.go | 861 +++ .../opa/v1/logging/logging.go | 274 + .../opa/v1/metrics/metrics.go | 370 
+ .../opa/v1/plugins/plugins.go | 1195 ++++ .../opa/v1/plugins/rest/auth.go | 1211 ++++ .../opa/v1/plugins/rest/aws.go | 1088 +++ .../opa/v1/plugins/rest/azure.go | 287 + .../opa/v1/plugins/rest/gcp.go | 173 + .../opa/v1/plugins/rest/rest.go | 366 + .../open-policy-agent/opa/v1/rego/errors.go | 24 + .../open-policy-agent/opa/v1/rego/plugins.go | 43 + .../open-policy-agent/opa/v1/rego/rego.go | 3028 ++++++++ .../opa/v1/rego/resultset.go | 90 + .../opa/v1/resolver/interface.go | 29 + .../opa/v1/resolver/wasm/wasm.go | 174 + .../opa/v1/schemas/authorizationPolicy.json | 43 + .../opa/v1/schemas/schemas.go | 15 + .../open-policy-agent/opa/v1/storage/doc.go | 6 + .../opa/v1/storage/errors.go | 121 + .../opa/v1/storage/inmem/ast.go | 304 + .../opa/v1/storage/inmem/inmem.go | 452 ++ .../opa/v1/storage/inmem/opts.go | 37 + .../opa/v1/storage/inmem/txn.go | 539 ++ .../opa/v1/storage/interface.go | 252 + .../opa/v1/storage/internal/errors/errors.go | 54 + .../opa/v1/storage/internal/ptr/ptr.go | 141 + .../open-policy-agent/opa/v1/storage/path.go | 142 + .../opa/v1/storage/storage.go | 139 + .../opa/v1/topdown/aggregates.go | 302 + .../opa/v1/topdown/arithmetic.go | 240 + .../open-policy-agent/opa/v1/topdown/array.go | 105 + .../opa/v1/topdown/binary.go | 50 + .../opa/v1/topdown/bindings.go | 401 ++ .../open-policy-agent/opa/v1/topdown/bits.go | 88 + .../opa/v1/topdown/builtins.go | 224 + .../opa/v1/topdown/builtins/builtins.go | 329 + .../open-policy-agent/opa/v1/topdown/cache.go | 363 + .../opa/v1/topdown/cache/cache.go | 651 ++ .../opa/v1/topdown/cancel.go | 33 + .../open-policy-agent/opa/v1/topdown/casts.go | 131 + .../open-policy-agent/opa/v1/topdown/cidr.go | 419 ++ .../opa/v1/topdown/comparison.go | 48 + .../copypropagation/copypropagation.go | 497 ++ .../v1/topdown/copypropagation/unionfind.go | 131 + .../opa/v1/topdown/crypto.go | 783 +++ .../open-policy-agent/opa/v1/topdown/doc.go | 10 + .../opa/v1/topdown/encoding.go | 406 ++ .../opa/v1/topdown/errors.go | 149 + .../open-policy-agent/opa/v1/topdown/eval.go | 4327 ++++++++++++ .../open-policy-agent/opa/v1/topdown/glob.go | 127 + .../opa/v1/topdown/graphql.go | 690 ++ .../open-policy-agent/opa/v1/topdown/http.go | 1648 +++++ .../opa/v1/topdown/http_fixup.go | 8 + .../opa/v1/topdown/http_fixup_darwin.go | 13 + .../open-policy-agent/opa/v1/topdown/input.go | 100 + .../opa/v1/topdown/instrumentation.go | 63 + .../open-policy-agent/opa/v1/topdown/json.go | 405 ++ .../opa/v1/topdown/jsonschema.go | 130 + .../open-policy-agent/opa/v1/topdown/net.go | 64 + .../opa/v1/topdown/numbers.go | 202 + .../opa/v1/topdown/object.go | 235 + .../open-policy-agent/opa/v1/topdown/parse.go | 60 + .../opa/v1/topdown/parse_bytes.go | 157 + .../opa/v1/topdown/parse_units.go | 125 + .../open-policy-agent/opa/v1/topdown/print.go | 86 + .../opa/v1/topdown/print/print.go | 21 + .../opa/v1/topdown/providers.go | 211 + .../open-policy-agent/opa/v1/topdown/query.go | 639 ++ .../opa/v1/topdown/reachable.go | 151 + .../open-policy-agent/opa/v1/topdown/regex.go | 281 + .../opa/v1/topdown/regex_template.go | 122 + .../opa/v1/topdown/resolver.go | 118 + .../opa/v1/topdown/runtime.go | 130 + .../open-policy-agent/opa/v1/topdown/save.go | 523 ++ .../opa/v1/topdown/semver.go | 59 + .../open-policy-agent/opa/v1/topdown/sets.go | 94 + .../opa/v1/topdown/strings.go | 791 +++ .../opa/v1/topdown/subset.go | 242 + .../opa/v1/topdown/template.go | 45 + .../open-policy-agent/opa/v1/topdown/test.go | 30 + .../open-policy-agent/opa/v1/topdown/time.go | 341 + .../opa/v1/topdown/tokens.go | 
1311 ++++ .../open-policy-agent/opa/v1/topdown/trace.go | 895 +++ .../open-policy-agent/opa/v1/topdown/type.go | 82 + .../opa/v1/topdown/type_name.go | 36 + .../open-policy-agent/opa/v1/topdown/uuid.go | 56 + .../open-policy-agent/opa/v1/topdown/walk.go | 163 + .../opa/v1/tracing/tracing.go | 55 + .../open-policy-agent/opa/v1/types/decode.go | 191 + .../open-policy-agent/opa/v1/types/types.go | 1204 ++++ .../open-policy-agent/opa/v1/util/backoff.go | 44 + .../open-policy-agent/opa/v1/util/channel.go | 32 + .../open-policy-agent/opa/v1/util/close.go | 22 + .../open-policy-agent/opa/v1/util/compare.go | 169 + .../opa/v1/util/decoding/context.go | 31 + .../open-policy-agent/opa/v1/util/doc.go | 6 + .../open-policy-agent/opa/v1/util/enumflag.go | 59 + .../open-policy-agent/opa/v1/util/graph.go | 90 + .../open-policy-agent/opa/v1/util/hashmap.go | 271 + .../open-policy-agent/opa/v1/util/json.go | 195 + .../open-policy-agent/opa/v1/util/maps.go | 34 + .../opa/v1/util/performance.go | 133 + .../open-policy-agent/opa/v1/util/queue.go | 113 + .../opa/v1/util/read_gzip_body.go | 72 + .../open-policy-agent/opa/v1/util/time.go | 48 + .../open-policy-agent/opa/v1/util/wait.go | 34 + .../opa/v1/version/version.go | 58 + .../open-policy-agent/opa/v1/version/wasm.go | 13 + .../prometheus/client_golang/LICENSE | 201 + .../prometheus/client_golang/NOTICE | 18 + .../client_golang/prometheus/.gitignore | 1 + .../client_golang/prometheus/README.md | 1 + .../prometheus/build_info_collector.go | 38 + .../client_golang/prometheus/collector.go | 128 + .../client_golang/prometheus/collectorfunc.go | 30 + .../client_golang/prometheus/counter.go | 358 + .../client_golang/prometheus/desc.go | 211 + .../client_golang/prometheus/doc.go | 210 + .../prometheus/expvar_collector.go | 86 + .../client_golang/prometheus/fnv.go | 42 + .../client_golang/prometheus/gauge.go | 311 + .../client_golang/prometheus/get_pid.go | 26 + .../prometheus/get_pid_gopherjs.go | 23 + .../client_golang/prometheus/go_collector.go | 274 + .../prometheus/go_collector_go116.go | 122 + .../prometheus/go_collector_latest.go | 574 ++ .../client_golang/prometheus/histogram.go | 2056 ++++++ .../prometheus/internal/almost_equal.go | 60 + .../prometheus/internal/difflib.go | 655 ++ .../internal/go_collector_options.go | 34 + .../prometheus/internal/go_runtime_metrics.go | 143 + .../prometheus/internal/metric.go | 101 + .../client_golang/prometheus/labels.go | 189 + .../client_golang/prometheus/metric.go | 276 + .../client_golang/prometheus/num_threads.go | 25 + .../prometheus/num_threads_gopherjs.go | 22 + .../client_golang/prometheus/observer.go | 64 + .../prometheus/process_collector.go | 180 + .../prometheus/process_collector_darwin.go | 130 + .../process_collector_mem_cgo_darwin.c | 84 + .../process_collector_mem_cgo_darwin.go | 51 + .../process_collector_mem_nocgo_darwin.go | 39 + .../process_collector_not_supported.go | 33 + .../process_collector_procfsenabled.go | 96 + .../prometheus/process_collector_windows.go | 125 + .../client_golang/prometheus/registry.go | 1076 +++ .../client_golang/prometheus/summary.go | 830 +++ .../client_golang/prometheus/timer.go | 81 + .../client_golang/prometheus/untyped.go | 42 + .../client_golang/prometheus/value.go | 274 + .../client_golang/prometheus/vec.go | 709 ++ .../client_golang/prometheus/vnext.go | 23 + .../client_golang/prometheus/wrap.go | 248 + .../prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 1399 ++++ 
vendor/github.com/prometheus/common/LICENSE | 201 + vendor/github.com/prometheus/common/NOTICE | 5 + .../prometheus/common/expfmt/decode.go | 456 ++ .../prometheus/common/expfmt/encode.go | 196 + .../prometheus/common/expfmt/expfmt.go | 211 + .../prometheus/common/expfmt/fuzz.go | 40 + .../common/expfmt/openmetrics_create.go | 695 ++ .../prometheus/common/expfmt/text_create.go | 520 ++ .../prometheus/common/expfmt/text_parse.go | 933 +++ .../prometheus/common/model/alert.go | 162 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 229 + .../prometheus/common/model/labelset.go | 158 + .../common/model/labelset_string.go | 43 + .../prometheus/common/model/metadata.go | 28 + .../prometheus/common/model/metric.go | 593 ++ .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 142 + .../prometheus/common/model/silence.go | 107 + .../prometheus/common/model/time.go | 359 + .../prometheus/common/model/value.go | 365 + .../prometheus/common/model/value_float.go | 99 + .../common/model/value_histogram.go | 179 + .../prometheus/common/model/value_type.go | 83 + .../github.com/prometheus/procfs/.gitignore | 2 + .../prometheus/procfs/.golangci.yml | 45 + .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 + .../prometheus/procfs/CONTRIBUTING.md | 121 + vendor/github.com/prometheus/procfs/LICENSE | 201 + .../prometheus/procfs/MAINTAINERS.md | 3 + vendor/github.com/prometheus/procfs/Makefile | 31 + .../prometheus/procfs/Makefile.common | 288 + vendor/github.com/prometheus/procfs/NOTICE | 7 + vendor/github.com/prometheus/procfs/README.md | 61 + .../github.com/prometheus/procfs/SECURITY.md | 6 + vendor/github.com/prometheus/procfs/arp.go | 116 + .../github.com/prometheus/procfs/buddyinfo.go | 85 + .../github.com/prometheus/procfs/cmdline.go | 30 + .../github.com/prometheus/procfs/cpuinfo.go | 519 ++ .../prometheus/procfs/cpuinfo_armx.go | 20 + .../prometheus/procfs/cpuinfo_loong64.go | 19 + .../prometheus/procfs/cpuinfo_mipsx.go | 20 + .../prometheus/procfs/cpuinfo_others.go | 19 + .../prometheus/procfs/cpuinfo_ppcx.go | 20 + .../prometheus/procfs/cpuinfo_riscvx.go | 20 + .../prometheus/procfs/cpuinfo_s390x.go | 19 + .../prometheus/procfs/cpuinfo_x86.go | 20 + vendor/github.com/prometheus/procfs/crypto.go | 154 + vendor/github.com/prometheus/procfs/doc.go | 44 + vendor/github.com/prometheus/procfs/fs.go | 56 + .../prometheus/procfs/fs_statfs_notype.go | 23 + .../prometheus/procfs/fs_statfs_type.go | 33 + .../github.com/prometheus/procfs/fscache.go | 422 ++ .../prometheus/procfs/internal/fs/fs.go | 58 + .../prometheus/procfs/internal/util/parse.go | 126 + .../procfs/internal/util/readfile.go | 37 + .../procfs/internal/util/sysreadfile.go | 70 + .../internal/util/sysreadfile_compat.go | 27 + .../procfs/internal/util/valueparser.go | 91 + vendor/github.com/prometheus/procfs/ipvs.go | 241 + .../prometheus/procfs/kernel_random.go | 63 + .../github.com/prometheus/procfs/loadavg.go | 62 + vendor/github.com/prometheus/procfs/mdstat.go | 279 + .../github.com/prometheus/procfs/meminfo.go | 422 ++ .../github.com/prometheus/procfs/mountinfo.go | 180 + .../prometheus/procfs/mountstats.go | 710 ++ .../prometheus/procfs/net_conntrackstat.go | 118 + .../github.com/prometheus/procfs/net_dev.go | 205 + .../prometheus/procfs/net_dev_snmp6.go | 96 + .../prometheus/procfs/net_ip_socket.go | 248 + .../prometheus/procfs/net_protocols.go | 183 + .../github.com/prometheus/procfs/net_route.go | 143 + 
.../prometheus/procfs/net_sockstat.go | 162 + .../prometheus/procfs/net_softnet.go | 155 + .../github.com/prometheus/procfs/net_tcp.go | 68 + .../prometheus/procfs/net_tls_stat.go | 119 + .../github.com/prometheus/procfs/net_udp.go | 64 + .../github.com/prometheus/procfs/net_unix.go | 257 + .../prometheus/procfs/net_wireless.go | 182 + .../github.com/prometheus/procfs/net_xfrm.go | 189 + .../github.com/prometheus/procfs/netstat.go | 82 + vendor/github.com/prometheus/procfs/proc.go | 338 + .../prometheus/procfs/proc_cgroup.go | 98 + .../prometheus/procfs/proc_cgroups.go | 98 + .../prometheus/procfs/proc_environ.go | 37 + .../prometheus/procfs/proc_fdinfo.go | 138 + .../prometheus/procfs/proc_interrupts.go | 98 + .../github.com/prometheus/procfs/proc_io.go | 59 + .../prometheus/procfs/proc_limits.go | 160 + .../github.com/prometheus/procfs/proc_maps.go | 211 + .../prometheus/procfs/proc_netstat.go | 443 ++ .../github.com/prometheus/procfs/proc_ns.go | 68 + .../github.com/prometheus/procfs/proc_psi.go | 102 + .../prometheus/procfs/proc_smaps.go | 164 + .../github.com/prometheus/procfs/proc_snmp.go | 353 + .../prometheus/procfs/proc_snmp6.go | 381 + .../github.com/prometheus/procfs/proc_stat.go | 235 + .../prometheus/procfs/proc_statm.go | 116 + .../prometheus/procfs/proc_status.go | 242 + .../github.com/prometheus/procfs/proc_sys.go | 51 + .../github.com/prometheus/procfs/schedstat.go | 121 + vendor/github.com/prometheus/procfs/slab.go | 151 + .../github.com/prometheus/procfs/softirqs.go | 160 + vendor/github.com/prometheus/procfs/stat.go | 258 + vendor/github.com/prometheus/procfs/swaps.go | 89 + vendor/github.com/prometheus/procfs/thread.go | 80 + vendor/github.com/prometheus/procfs/ttar | 413 ++ vendor/github.com/prometheus/procfs/vm.go | 212 + .../github.com/prometheus/procfs/zoneinfo.go | 196 + .../github.com/rcrowley/go-metrics/.gitignore | 9 + .../rcrowley/go-metrics/.travis.yml | 23 + vendor/github.com/rcrowley/go-metrics/LICENSE | 29 + .../github.com/rcrowley/go-metrics/README.md | 180 + .../github.com/rcrowley/go-metrics/counter.go | 112 + .../github.com/rcrowley/go-metrics/debug.go | 80 + vendor/github.com/rcrowley/go-metrics/ewma.go | 138 + .../github.com/rcrowley/go-metrics/gauge.go | 120 + .../rcrowley/go-metrics/gauge_float64.go | 125 + .../rcrowley/go-metrics/graphite.go | 113 + .../rcrowley/go-metrics/healthcheck.go | 61 + .../rcrowley/go-metrics/histogram.go | 202 + vendor/github.com/rcrowley/go-metrics/json.go | 31 + vendor/github.com/rcrowley/go-metrics/log.go | 100 + .../github.com/rcrowley/go-metrics/memory.md | 285 + .../github.com/rcrowley/go-metrics/meter.go | 251 + .../github.com/rcrowley/go-metrics/metrics.go | 13 + .../rcrowley/go-metrics/opentsdb.go | 119 + .../rcrowley/go-metrics/registry.go | 373 + .../github.com/rcrowley/go-metrics/runtime.go | 216 + .../rcrowley/go-metrics/runtime_cgo.go | 10 + .../go-metrics/runtime_gccpufraction.go | 9 + .../rcrowley/go-metrics/runtime_no_cgo.go | 7 + .../go-metrics/runtime_no_gccpufraction.go | 9 + .../github.com/rcrowley/go-metrics/sample.go | 616 ++ .../github.com/rcrowley/go-metrics/syslog.go | 78 + .../github.com/rcrowley/go-metrics/timer.go | 329 + .../rcrowley/go-metrics/validate.sh | 10 + .../github.com/rcrowley/go-metrics/writer.go | 100 + vendor/github.com/rivo/uniseg/README.md | 121 +- vendor/github.com/rivo/uniseg/doc.go | 108 +- .../github.com/rivo/uniseg/eastasianwidth.go | 2588 +++++++ .../rivo/uniseg/emojipresentation.go | 295 + .../github.com/rivo/uniseg/gen_breaktest.go | 215 + 
.../github.com/rivo/uniseg/gen_properties.go | 261 + vendor/github.com/rivo/uniseg/grapheme.go | 483 +- .../rivo/uniseg/graphemeproperties.go | 1915 +++++ .../github.com/rivo/uniseg/graphemerules.go | 176 + vendor/github.com/rivo/uniseg/line.go | 134 + .../github.com/rivo/uniseg/lineproperties.go | 3554 ++++++++++ vendor/github.com/rivo/uniseg/linerules.go | 626 ++ vendor/github.com/rivo/uniseg/properties.go | 1800 +---- vendor/github.com/rivo/uniseg/sentence.go | 90 + .../rivo/uniseg/sentenceproperties.go | 2845 ++++++++ .../github.com/rivo/uniseg/sentencerules.go | 276 + vendor/github.com/rivo/uniseg/step.go | 242 + vendor/github.com/rivo/uniseg/width.go | 61 + vendor/github.com/rivo/uniseg/word.go | 89 + .../github.com/rivo/uniseg/wordproperties.go | 1883 +++++ vendor/github.com/rivo/uniseg/wordrules.go | 282 + vendor/github.com/segmentio/asm/LICENSE | 21 + .../github.com/segmentio/asm/base64/base64.go | 67 + .../segmentio/asm/base64/base64_amd64.go | 78 + .../segmentio/asm/base64/base64_arm64.go | 42 + .../segmentio/asm/base64/base64_asm.go | 94 + .../segmentio/asm/base64/base64_default.go | 14 + .../segmentio/asm/base64/decode_amd64.go | 9 + .../segmentio/asm/base64/decode_amd64.s | 143 + .../segmentio/asm/base64/decode_arm64.go | 7 + .../segmentio/asm/base64/decode_arm64.s | 203 + .../segmentio/asm/base64/encode_amd64.go | 7 + .../segmentio/asm/base64/encode_amd64.s | 87 + .../segmentio/asm/base64/encode_arm64.go | 6 + .../segmentio/asm/base64/encode_arm64.s | 97 + .../github.com/segmentio/asm/cpu/arm/arm.go | 80 + .../segmentio/asm/cpu/arm64/arm64.go | 74 + vendor/github.com/segmentio/asm/cpu/cpu.go | 22 + .../segmentio/asm/cpu/cpuid/cpuid.go | 32 + .../github.com/segmentio/asm/cpu/x86/x86.go | 76 + .../asm/internal/unsafebytes/unsafebytes.go | 20 + .../sirupsen/logrus/terminal_check_bsd.go | 2 +- .../sirupsen/logrus/terminal_check_unix.go | 2 + .../sirupsen/logrus/terminal_check_wasi.go | 8 + .../sirupsen/logrus/terminal_check_wasip1.go | 8 + .../github.com/tchap/go-patricia/v2/AUTHORS | 3 + .../github.com/tchap/go-patricia/v2/LICENSE | 20 + .../tchap/go-patricia/v2/patricia/children.go | 363 + .../tchap/go-patricia/v2/patricia/patricia.go | 606 ++ vendor/github.com/valyala/fastjson/.gitignore | 1 + .../github.com/valyala/fastjson/.travis.yml | 19 + vendor/github.com/valyala/fastjson/LICENSE | 22 + vendor/github.com/valyala/fastjson/README.md | 227 + vendor/github.com/valyala/fastjson/arena.go | 126 + vendor/github.com/valyala/fastjson/doc.go | 9 + .../valyala/fastjson/fastfloat/parse.go | 515 ++ vendor/github.com/valyala/fastjson/fuzz.go | 22 + vendor/github.com/valyala/fastjson/handy.go | 170 + vendor/github.com/valyala/fastjson/parser.go | 976 +++ vendor/github.com/valyala/fastjson/pool.go | 52 + vendor/github.com/valyala/fastjson/scanner.go | 94 + vendor/github.com/valyala/fastjson/update.go | 110 + vendor/github.com/valyala/fastjson/util.go | 30 + .../github.com/valyala/fastjson/validate.go | 308 + vendor/github.com/vektah/gqlparser/v2/LICENSE | 19 + .../vektah/gqlparser/v2/ast/argmap.go | 37 + .../vektah/gqlparser/v2/ast/collections.go | 148 + .../vektah/gqlparser/v2/ast/comment.go | 31 + .../vektah/gqlparser/v2/ast/decode.go | 216 + .../vektah/gqlparser/v2/ast/definition.go | 110 + .../vektah/gqlparser/v2/ast/directive.go | 43 + .../vektah/gqlparser/v2/ast/document.go | 89 + .../vektah/gqlparser/v2/ast/dumper.go | 159 + .../vektah/gqlparser/v2/ast/fragment.go | 41 + .../vektah/gqlparser/v2/ast/operation.go | 32 + .../vektah/gqlparser/v2/ast/path.go | 72 + 
.../vektah/gqlparser/v2/ast/selection.go | 41 + .../vektah/gqlparser/v2/ast/source.go | 19 + .../vektah/gqlparser/v2/ast/type.go | 68 + .../vektah/gqlparser/v2/ast/value.go | 122 + .../vektah/gqlparser/v2/gqlerror/error.go | 197 + .../vektah/gqlparser/v2/lexer/blockstring.go | 58 + .../vektah/gqlparser/v2/lexer/lexer.go | 525 ++ .../vektah/gqlparser/v2/lexer/lexer_test.yml | 758 ++ .../vektah/gqlparser/v2/lexer/token.go | 148 + .../vektah/gqlparser/v2/parser/parser.go | 200 + .../vektah/gqlparser/v2/parser/query.go | 368 + .../vektah/gqlparser/v2/parser/query_test.yml | 545 ++ .../vektah/gqlparser/v2/parser/schema.go | 635 ++ .../gqlparser/v2/parser/schema_test.yml | 760 ++ .../gqlparser/v2/validator/core/core.go | 24 + .../gqlparser/v2/validator/core/helpers.go | 154 + .../gqlparser/v2/validator/core/walk.go | 300 + .../v2/validator/imported/prelude.graphql | 250 + .../vektah/gqlparser/v2/validator/prelude.go | 16 + .../validator/rules/fields_on_correct_type.go | 121 + .../rules/fragments_on_composite_types.go | 42 + .../validator/rules/known_argument_names.go | 84 + .../v2/validator/rules/known_directives.go | 50 + .../validator/rules/known_fragment_names.go | 22 + .../v2/validator/rules/known_root_type.go | 38 + .../v2/validator/rules/known_type_names.go | 80 + .../rules/lone_anonymous_operation.go | 22 + .../rules/max_introspection_depth.go | 86 + .../v2/validator/rules/no_fragment_cycles.go | 96 + .../validator/rules/no_undefined_variables.go | 31 + .../v2/validator/rules/no_unused_fragments.go | 32 + .../v2/validator/rules/no_unused_variables.go | 33 + .../rules/overlapping_fields_can_be_merged.go | 560 ++ .../rules/possible_fragment_spreads.go | 70 + .../rules/provided_required_arguments.go | 64 + .../gqlparser/v2/validator/rules/rules.go | 119 + .../v2/validator/rules/scalar_leafs.go | 39 + .../rules/single_field_subscriptions.go | 89 + .../validator/rules/unique_argument_names.go | 36 + .../rules/unique_directives_per_location.go | 27 + .../validator/rules/unique_fragment_names.go | 25 + .../rules/unique_input_field_names.go | 30 + .../validator/rules/unique_operation_names.go | 25 + .../validator/rules/unique_variable_names.go | 27 + .../validator/rules/values_of_correct_type.go | 246 + .../rules/variables_are_input_types.go | 31 + .../rules/variables_in_allowed_position.go | 41 + .../vektah/gqlparser/v2/validator/schema.go | 536 ++ .../gqlparser/v2/validator/schema_test.yml | 731 ++ .../gqlparser/v2/validator/validator.go | 158 + .../vektah/gqlparser/v2/validator/vars.go | 259 + .../gojsonpointer/LICENSE-APACHE-2.0.txt | 202 + .../xeipuuv/gojsonpointer/README.md | 41 + .../xeipuuv/gojsonpointer/pointer.go | 211 + .../gojsonreference/LICENSE-APACHE-2.0.txt | 202 + .../xeipuuv/gojsonreference/README.md | 10 + .../xeipuuv/gojsonreference/reference.go | 147 + .../yashtewari/glob-intersection/LICENSE | 201 + .../yashtewari/glob-intersection/README.md | 26 + .../yashtewari/glob-intersection/glob.go | 178 + .../yashtewari/glob-intersection/match.go | 91 + .../yashtewari/glob-intersection/non_empty.go | 155 + .../yashtewari/glob-intersection/simplify.go | 43 + .../glob-intersection/test_samples.go | 84 + .../yashtewari/glob-intersection/tokenize.go | 252 + vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + .../encoding/protodelim/protodelim.go | 160 + vendor/modules.txt | 237 +- 1146 files changed, 750885 insertions(+), 1880 deletions(-) create mode 100644 policy/types.go create mode 100644 policy/validate.go create mode 100644 vendor/github.com/agnivade/levenshtein/.gitignore create 
mode 100644 vendor/github.com/agnivade/levenshtein/License.txt create mode 100644 vendor/github.com/agnivade/levenshtein/Makefile create mode 100644 vendor/github.com/agnivade/levenshtein/README.md create mode 100644 vendor/github.com/agnivade/levenshtein/levenshtein.go create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt create mode 100644 vendor/github.com/cespare/xxhash/v2/README.md create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_asm.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_embedded.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_precompute.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go create mode 100644 vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go create mode 100644 vendor/github.com/go-ini/ini/.editorconfig create mode 100644 vendor/github.com/go-ini/ini/.gitignore create mode 100644 vendor/github.com/go-ini/ini/.golangci.yml create mode 100644 vendor/github.com/go-ini/ini/LICENSE create mode 100644 vendor/github.com/go-ini/ini/Makefile create mode 100644 vendor/github.com/go-ini/ini/README.md create mode 100644 vendor/github.com/go-ini/ini/codecov.yml create mode 100644 vendor/github.com/go-ini/ini/data_source.go create mode 100644 vendor/github.com/go-ini/ini/deprecated.go create mode 100644 vendor/github.com/go-ini/ini/error.go create mode 100644 vendor/github.com/go-ini/ini/file.go create mode 100644 vendor/github.com/go-ini/ini/helper.go create mode 100644 vendor/github.com/go-ini/ini/ini.go create mode 100644 vendor/github.com/go-ini/ini/key.go create mode 100644 vendor/github.com/go-ini/ini/parser.go create mode 100644 vendor/github.com/go-ini/ini/section.go create mode 100644 vendor/github.com/go-ini/ini/struct.go create mode 100644 vendor/github.com/gobwas/glob/.gitignore create mode 100644 
vendor/github.com/gobwas/glob/.travis.yml create mode 100644 vendor/github.com/gobwas/glob/LICENSE create mode 100644 vendor/github.com/gobwas/glob/bench.sh create mode 100644 vendor/github.com/gobwas/glob/compiler/compiler.go create mode 100644 vendor/github.com/gobwas/glob/glob.go create mode 100644 vendor/github.com/gobwas/glob/match/any.go create mode 100644 vendor/github.com/gobwas/glob/match/any_of.go create mode 100644 vendor/github.com/gobwas/glob/match/btree.go create mode 100644 vendor/github.com/gobwas/glob/match/contains.go create mode 100644 vendor/github.com/gobwas/glob/match/every_of.go create mode 100644 vendor/github.com/gobwas/glob/match/list.go create mode 100644 vendor/github.com/gobwas/glob/match/match.go create mode 100644 vendor/github.com/gobwas/glob/match/max.go create mode 100644 vendor/github.com/gobwas/glob/match/min.go create mode 100644 vendor/github.com/gobwas/glob/match/nothing.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix_any.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix_suffix.go create mode 100644 vendor/github.com/gobwas/glob/match/range.go create mode 100644 vendor/github.com/gobwas/glob/match/row.go create mode 100644 vendor/github.com/gobwas/glob/match/segments.go create mode 100644 vendor/github.com/gobwas/glob/match/single.go create mode 100644 vendor/github.com/gobwas/glob/match/suffix.go create mode 100644 vendor/github.com/gobwas/glob/match/suffix_any.go create mode 100644 vendor/github.com/gobwas/glob/match/super.go create mode 100644 vendor/github.com/gobwas/glob/match/text.go create mode 100644 vendor/github.com/gobwas/glob/readme.md create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/ast.go create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/parser.go create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/lexer.go create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/token.go create mode 100644 vendor/github.com/gobwas/glob/syntax/syntax.go create mode 100644 vendor/github.com/gobwas/glob/util/runes/runes.go create mode 100644 vendor/github.com/gobwas/glob/util/strings/strings.go create mode 100644 vendor/github.com/goccy/go-json/.codecov.yml create mode 100644 vendor/github.com/goccy/go-json/.gitignore create mode 100644 vendor/github.com/goccy/go-json/.golangci.yml create mode 100644 vendor/github.com/goccy/go-json/CHANGELOG.md create mode 100644 vendor/github.com/goccy/go-json/LICENSE create mode 100644 vendor/github.com/goccy/go-json/Makefile create mode 100644 vendor/github.com/goccy/go-json/README.md create mode 100644 vendor/github.com/goccy/go-json/color.go create mode 100644 vendor/github.com/goccy/go-json/decode.go create mode 100644 vendor/github.com/goccy/go-json/docker-compose.yml create mode 100644 vendor/github.com/goccy/go-json/encode.go create mode 100644 vendor/github.com/goccy/go-json/error.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/array.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/assign.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/bool.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/bytes.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/compile.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go create mode 100644 
vendor/github.com/goccy/go-json/internal/decoder/compile_race.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/context.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/float.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/func.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/int.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/interface.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/invalid.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/map.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/number.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/option.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/path.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/ptr.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/slice.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/stream.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/string.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/struct.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/type.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/uint.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/code.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compact.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/context.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/encoder.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/indent.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/int.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/map112.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/map113.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/opcode.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/option.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/optype.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/query.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/string.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/string_table.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go create mode 100644 
vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/errors/error.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/rtype.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/struct_field.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/type.go create mode 100644 vendor/github.com/goccy/go-json/json.go create mode 100644 vendor/github.com/goccy/go-json/option.go create mode 100644 vendor/github.com/goccy/go-json/path.go create mode 100644 vendor/github.com/goccy/go-json/query.go create mode 100644 vendor/github.com/lestrrat-go/blackmagic/.gitignore create mode 100644 vendor/github.com/lestrrat-go/blackmagic/LICENSE create mode 100644 vendor/github.com/lestrrat-go/blackmagic/README.md create mode 100644 vendor/github.com/lestrrat-go/blackmagic/blackmagic.go create mode 100644 vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore create mode 100644 vendor/github.com/lestrrat-go/dsig-secp256k1/Changes create mode 100644 vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE create mode 100644 vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go create mode 100644 vendor/github.com/lestrrat-go/dsig/.gitignore create mode 100644 vendor/github.com/lestrrat-go/dsig/Changes create mode 100644 vendor/github.com/lestrrat-go/dsig/LICENSE create mode 100644 vendor/github.com/lestrrat-go/dsig/README.md create mode 100644 vendor/github.com/lestrrat-go/dsig/algorithms.go create mode 100644 vendor/github.com/lestrrat-go/dsig/crypto_signer.go create mode 100644 vendor/github.com/lestrrat-go/dsig/dsig.go create mode 100644 vendor/github.com/lestrrat-go/dsig/ecdsa.go create mode 100644 vendor/github.com/lestrrat-go/dsig/eddsa.go create mode 100644 vendor/github.com/lestrrat-go/dsig/hmac.go create mode 100644 vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go create mode 100644 vendor/github.com/lestrrat-go/dsig/rsa.go create mode 100644 vendor/github.com/lestrrat-go/dsig/sign.go create mode 100644 vendor/github.com/lestrrat-go/dsig/validation.go create mode 100644 vendor/github.com/lestrrat-go/dsig/verify.go create mode 100644 vendor/github.com/lestrrat-go/httpcc/.gitignore create mode 100644 vendor/github.com/lestrrat-go/httpcc/LICENSE create mode 100644 vendor/github.com/lestrrat-go/httpcc/README.md create mode 100644 vendor/github.com/lestrrat-go/httpcc/directives.go create mode 100644 vendor/github.com/lestrrat-go/httpcc/httpcc.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/.gitignore create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/Changes create mode 100644 
vendor/github.com/lestrrat-go/httprc/v3/LICENSE create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/README.md create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/backend.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/client.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/controller.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/errors.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/errsink/errsink.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/httprc.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/options.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/resource.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/tracesink/tracesink.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/transformer.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/whitelist.go create mode 100644 vendor/github.com/lestrrat-go/httprc/v3/worker.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/.bazelignore create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/.bazelrc create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/.bazelversion create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/.gitignore create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/BUILD create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/Changes create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/Changes-v2.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/Changes-v3.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/LICENSE create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel.lock create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/Makefile create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/README.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/SECURITY.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/WORKSPACE create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/cert/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/cert/cert.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/cert/chain.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/codecov.yml create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/format.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/base64/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/base64/asmbase64.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/base64/base64.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/ecutil.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/json/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/json/json.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/json/registry.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/jwxio.go 
create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/pool/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/pool/big_int.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/pool/byte_slice.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/pool/bytes_buffer.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/pool/error_slice.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/pool/key_to_error_map.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/pool/pool.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/jwe_tokens.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/tokens.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/README.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/compression_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/content_encryption_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/elliptic_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/jwa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/key_encryption_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/key_type_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/options.yaml create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/options_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwa/signature_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/README.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/compress.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/decrypt.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/errors.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/filter.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/headers.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/headers_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/interface.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/cipher.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/interface.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/concatkdf.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/content_crypt.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/interface.go create mode 
100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/interface.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/keygen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/io.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/content_cipher.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/jwebb.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_asymmetric.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_symmetric.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_asymmetric.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_symmetric.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encryption.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/keywrap.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/key_provider.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/README.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/convert.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/doc.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/ecdsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/errors.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/filter.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/interface_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/io.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/x509.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/key_ops.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/okp_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/options.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/options.yaml create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/options_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/parser.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa_gen.go create mode 100644 
vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/usage.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/whitelist.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/README.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/errors.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/filter.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/headers.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/headers_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/interface.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/keytype.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/io.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/crypto_signer.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/format.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/key_provider.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/ecdsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/eddsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/hmac.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/rsa.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/message.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/options.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/sign_context.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/signature_builder.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/verifier.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jws/verify_context.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/BUILD.bazel create mode 
100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/README.md create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/builder_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/errors.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/fastpath.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/filter.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/http.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/interface.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/date.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/string.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/io.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/options.yaml create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/options_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/serialize.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/token_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/jwx.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/options.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/transform/BUILD.bazel create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/transform/filter.go create mode 100644 vendor/github.com/lestrrat-go/jwx/v3/transform/map.go create mode 100644 vendor/github.com/lestrrat-go/option/.gitignore create mode 100644 vendor/github.com/lestrrat-go/option/LICENSE create mode 100644 vendor/github.com/lestrrat-go/option/README.md create mode 100644 vendor/github.com/lestrrat-go/option/option.go create mode 100644 vendor/github.com/lestrrat-go/option/v2/.gitignore create mode 100644 vendor/github.com/lestrrat-go/option/v2/LICENSE create mode 100644 vendor/github.com/lestrrat-go/option/v2/README.md create mode 100644 vendor/github.com/lestrrat-go/option/v2/option.go create mode 100644 vendor/github.com/lestrrat-go/option/v2/set.go create mode 100644 vendor/github.com/moby/buildkit/sourcepolicy/policysession/policysession.pb.go create mode 100644 vendor/github.com/moby/buildkit/sourcepolicy/policysession/policysession.proto create mode 100644 vendor/github.com/moby/buildkit/sourcepolicy/policysession/policysession_grpc.pb.go create mode 100644 vendor/github.com/moby/buildkit/sourcepolicy/policysession/policysession_vtproto.pb.go create mode 100644 vendor/github.com/moby/buildkit/sourcepolicy/policysession/provider.go create mode 100644 vendor/github.com/moby/buildkit/sourcepolicy/policysession/verifier.go create mode 100644 vendor/github.com/moby/buildkit/util/gitutil/gitobject/parse.go create mode 100644 vendor/github.com/open-policy-agent/opa/LICENSE create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/doc.go create mode 100644 
vendor/github.com/open-policy-agent/opa/capabilities/v0.17.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.17.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.17.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.17.3.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.18.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.19.0-rc1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.19.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.19.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.19.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.20.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.20.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.20.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.20.3.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.20.4.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.20.5.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.21.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.21.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.22.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.23.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.23.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.23.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.24.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.25.0-rc1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.25.0-rc2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.25.0-rc3.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.25.0-rc4.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.25.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.25.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.25.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.26.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.27.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.27.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.28.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.29.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.29.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.29.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.29.3.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.29.4.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.30.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.30.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.30.2.json create mode 100644 
vendor/github.com/open-policy-agent/opa/capabilities/v0.31.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.32.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.32.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.33.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.33.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.34.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.34.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.34.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.35.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.36.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.36.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.37.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.37.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.37.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.38.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.38.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.39.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.40.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.41.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.42.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.42.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.42.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.43.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.43.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.44.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.45.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.46.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.46.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.46.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.46.3.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.47.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.47.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.47.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.47.3.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.47.4.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.48.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.49.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.49.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.49.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.50.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.50.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.50.2.json 
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.51.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.52.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.53.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.53.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.54.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.55.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.56.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.57.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.57.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.58.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.59.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.60.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.61.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.62.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.62.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.63.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.64.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.64.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.65.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.66.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.67.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.67.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.68.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.69.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.70.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.10.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.5.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.5.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.6.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.7.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.8.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go 
create mode 100644 vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv create mode 100644 vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm create mode 100644 vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/config/config.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/debug/debug.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/README.md create mode 100644 vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/license.txt create mode 100644 vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/file/url/url.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/README.md create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonContext.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/locales.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/types.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go create mode 
100644 vendor/github.com/open-policy-agent/opa/internal/lcss/README.md create mode 100644 vendor/github.com/open-policy-agent/opa/internal/lcss/lcss.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/lcss/qsufsort.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/leb128/leb128.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/merge/merge.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/planner/planner.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/planner/rules.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/NOTICE.txt create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/const.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/header_rules.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/headers.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/host.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/util.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/ref/ref.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/report/report.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/semver/LICENSE create mode 100644 vendor/github.com/open-policy-agent/opa/internal/semver/semver.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/strings/strings.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/version/version.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/constant/constant.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go create mode 100644 
vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/parametric.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/module/pretty.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/opcode/opcode.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities/capabilities.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities/capabilities_nowasm.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/types/types.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/wasm/util/util.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/check.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/compare.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/compile.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/default_module_loader.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/env.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/errors.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/index.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/interning.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/map.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/parser.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/performance.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/policy.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/rego_compiler.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/schema.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/strings.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/term.go create mode 100644 
vendor/github.com/open-policy-agent/opa/v1/ast/transform.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/unify.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/varset.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ast/visit.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/file.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/store.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/config/config.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/format/format.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ir/ir.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/ir/walk.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/keys/keys.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/loader/errors.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/loader/loader.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/logging/logging.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/rego/errors.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/rego/rego.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json create mode 100644 vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/errors.go create mode 100644 
vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/interface.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/path.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/storage/storage.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/array.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/http.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/input.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/json.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/net.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/object.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go 
create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/print.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/query.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/save.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/template.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/test.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/time.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/type.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/types/decode.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/types/types.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/backoff.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/channel.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/close.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/compare.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/graph.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/json.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/maps.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/performance.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/queue.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/time.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/util/wait.go create mode 100644 
vendor/github.com/open-policy-agent/opa/v1/version/version.go create mode 100644 vendor/github.com/open-policy-agent/opa/v1/version/wasm.go create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/get_pid.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/num_threads.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vnext.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/labelset_string.go create mode 100644 vendor/github.com/prometheus/common/model/metadata.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/common/model/value_float.go create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go create mode 100644 vendor/github.com/prometheus/common/model/value_type.go create mode 100644 vendor/github.com/prometheus/procfs/.gitignore create mode 100644 vendor/github.com/prometheus/procfs/.golangci.yml create mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/procfs/Makefile create mode 100644 vendor/github.com/prometheus/procfs/Makefile.common create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/SECURITY.md create mode 100644 vendor/github.com/prometheus/procfs/arp.go create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create 
mode 100644 vendor/github.com/prometheus/procfs/cmdline.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_armx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_loong64.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_others.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_x86.go create mode 100644 vendor/github.com/prometheus/procfs/crypto.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_notype.go create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_type.go create mode 100644 vendor/github.com/prometheus/procfs/fscache.go create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/readfile.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go create mode 100644 vendor/github.com/prometheus/procfs/loadavg.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/meminfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev_snmp6.go create mode 100644 vendor/github.com/prometheus/procfs/net_ip_socket.go create mode 100644 vendor/github.com/prometheus/procfs/net_protocols.go create mode 100644 vendor/github.com/prometheus/procfs/net_route.go create mode 100644 vendor/github.com/prometheus/procfs/net_sockstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go create mode 100644 vendor/github.com/prometheus/procfs/net_tcp.go create mode 100644 vendor/github.com/prometheus/procfs/net_tls_stat.go create mode 100644 vendor/github.com/prometheus/procfs/net_udp.go create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go create mode 100644 vendor/github.com/prometheus/procfs/net_wireless.go create mode 100644 vendor/github.com/prometheus/procfs/net_xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/netstat.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroups.go create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go create mode 100644 vendor/github.com/prometheus/procfs/proc_interrupts.go create mode 100644 
vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_netstat.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_snmp.go create mode 100644 vendor/github.com/prometheus/procfs/proc_snmp6.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/proc_statm.go create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go create mode 100644 vendor/github.com/prometheus/procfs/proc_sys.go create mode 100644 vendor/github.com/prometheus/procfs/schedstat.go create mode 100644 vendor/github.com/prometheus/procfs/slab.go create mode 100644 vendor/github.com/prometheus/procfs/softirqs.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/swaps.go create mode 100644 vendor/github.com/prometheus/procfs/thread.go create mode 100644 vendor/github.com/prometheus/procfs/ttar create mode 100644 vendor/github.com/prometheus/procfs/vm.go create mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/.gitignore create mode 100644 vendor/github.com/rcrowley/go-metrics/.travis.yml create mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE create mode 100644 vendor/github.com/rcrowley/go-metrics/README.md create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go create mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go create mode 100644 vendor/github.com/rcrowley/go-metrics/memory.md create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go create mode 100644 vendor/github.com/rcrowley/go-metrics/validate.sh create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go create mode 100644 vendor/github.com/rivo/uniseg/eastasianwidth.go create mode 100644 
vendor/github.com/rivo/uniseg/emojipresentation.go create mode 100644 vendor/github.com/rivo/uniseg/gen_breaktest.go create mode 100644 vendor/github.com/rivo/uniseg/gen_properties.go create mode 100644 vendor/github.com/rivo/uniseg/graphemeproperties.go create mode 100644 vendor/github.com/rivo/uniseg/graphemerules.go create mode 100644 vendor/github.com/rivo/uniseg/line.go create mode 100644 vendor/github.com/rivo/uniseg/lineproperties.go create mode 100644 vendor/github.com/rivo/uniseg/linerules.go create mode 100644 vendor/github.com/rivo/uniseg/sentence.go create mode 100644 vendor/github.com/rivo/uniseg/sentenceproperties.go create mode 100644 vendor/github.com/rivo/uniseg/sentencerules.go create mode 100644 vendor/github.com/rivo/uniseg/step.go create mode 100644 vendor/github.com/rivo/uniseg/width.go create mode 100644 vendor/github.com/rivo/uniseg/word.go create mode 100644 vendor/github.com/rivo/uniseg/wordproperties.go create mode 100644 vendor/github.com/rivo/uniseg/wordrules.go create mode 100644 vendor/github.com/segmentio/asm/LICENSE create mode 100644 vendor/github.com/segmentio/asm/base64/base64.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64_arm64.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64_asm.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64_default.go create mode 100644 vendor/github.com/segmentio/asm/base64/decode_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/decode_amd64.s create mode 100644 vendor/github.com/segmentio/asm/base64/decode_arm64.go create mode 100644 vendor/github.com/segmentio/asm/base64/decode_arm64.s create mode 100644 vendor/github.com/segmentio/asm/base64/encode_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/encode_amd64.s create mode 100644 vendor/github.com/segmentio/asm/base64/encode_arm64.go create mode 100644 vendor/github.com/segmentio/asm/base64/encode_arm64.s create mode 100644 vendor/github.com/segmentio/asm/cpu/arm/arm.go create mode 100644 vendor/github.com/segmentio/asm/cpu/arm64/arm64.go create mode 100644 vendor/github.com/segmentio/asm/cpu/cpu.go create mode 100644 vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go create mode 100644 vendor/github.com/segmentio/asm/cpu/x86/x86.go create mode 100644 vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_wasi.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go create mode 100644 vendor/github.com/tchap/go-patricia/v2/AUTHORS create mode 100644 vendor/github.com/tchap/go-patricia/v2/LICENSE create mode 100644 vendor/github.com/tchap/go-patricia/v2/patricia/children.go create mode 100644 vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go create mode 100644 vendor/github.com/valyala/fastjson/.gitignore create mode 100644 vendor/github.com/valyala/fastjson/.travis.yml create mode 100644 vendor/github.com/valyala/fastjson/LICENSE create mode 100644 vendor/github.com/valyala/fastjson/README.md create mode 100644 vendor/github.com/valyala/fastjson/arena.go create mode 100644 vendor/github.com/valyala/fastjson/doc.go create mode 100644 vendor/github.com/valyala/fastjson/fastfloat/parse.go create mode 100644 vendor/github.com/valyala/fastjson/fuzz.go create mode 100644 vendor/github.com/valyala/fastjson/handy.go create mode 100644 vendor/github.com/valyala/fastjson/parser.go create mode 100644 
vendor/github.com/valyala/fastjson/pool.go create mode 100644 vendor/github.com/valyala/fastjson/scanner.go create mode 100644 vendor/github.com/valyala/fastjson/update.go create mode 100644 vendor/github.com/valyala/fastjson/util.go create mode 100644 vendor/github.com/valyala/fastjson/validate.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/LICENSE create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/argmap.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/collections.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/comment.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/decode.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/definition.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/directive.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/document.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/dumper.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/fragment.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/operation.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/path.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/selection.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/source.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/type.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/ast/value.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/lexer/blockstring.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml create mode 100644 vendor/github.com/vektah/gqlparser/v2/lexer/token.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/parser/parser.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/parser/query.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/parser/query_test.yml create mode 100644 vendor/github.com/vektah/gqlparser/v2/parser/schema.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/parser/schema_test.yml create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/core/core.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/imported/prelude.graphql create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/prelude.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go create mode 100644 
vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/schema.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/validator.go create mode 100644 vendor/github.com/vektah/gqlparser/v2/validator/vars.go create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/pointer.go create mode 100644 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonreference/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonreference/reference.go create mode 100644 vendor/github.com/yashtewari/glob-intersection/LICENSE create mode 100644 vendor/github.com/yashtewari/glob-intersection/README.md create mode 100644 vendor/github.com/yashtewari/glob-intersection/glob.go create mode 100644 vendor/github.com/yashtewari/glob-intersection/match.go create mode 100644 vendor/github.com/yashtewari/glob-intersection/non_empty.go create mode 100644 vendor/github.com/yashtewari/glob-intersection/simplify.go create mode 100644 vendor/github.com/yashtewari/glob-intersection/test_samples.go create mode 100644 vendor/github.com/yashtewari/glob-intersection/tokenize.go create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go diff --git a/build/build.go b/build/build.go index f0952971902a..091b24e2d348 100644 --- a/build/build.go +++ b/build/build.go @@ -21,6 +21,7 @@ import ( 
noderesolver "github.com/docker/buildx/build/resolver" "github.com/docker/buildx/builder" "github.com/docker/buildx/driver" + "github.com/docker/buildx/policy" "github.com/docker/buildx/util/buildflags" "github.com/docker/buildx/util/confutil" "github.com/docker/buildx/util/desktop" @@ -113,6 +114,8 @@ type Inputs struct { // DockerfileMappingSrc and DockerfileMappingDst are filled in by the builder. DockerfileMappingSrc string DockerfileMappingDst string + + policy []policy.File } type NamedContext struct { diff --git a/build/opt.go b/build/opt.go index 1c6d429b8716..32c56b692d75 100644 --- a/build/opt.go +++ b/build/opt.go @@ -6,6 +6,7 @@ import ( "io" "maps" "os" + "path" "path/filepath" "slices" "strconv" @@ -21,6 +22,7 @@ import ( "github.com/distribution/reference" "github.com/docker/buildx/builder" "github.com/docker/buildx/driver" + "github.com/docker/buildx/policy" "github.com/docker/buildx/util/buildflags" "github.com/docker/buildx/util/confutil" "github.com/docker/buildx/util/dockerutil" @@ -38,6 +40,7 @@ import ( "github.com/moby/buildkit/session/sshforward/sshprovider" "github.com/moby/buildkit/session/upload/uploadprovider" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/sourcepolicy/policysession" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/util/entitlements" "github.com/moby/buildkit/util/gitutil" @@ -322,6 +325,20 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O } defers = append(defers, releaseLoad) + if len(opt.Inputs.policy) > 0 { + env := policy.Env{} + for k, v := range opt.BuildArgs { + if env.Args == nil { + env.Args = map[string]*string{} + } + env.Args[k] = &v + } + env.Filename = path.Base(opt.Inputs.DockerfilePath) + env.Target = opt.Target + env.Labels = opt.Labels + so.SourcePolicyProvider = policysession.NewPolicyProvider(policy.NewPolicy(opt.Inputs.policy, env).CheckPolicy) + } + // add node identifier to shared key if one was specified if so.SharedKey != "" { so.SharedKey += ":" + cfg.TryNodeIdentifier() @@ -535,6 +552,19 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw pro return nil, err } dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName) + + if fi, err := os.Lstat(filepath.Join(dockerfileDir, dockerfileName+".rego")); err == nil { + if fi.Mode().IsRegular() { + dt, err := os.ReadFile(filepath.Join(dockerfileDir, dockerfileName+".rego")) + if err != nil { + return nil, errors.Wrapf(err, "failed to read policy file %s.rego", dockerfileName) + } + inp.policy = append(inp.policy, policy.File{ + Filename: dockerfileName + ".rego", + Data: dt, + }) + } + } } target.FrontendAttrs["filename"] = dockerfileName diff --git a/go.mod b/go.mod index 8056e6378c88..c6a2a1ca17bf 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/docker/buildx -go 1.24.3 +go 1.24.6 require ( github.com/Masterminds/semver/v3 v3.4.0 @@ -14,7 +14,7 @@ require ( github.com/containerd/log v0.1.0 github.com/containerd/platforms v1.0.0-rc.2 github.com/creack/pty v1.1.24 - github.com/davecgh/go-spew v1.1.1 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/distribution/reference v0.6.0 github.com/docker/cli v29.1.2+incompatible github.com/docker/cli-docs-tool v0.11.0 @@ -36,6 +36,7 @@ require ( github.com/moby/sys/atomicwriter v0.1.0 github.com/moby/sys/mountinfo v0.7.2 github.com/morikuni/aec v1.1.0 + github.com/open-policy-agent/opa v1.10.1 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 
github.com/pelletier/go-toml v1.9.5 @@ -43,7 +44,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b - github.com/sirupsen/logrus v1.9.3 + github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 @@ -75,6 +76,7 @@ require ( require ( github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/agext/levenshtein v1.2.3 // indirect + github.com/agnivade/levenshtein v1.2.1 // indirect github.com/apparentlymart/go-cidr v1.0.1 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/aws/aws-sdk-go-v2 v1.38.1 // indirect @@ -89,12 +91,15 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.0 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 // indirect github.com/aws/smithy-go v1.22.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/containerd/api v1.10.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.4 // indirect github.com/docker/go-connections v0.6.0 // indirect @@ -102,12 +107,15 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fvbommel/sortorder v1.1.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.7.0 // indirect @@ -121,6 +129,14 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.1 // indirect + github.com/lestrrat-go/blackmagic v1.0.4 // indirect + github.com/lestrrat-go/dsig v1.0.0 // indirect + github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect + github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect + github.com/lestrrat-go/jwx/v3 v3.0.11 // indirect + github.com/lestrrat-go/option v1.0.1 // indirect + github.com/lestrrat-go/option/v2 v2.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect @@ -138,17 +154,29 @@ require ( github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/rivo/uniseg v0.2.0 // indirect + github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect + github.com/segmentio/asm v1.2.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/tchap/go-patricia/v2 v2.3.3 // indirect github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect + github.com/valyala/fastjson v1.6.4 // indirect + github.com/vektah/gqlparser/v2 v2.5.30 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + github.com/yashtewari/glob-intersection v0.2.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 // indirect diff --git a/go.sum b/go.sum index 55b623389755..8ce7dbc21469 100644 --- a/go.sum +++ b/go.sum @@ -10,12 +10,18 @@ github.com/Microsoft/hcsshim v0.14.0-rc.1 h1:qAPXKwGOkVn8LlqgBN8GS0bxZ83hOJpcjxz github.com/Microsoft/hcsshim v0.14.0-rc.1/go.mod h1:hTKFGbnDtQb1wHiOWv4v0eN+7boSWAHyK/tNAaYZL0c= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc= github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U= github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod 
h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go-v2 v1.38.1 h1:j7sc33amE74Rz0M/PoCpsZQ6OunLqys/m5antM0J+Z8= @@ -46,6 +52,8 @@ github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bytecodealliance/wasmtime-go/v37 v37.0.0 h1:DPjdn2V3JhXHMoZ2ymRqGK+y1bDyr9wgpyYCvhjMky8= +github.com/bytecodealliance/wasmtime-go/v37 v37.0.0/go.mod h1:Pf1l2JCTUFMnOqDIwkjzx1qfVJ09xbaXETKgRVE4jZ0= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -92,8 +100,17 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs= +github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w= +github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= +github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= @@ -114,14 +131,22 @@ github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/felixge/httpsnoop v1.0.4 
h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -141,6 +166,10 @@ github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -149,6 +178,8 @@ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8J github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= +github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -198,12 +229,30 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA= +github.com/lestrrat-go/blackmagic 
v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= +github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38= +github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc/v3 v3.0.1 h1:3n7Es68YYGZb2Jf+k//llA4FTZMl3yCwIjFIk4ubevI= +github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk= +github.com/lestrrat-go/jwx/v3 v3.0.11 h1:yEeUGNUuNjcez/Voxvr7XPTYNraSQTENJgtVTfwvG/w= +github.com/lestrrat-go/jwx/v3 v3.0.11/go.mod h1:XSOAh2SiXm0QgRe3DulLZLyt+wUuEdFo81zuKTLcvgQ= +github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss= +github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= @@ -254,6 +303,8 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/open-policy-agent/opa v1.10.1 h1:haIvxZSPky8HLjRrvQwWAjCPLg8JDFSZMbbG4yyUHgY= +github.com/open-policy-agent/opa v1.10.1/go.mod h1:7uPI3iRpOalJ0BhK6s1JALWPU9HvaV1XeBSSMZnr/PM= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -270,18 +321,22 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod 
h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -290,12 +345,16 @@ github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+x github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b h1:h+3JX2VoWTFuyQEo87pStk/a99dzIO1mM9KxIyLPGTU= github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk= github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= @@ -309,12 +368,15 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc= +github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 h1:r0p7fK56l8WPequOaR3i9LBqfPtEdXIQbUTzT55iqT4= github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323/go.mod h1:3Iuxbr0P7D3zUzBMAZB+ois3h/et0shEz0qApgHYGpY= github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f h1:MoxeMfHAe5Qj/ySSBfL8A7l1V+hxuluj8owsIEEZipI= @@ -327,12 +389,22 @@ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw= github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= +github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= +github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE= +github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 
h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= diff --git a/policy/types.go b/policy/types.go new file mode 100644 index 000000000000..019860648739 --- /dev/null +++ b/policy/types.go @@ -0,0 +1,137 @@ +package policy + +import "time" + +type Input struct { + Env Env `json:"env,omitzero"` + Local *Local `json:"local,omitempty"` + Image *Image `json:"image,omitempty"` + HTTP *HTTP `json:"http,omitempty"` + Git *Git `json:"git,omitempty"` +} + +type Env struct { + Args map[string]*string `json:"args,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Filename string `json:"filename,omitempty"` + Target string `json:"target,omitempty"` +} + +type HTTP struct { + URL string `json:"url,omitempty"` + Schema string `json:"schema,omitempty"` + Host string `json:"host,omitempty"` + Path string `json:"path,omitempty"` + Query map[string][]string `json:"query,omitempty"` + Perm int `json:"perm,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + HasAuth bool `json:"hasAuth,omitempty"` + + Checksum string `json:"checksum,omitempty"` + // Filename string `json:"filename,omitempty"` + + Signature *PGPSignature `json:"signature,omitempty"` + AttestationBundle *AttestationBundle `json:"attestationBundle,omitempty"` +} + +type AttestationBundle struct{} + +type Git struct { + Schema string `json:"schema,omitempty"` + Host string `json:"host,omitempty"` + Remote string `json:"remote,omitempty"` + FullURL string `json:"fullURL,omitempty"` + TagName string `json:"tagName,omitempty"` + Branch string `json:"branch,omitempty"` + Ref string `json:"ref,omitempty"` + Subdir string `json:"subDir,omitempty"` + IsCommitRef bool `json:"isCommitRef,omitempty"` + + IsSHA256 bool `json:"isSHA256,omitempty"` + Checksum string `json:"checksum,omitempty"` + CommitChecksum string `json:"commitChecksum,omitempty"` + IsAnnotatedTag bool `json:"isAnnotatedTag,omitempty"` + + Tag *Tag `json:"tag,omitempty"` + Commit *Commit `json:"commit,omitempty"` +} + +type Actor struct { + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` + When *time.Time `json:"when,omitempty"` +} + +type Commit struct { + Tree string `json:"tree,omitempty"` + Parents []string `json:"parents,omitempty"` + Author Actor `json:"author,omitzero"` + Committer Actor `json:"committer,omitzero"` + Message string `json:"message,omitempty"` + + PGPSignature *PGPSignature `json:"PGPSignature,omitempty"` + SSHSignature *SSHSignature `json:"SSHSignature,omitempty"` +} + +type Tag struct { + Object string `json:"object,omitempty"` + Type string `json:"type,omitempty"` + Tag string `json:"tag,omitempty"` + Tagger Actor `json:"tagger,omitzero"` + Message string `json:"message,omitempty"` + + PGPSignature *PGPSignature `json:"PGPSignature,omitempty"` + 
SSHSignature *SSHSignature `json:"SSHSignature,omitempty"` +} + +type PGPSignature struct { + Version int `json:"version,omitempty"` + KeyID string `json:"keyID,omitempty"` +} + +type SSHSignature struct { + Version int `json:"version,omitempty"` + PubKey string `json:"pubKey,omitempty"` +} + +type Image struct { + Ref string `json:"ref,omitempty"` + Host string `json:"host,omitempty"` + Repo string `json:"repo,omitempty"` + FullRepo string `json:"fullRepo,omitempty"` // domain + repo + Tag string `json:"tag,omitempty"` // unset if canonical ref + Platform string `json:"platform,omitempty"` + OS string `json:"os,omitempty"` + Architecture string `json:"arch,omitempty"` + Variant string `json:"variant,omitempty"` + IsCanonical bool `json:"isCanonical,omitempty"` + + Checksum string `json:"checksum,omitempty"` + + // Config based + CreatedTime string `json:"createdTime,omitempty"` + Env []string `json:"env,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + User string `json:"user,omitempty"` + Volumes []string `json:"volumes,omitempty"` + WorkingDir string `json:"workingDir,omitempty"` + + HasProvenance bool `json:"hasProvenance,omitempty"` + Signatures []AttestationSignature `json:"signatures,omitempty"` +} + +type AttestationSignature struct { + Kind string `json:"kind,omitempty"` + Timestamps []TrustedTimestamp `json:"timestamps,omitempty"` + // CertificateSummary +} + +type TrustedTimestamp struct { + Tlog bool `json:"tlog,omitempty"` + Timestamp time.Time `json:"timestamp,omitzero"` +} + +type Local struct { + Name string `json:"name,omitempty"` +} diff --git a/policy/validate.go b/policy/validate.go new file mode 100644 index 000000000000..137501deb918 --- /dev/null +++ b/policy/validate.go @@ -0,0 +1,501 @@ +package policy + +import ( + "context" + "encoding/json" + "log" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/platforms" + "github.com/distribution/reference" + gwpb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/solver/pb" + moby_buildkit_v1_sourcepolicy "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/sourcepolicy/policysession" + "github.com/moby/buildkit/util/gitutil" + "github.com/moby/buildkit/util/gitutil/gitobject" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/rego" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// this is temporary debug logging, to be replaced with progressbar logging later +var isDebug = sync.OnceValue(func() bool { + if v, ok := os.LookupEnv("BUILDX_POLICY_DEBUG"); ok { + b, _ := strconv.ParseBool(v) + return b + } + return false +}) + +func debugf(format string, v ...any) { + if isDebug() { + log.Printf(format, v...)
+ } +} + +type Policy struct { + files []File + env Env +} + +var _ policysession.PolicyCallback = (&Policy{}).CheckPolicy + +type File struct { + Filename string + Data []byte +} + +func NewPolicy(files []File, env Env) *Policy { + return &Policy{ + files: files, + env: env, + } +} + +func (p *Policy) CheckPolicy(ctx context.Context, req *policysession.CheckPolicyRequest) (*policysession.DecisionResponse, *gwpb.ResolveSourceMetaRequest, error) { + var inp Input + var unknowns []string + inp.Env = p.env + + if req.Source == nil || req.Source.Source == nil { + return nil, nil, errors.Errorf("no source info in request") + } + src := req.Source + + scheme, refstr, ok := strings.Cut(src.Source.Identifier, "://") + if !ok { + return nil, nil, errors.Errorf("invalid source identifier: %s", src.Source.Identifier) + } + + switch scheme { + case "http", "https": + u, err := url.Parse(src.Source.Identifier) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse http source url") + } + inp.HTTP = &HTTP{ + URL: src.Source.Identifier, + Schema: scheme, + Host: u.Host, + Path: u.Path, + Query: u.Query(), + } + if v, ok := src.Source.Attrs[pb.AttrHTTPPerm]; ok { + p, err := strconv.ParseInt(v, 10, 32) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse http source permission") + } + inp.HTTP.Perm = int(p) + } + if v, ok := src.Source.Attrs[pb.AttrHTTPUID]; ok { + uid, err := strconv.ParseInt(v, 10, 32) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse http source uid") + } + inp.HTTP.UID = int(uid) + } + if v, ok := src.Source.Attrs[pb.AttrHTTPGID]; ok { + gid, err := strconv.ParseInt(v, 10, 32) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse http source gid") + } + inp.HTTP.GID = int(gid) + } + if _, ok := src.Source.Attrs[pb.AttrHTTPAuthHeaderSecret]; ok { + inp.HTTP.HasAuth = true + } + if req.Source.Image == nil { + unknowns = append(unknowns, "input.http.checksum") + } else { + inp.HTTP.Checksum = req.Source.Image.Digest + } + case "git": + if !gitutil.IsGitTransport(refstr) { + refstr = "https://" + refstr + } + u, err := gitutil.ParseURL(refstr) + if err != nil { + return nil, nil, err + } + g := &Git{ + Schema: u.Scheme, + Remote: u.Remote, + Host: u.Host, + } + var ref string + var isFullRef bool + if u.Opts != nil { + ref = u.Opts.Ref + g.Subdir = u.Opts.Subdir + if sd := path.Clean(g.Subdir); sd == "/" || sd == "." 
{ + g.Subdir = "" + } + } + if v, ok := src.Source.Attrs[pb.AttrFullRemoteURL]; !ok { + if !gitutil.IsGitTransport(v) { + v = "https://" + v + } + u, err := gitutil.ParseURL(v) + if err != nil { + return nil, nil, err + } + g.Schema = u.Scheme + g.Remote = u.Remote + g.Host = u.Host + g.FullURL = v + } + if tag, ok := strings.CutPrefix(g.Ref, "refs/tags/"); ok { + g.TagName = tag + isFullRef = true + } + if branch, ok := strings.CutPrefix(g.Ref, "refs/heads/"); ok { + g.Branch = branch + isFullRef = true + } + + if gitutil.IsCommitSHA(ref) { + g.IsCommitRef = true + g.Checksum = ref + g.CommitChecksum = ref + isFullRef = true + } + + unk := []string{} + + if src.Git == nil { + if !isFullRef { + unk = append(unk, "tagName", "branch", "ref") + } else { + g.Ref = ref + } + if g.Checksum == "" { + unk = append(unk, "checksum", "isAnnotatedTag", "commitChecksum", "isSHA256") + } + unk = append(unk, "tag", "commit") + } else { + g.Ref = src.Git.Ref + if tag, ok := strings.CutPrefix(g.Ref, "refs/tags/"); ok { + g.TagName = tag + } + if branch, ok := strings.CutPrefix(g.Ref, "refs/heads/"); ok { + g.Branch = branch + } + g.Checksum = src.Git.Checksum + g.CommitChecksum = src.Git.CommitChecksum + if g.CommitChecksum == "" { + g.CommitChecksum = g.Checksum + } + if g.Checksum != g.CommitChecksum { + g.IsAnnotatedTag = true + } + + if len(src.Git.CommitObject) == 0 { + unk = append(unk, "commit", "tag") + } else { + obj, err := gitobject.Parse(src.Git.CommitObject) + if err != nil { + return nil, nil, err + } + if err := obj.VerifyChecksum(g.CommitChecksum); err != nil { + return nil, nil, err + } + c, err := obj.ToCommit() + if err != nil { + return nil, nil, err + } + g.Commit = &Commit{ + Tree: c.Tree, + Message: c.Message, + Parents: c.Parents, + Author: Actor(c.Author), + Committer: Actor(c.Committer), + } + + if dt := src.Git.TagObject; len(dt) > 0 { + obj, err := gitobject.Parse(src.Git.TagObject) + if err != nil { + return nil, nil, err + } + if err := obj.VerifyChecksum(g.Checksum); err != nil { + return nil, nil, err + } + t, err := obj.ToTag() + if err != nil { + return nil, nil, err + } + g.Tag = &Tag{ + Object: t.Object, + Message: t.Message, + Type: t.Type, + Tag: t.Tag, + Tagger: Actor(t.Tagger), + } + } + } + } + + if len(g.Checksum) == 64 { + g.IsSHA256 = true + } + + unknowns = append(unknowns, withPrefix(unk, "input.git.")...) 
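+
+		// Anything about the git source that could not be determined from this
+		// request (ref names, checksums, commit/tag objects) has been recorded
+		// as an unknown above; if the partial evaluation below shows that the
+		// policy actually references one of those fields, a follow-up
+		// ResolveSourceMetaRequest is returned so the missing data can be
+		// fetched before a final decision is made.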
+ + inp.Git = g + case "docker-image": + ref, err := reference.ParseNormalizedNamed(refstr) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse image source reference") + } + inp.Image = &Image{ + Ref: ref.String(), + Host: reference.Domain(ref), + Repo: reference.FamiliarName(ref), + FullRepo: ref.Name(), + } + if digested, ok := ref.(reference.Canonical); ok { + inp.Image.Checksum = digested.Digest().String() + inp.Image.IsCanonical = true + } + if tagged, ok := ref.(reference.Tagged); ok { + inp.Image.Tag = tagged.Tag() + } + if req.Platform == nil { + return nil, nil, errors.Errorf("platform required for image source") + } + platformStr := req.Platform.OS + "/" + req.Platform.Architecture + if req.Platform.Variant != "" { + platformStr += "/" + req.Platform.Variant + } + p, err := platforms.Parse(platformStr) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse platform") + } + p = platforms.Normalize(p) + inp.Image.Platform = platforms.Format(p) + inp.Image.OS = p.OS + inp.Image.Architecture = p.Architecture + inp.Image.Variant = p.Variant + + configFields := []string{ + "checksum", "labels", "user", "volumes", "workingDir", "env", + } + + if req.Source.Image == nil { + if !inp.Image.IsCanonical { + unknowns = append(unknowns, "input.image.checksum") + } + unknowns = append(unknowns, withPrefix(configFields, "input.image.")...) + unknowns = append(unknowns, "input.image.hasProvenance") + } else { + inp.Image.Checksum = req.Source.Image.Digest + if cfg := req.Source.Image.Config; cfg != nil { + var img ocispecs.Image + if err := json.Unmarshal(cfg, &img); err != nil { + return nil, nil, errors.Wrapf(err, "failed to unmarshal image config") + } + inp.Image.CreatedTime = img.Created.Format(time.RFC3339) + inp.Image.Labels = img.Config.Labels + inp.Image.Env = img.Config.Env + inp.Image.User = img.Config.User + inp.Image.Volumes = make([]string, 0, len(img.Config.Volumes)) + for v := range img.Config.Volumes { + inp.Image.Volumes = append(inp.Image.Volumes, v) + } + inp.Image.WorkingDir = img.Config.WorkingDir + } else { + unknowns = append(unknowns, withPrefix(configFields, "input.image.")...) + } + + if ac := req.Source.Image.AttestationChain; ac != nil { + inp.Image.HasProvenance = ac.AttestationManifest != "" + } else { + unknowns = append(unknowns, "input.image.hasProvenance") + } + } + unknowns = append(unknowns, "input.image.signatures") + case "local": + inp.Local = &Local{ + Name: refstr, + } + default: + // oci-layout not supported yet + return nil, nil, errors.Errorf("unsupported source scheme: %s", scheme) + } + + opts := []func(*rego.Rego){ + rego.Query("data.docker.decision"), + rego.Input(inp), + rego.SkipPartialNamespace(true), + } + for _, file := range p.files { + opts = append(opts, rego.Module(file.Filename, string(file.Data))) + } + dt, err := json.MarshalIndent(inp, "", " ") + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to marshal policy input") + } + debugf("policy input: %s", dt) + + if len(unknowns) > 0 { + debugf("unknowns for policy evaluation: %+v", unknowns) + opts = append(opts, rego.Unknowns(unknowns)) + } + r := rego.New(opts...) 
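+
+	// The policy files are evaluated as plain Rego against the input document
+	// marshaled above. Only data.docker.decision is queried, and the result is
+	// expected to be an object carrying an "allow" bool and, optionally, a
+	// "deny_msg" list of strings (see the result handling further down). As an
+	// illustrative sketch only (the docker.io check is an example, not a
+	// shipped policy), a module could look like:
+	//
+	//	package docker
+	//
+	//	default decision := {"allow": false, "deny_msg": ["source not allowed by policy"]}
+	//
+	//	decision := {"allow": true} if {
+	//	    input.image.host == "docker.io"
+	//	}
+	//
+	// Field names under input follow the structs in policy/types.go.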
+ + if len(unknowns) > 0 { + pq, err := r.Partial(ctx) + if err != nil { + return nil, nil, err + } + unk := collectUnknowns(pq.Support) + if len(unk) > 0 { + next := &gwpb.ResolveSourceMetaRequest{ + Source: req.Source.Source, + Platform: req.Platform, + } + unk2 := make([]string, 0, len(unk)) + for _, u := range unk { + k := strings.TrimPrefix(u, "input.") + k = trimKey(k) + switch k { + case "image", "git", "http", "local": + // parents are returned as unknowns for some reason, ignore + continue + default: + unk2 = append(unk2, k) + } + } + if len(unk2) > 0 { + debugf("collected unknowns: %+v", unk2) + for _, u := range unk2 { + switch u { + case "image.labels", "image.user", "image.volumes", "image.workingDir", "image.env": + if next.Image == nil { + next.Image = &gwpb.ResolveSourceImageRequest{} + } + next.Image.NoConfig = false + case "image.hasProvenance": + if next.Image == nil { + next.Image = &gwpb.ResolveSourceImageRequest{ + NoConfig: true, + } + } + next.Image.AttestationChain = true + case "image.checksum": + + case "git.ref", "git.checksum", "git.commitChecksum", "git.isAnnotatedTag", "git.isSHA256", "git.tagName", "git.branch": + + case "git.commit", "git.tag": + if next.Git == nil { + next.Git = &gwpb.ResolveSourceGitRequest{} + } + next.Git.ReturnObject = true + + default: + return nil, nil, errors.Errorf("unhandled unknown property %s", u) + } + } + debugf("next resolve meta request: %+v", next) + return nil, next, nil + } + } + } + + rs, err := r.Eval(ctx) + if err != nil { + return nil, nil, err + } + if len(rs) == 0 { + return nil, nil, errors.Errorf("policy returned zero result") + } + rsz := rs[0] + if len(rsz.Expressions) == 0 { + return nil, nil, errors.Errorf("policy returned zero expressions") + } + v := rsz.Expressions[0].Value + vt, ok := v.(map[string]any) + if !ok { + return nil, nil, errors.Errorf("unexpected policy return type: %T %s", vt, rsz.Expressions[0].Text) + } + + resp := &policysession.DecisionResponse{ + Action: moby_buildkit_v1_sourcepolicy.PolicyAction_DENY, + } + debugf("policy response: %+v", vt) + + if v, ok := vt["allow"]; ok { + if vv, ok := v.(bool); !ok { + return nil, nil, errors.Errorf("invalid allowed property type %T, expecting bool", v) + } else if vv { + resp.Action = moby_buildkit_v1_sourcepolicy.PolicyAction_ALLOW + } + } + + if v, ok := vt["deny_msg"]; ok { + if vv, ok := v.([]any); ok { + for _, m := range vv { + if m, ok := m.(string); ok { + resp.DenyMessages = append(resp.DenyMessages, &policysession.DenyMessage{ + Message: m, + }) + } + } + } + } + debugf("policy decision: %s %v", resp.Action, resp.DenyMessages) + + return resp, nil, nil +} + +func withPrefix(arr []string, prefix string) []string { + out := make([]string, len(arr)) + for i, s := range arr { + out[i] = prefix + s + } + return out +} + +func collectUnknowns(mods []*ast.Module) []string { + seen := map[string]struct{}{} + var out []string + + for _, mod := range mods { + ast.WalkRefs(mod, func(ref ast.Ref) bool { + if ref.HasPrefix(ast.InputRootRef) { + s := ref.String() // e.g. "input.request.path" + if _, ok := seen[s]; !ok { + seen[s] = struct{}{} + out = append(out, s) + } + } + return true + }) + } + return out +} + +func trimKey(s string) string { + const ( + dot = '.' 
+ sb = '[' + ) + + components := 0 + for i, r := range s { + if r == dot || r == sb { + components++ + if components == 2 { + return s[:i] + } + } + } + return s +} diff --git a/vendor/github.com/agnivade/levenshtein/.gitignore b/vendor/github.com/agnivade/levenshtein/.gitignore new file mode 100644 index 000000000000..345780a4444f --- /dev/null +++ b/vendor/github.com/agnivade/levenshtein/.gitignore @@ -0,0 +1,5 @@ +coverage.txt +fuzz/fuzz-fuzz.zip +fuzz/corpus/corpus/* +fuzz/corpus/suppressions/* +fuzz/corpus/crashes/* diff --git a/vendor/github.com/agnivade/levenshtein/License.txt b/vendor/github.com/agnivade/levenshtein/License.txt new file mode 100644 index 000000000000..54b51f49938d --- /dev/null +++ b/vendor/github.com/agnivade/levenshtein/License.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Agniva De Sarker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/agnivade/levenshtein/Makefile b/vendor/github.com/agnivade/levenshtein/Makefile new file mode 100644 index 000000000000..3bbda319e4f9 --- /dev/null +++ b/vendor/github.com/agnivade/levenshtein/Makefile @@ -0,0 +1,13 @@ +all: test install + +install: + go install + +lint: + gofmt -l -s -w . && go vet . + +test: + go test -race -v -coverprofile=coverage.txt -covermode=atomic + +bench: + go test -run=XXX -bench=. -benchmem -count=5 diff --git a/vendor/github.com/agnivade/levenshtein/README.md b/vendor/github.com/agnivade/levenshtein/README.md new file mode 100644 index 000000000000..34378aabecf1 --- /dev/null +++ b/vendor/github.com/agnivade/levenshtein/README.md @@ -0,0 +1,80 @@ +levenshtein ![Build Status](https://github.com/agnivade/levenshtein/actions/workflows/ci.yml/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein) +=========== + +[Go](http://golang.org) package to calculate the [Levenshtein Distance](http://en.wikipedia.org/wiki/Levenshtein_distance) + +The library is fully capable of working with non-ascii strings. But the strings are not normalized. That is left as a user-dependant use case. Please normalize the strings before passing it to the library if you have such a requirement. +- https://blog.golang.org/normalization + +#### Limitation + +As a performance optimization, the library can handle strings only up to 65536 characters (runes). 
If you need to handle strings larger than that, please pin to version 1.0.3. + +Install +------- + + go get github.com/agnivade/levenshtein + +Example +------- + +```go +package main + +import ( + "fmt" + "github.com/agnivade/levenshtein" +) + +func main() { + s1 := "kitten" + s2 := "sitting" + distance := levenshtein.ComputeDistance(s1, s2) + fmt.Printf("The distance between %s and %s is %d.\n", s1, s2, distance) + // Output: + // The distance between kitten and sitting is 3. +} + +``` + +Benchmarks +---------- + +``` +name time/op +Simple/ASCII-4 330ns ± 2% +Simple/French-4 617ns ± 2% +Simple/Nordic-4 1.16µs ± 4% +Simple/Tibetan-4 1.05µs ± 1% + +name alloc/op +Simple/ASCII-4 96.0B ± 0% +Simple/French-4 128B ± 0% +Simple/Nordic-4 192B ± 0% +Simple/Tibetan-4 144B ± 0% + +name allocs/op +Simple/ASCII-4 1.00 ± 0% +Simple/French-4 1.00 ± 0% +Simple/Nordic-4 1.00 ± 0% +Simple/Tibetan-4 1.00 ± 0% +``` + +Comparisons with other libraries +-------------------------------- + +``` +name time/op +Leven/ASCII/agniva-4 353ns ± 1% +Leven/ASCII/arbovm-4 485ns ± 1% +Leven/ASCII/dgryski-4 395ns ± 0% +Leven/French/agniva-4 648ns ± 1% +Leven/French/arbovm-4 791ns ± 0% +Leven/French/dgryski-4 682ns ± 0% +Leven/Nordic/agniva-4 1.28µs ± 1% +Leven/Nordic/arbovm-4 1.52µs ± 1% +Leven/Nordic/dgryski-4 1.32µs ± 1% +Leven/Tibetan/agniva-4 1.12µs ± 1% +Leven/Tibetan/arbovm-4 1.31µs ± 0% +Leven/Tibetan/dgryski-4 1.16µs ± 0% +``` diff --git a/vendor/github.com/agnivade/levenshtein/levenshtein.go b/vendor/github.com/agnivade/levenshtein/levenshtein.go new file mode 100644 index 000000000000..861f409dd2b7 --- /dev/null +++ b/vendor/github.com/agnivade/levenshtein/levenshtein.go @@ -0,0 +1,101 @@ +// Package levenshtein is a Go implementation to calculate Levenshtein Distance. +// +// Implementation taken from +// https://gist.github.com/andrei-m/982927#gistcomment-1931258 +package levenshtein + +import "unicode/utf8" + +// minLengthThreshold is the length of the string beyond which +// an allocation will be made. Strings smaller than this will be +// zero alloc. +const minLengthThreshold = 32 + +// ComputeDistance computes the levenshtein distance between the two +// strings passed as an argument. The return value is the levenshtein distance +// +// Works on runes (Unicode code points) but does not normalize +// the input strings. See https://blog.golang.org/normalization +// and the golang.org/x/text/unicode/norm package. +func ComputeDistance(a, b string) int { + if len(a) == 0 { + return utf8.RuneCountInString(b) + } + + if len(b) == 0 { + return utf8.RuneCountInString(a) + } + + if a == b { + return 0 + } + + // We need to convert to []rune if the strings are non-ASCII. + // This could be avoided by using utf8.RuneCountInString + // and then doing some juggling with rune indices, + // but leads to far more bounds checks. It is a reasonable trade-off. + s1 := []rune(a) + s2 := []rune(b) + + // swap to save some memory O(min(a,b)) instead of O(a) + if len(s1) > len(s2) { + s1, s2 = s2, s1 + } + + // remove trailing identical runes. + for i := 0; i < len(s1); i++ { + if s1[len(s1)-1-i] != s2[len(s2)-1-i] { + s1 = s1[:len(s1)-i] + s2 = s2[:len(s2)-i] + break + } + } + + // Remove leading identical runes. + for i := 0; i < len(s1); i++ { + if s1[i] != s2[i] { + s1 = s1[i:] + s2 = s2[i:] + break + } + } + + lenS1 := len(s1) + lenS2 := len(s2) + + // Init the row. + var x []uint16 + if lenS1+1 > minLengthThreshold { + x = make([]uint16, lenS1+1) + } else { + // We make a small optimization here for small strings. 
+ // Because a slice of constant length is effectively an array, + // it does not allocate. So we can re-slice it to the right length + // as long as it is below a desired threshold. + x = make([]uint16, minLengthThreshold) + x = x[:lenS1+1] + } + + // we start from 1 because index 0 is already 0. + for i := 1; i < len(x); i++ { + x[i] = uint16(i) + } + + // make a dummy bounds check to prevent the 2 bounds check down below. + // The one inside the loop is particularly costly. + _ = x[lenS1] + // fill in the rest + for i := 1; i <= lenS2; i++ { + prev := uint16(i) + for j := 1; j <= lenS1; j++ { + current := x[j-1] // match + if s2[i-1] != s1[j-1] { + current = min(x[j-1]+1, prev+1, x[j]+1) + } + x[j-1] = prev + prev = current + } + x[lenS1] = prev + } + return int(x[lenS1]) +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 000000000000..339177be6636 --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 000000000000..1602287d7ce5 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 
+10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 
+12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000000..d7d14f8eb63d --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
+func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. 
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 000000000000..24b53065f40b --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 000000000000..33c88305c46e --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,74 @@ +# xxhash + +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 000000000000..94b9c443987c --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... 
+GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 000000000000..78bddf1ceed7 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,243 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array for the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest with a zero seed. +func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { + var d Digest + d.ResetWithSeed(seed) + return &d +} + +// Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. +func (d *Digest) Reset() { + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. 
+func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 000000000000..3e8b132579ec --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,209 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 000000000000..7e3145a22186 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go new file mode 100644 index 000000000000..78f95f256103 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -0,0 +1,15 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 000000000000..118e49e819e0 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. 
+func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 000000000000..05f5e7dfe7b7 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,16 @@ +//go:build appengine +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 000000000000..cf9d42aed536 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,58 @@ +//go:build !appengine +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "unsafe" +) + +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. + +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: +// +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) +// +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. 
There is also a test (TestInlining) which verifies that these are +// inlined. +// +// See https://github.com/golang/go/issues/42739 for discussion. + +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. +func (d *Digest) WriteString(s string) (n int, err error) { + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. +type sliceHeader struct { + s string + cap int +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE new file mode 100644 index 000000000000..fdf6d88225e3 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE @@ -0,0 +1,17 @@ +ISC License + +Copyright (c) 2013-2017 The btcsuite developers +Copyright (c) 2015-2024 The Decred developers +Copyright (c) 2017 The Lightning Network Developers + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md new file mode 100644 index 000000000000..b84bcdb77df9 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md @@ -0,0 +1,72 @@ +secp256k1 +========= + +[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions) +[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) +[![Doc](https://img.shields.io/badge/doc-reference-blue.svg)](https://pkg.go.dev/github.com/decred/dcrd/dcrec/secp256k1/v4) + +Package secp256k1 implements optimized secp256k1 elliptic curve operations. + +This package provides an optimized pure Go implementation of elliptic curve +cryptography operations over the secp256k1 curve as well as data structures and +functions for working with public and private secp256k1 keys. See +https://www.secg.org/sec2-v2.pdf for details on the standard. + +In addition, sub packages are provided to produce, verify, parse, and serialize +ECDSA signatures and EC-Schnorr-DCRv0 (a custom Schnorr-based signature scheme +specific to Decred) signatures. See the README.md files in the relevant sub +packages for more details about those aspects. 
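
For orientation, a minimal sketch of how the key types and the `ecdsa` sub package mentioned above are typically wired together. This is an illustrative example, not part of the vendored files; it assumes the v4 module import paths and that `secp256k1.GeneratePrivateKey`, `PrivateKey.PubKey`, `PublicKey.SerializeCompressed`, `ecdsa.Sign`, and `Signature.Verify` behave as documented in the package reference:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
	"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
)

func main() {
	// Generate a fresh secp256k1 private key and derive its public key.
	priv, err := secp256k1.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	pub := priv.PubKey()
	fmt.Printf("compressed pubkey: %x\n", pub.SerializeCompressed())

	// Sign a message hash with the ecdsa sub package (RFC6979 deterministic
	// nonce) and verify the resulting signature against the public key.
	hash := sha256.Sum256([]byte("hello"))
	sig := ecdsa.Sign(priv, hash[:])
	fmt.Println("signature valid:", sig.Verify(hash[:], pub))
}
```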
+ +An overview of the features provided by this package are as follows: + +- Private key generation, serialization, and parsing +- Public key generation, serialization and parsing per ANSI X9.62-1998 + - Parses uncompressed, compressed, and hybrid public keys + - Serializes uncompressed and compressed public keys +- Specialized types for performing optimized and constant time field operations + - `FieldVal` type for working modulo the secp256k1 field prime + - `ModNScalar` type for working modulo the secp256k1 group order +- Elliptic curve operations in Jacobian projective coordinates + - Point addition + - Point doubling + - Scalar multiplication with an arbitrary point + - Scalar multiplication with the base point (group generator) +- Point decompression from a given x coordinate +- Nonce generation via RFC6979 with support for extra data and version + information that can be used to prevent nonce reuse between signing algorithms + +It also provides an implementation of the Go standard library `crypto/elliptic` +`Curve` interface via the `S256` function so that it may be used with other +packages in the standard library such as `crypto/tls`, `crypto/x509`, and +`crypto/ecdsa`. However, in the case of ECDSA, it is highly recommended to use +the `ecdsa` sub package of this package instead since it is optimized +specifically for secp256k1 and is significantly faster as a result. + +Although this package was primarily written for dcrd, it has intentionally been +designed so it can be used as a standalone package for any projects needing to +use optimized secp256k1 elliptic curve cryptography. + +Finally, a comprehensive suite of tests is provided to provide a high level of +quality assurance. + +## secp256k1 use in Decred + +At the time of this writing, the primary public key cryptography in widespread +use on the Decred network used to secure coins is based on elliptic curves +defined by the secp256k1 domain parameters. + +## Installation and Updating + +This package is part of the `github.com/decred/dcrd/dcrec/secp256k1/v4` module. +Use the standard go tooling for working with modules to incorporate it. + +## Examples + +* [Encryption](https://pkg.go.dev/github.com/decred/dcrd/dcrec/secp256k1/v4#example-package-EncryptDecryptMessage) + Demonstrates encrypting and decrypting a message using a shared key derived + through ECDHE. + +## License + +Package secp256k1 is licensed under the [copyfree](http://copyfree.org) ISC +License. diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go new file mode 100644 index 000000000000..bb0b41fda182 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go @@ -0,0 +1,18 @@ +// Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package secp256k1 + +// Auto-generated file (see genprecomps.go) +// DO NOT EDIT + +var compressedBytePoints = "eJyk2wNCIAgAAMBs27Zt27a52bZt27Zt27Ztu+4RN/8YgP8pmqLC650yqn+RN9o5WamIzkLa9Agq2bEoRaoJ/oRJplKgCMS4iEExiT3u5gnfu//K8yz4XhSnQzTXYdtp6i45sAprIIyKo/AhtXelFMQPtnAcGf58V/JbVveLAN8Vcf0Q+bnIk0kWveCiUp9zy1Ko9e0uaYkPT0UfsCTgaZhyQTuNksSeqX9vTrQ5GH/G8fWiUIiM8Zvl31woXxiquMqOQMJCoQiZ/iXuDOqY7tTyHlv85rmSanWdJIe0JD1VkaEwqclvlvbtOy07cCk4KT/jXWxBk6kb7TMLkeWtCZli02fkxlspmDtHfMonSi0ScKI+ypNPlZVHgO1W4GkR4AkSbYSxX5oGlIctKHhCjRY9eR0GsIluyh4QFg9t4FU0TdkfLf4BSXR4MMmKV/aDCqtzxLvXaFGs0GVb7zLqgb0kn7nDXfGhpaDHAVIodPh3ummk9NZItmfr8juoNyszjGfELYp+OX5UK0lt7hcNU4NjciFV5Wd7rF4mtLxEEuUAmyUdQwDHGfahHiT9dk4+r7K6EPYYBbBaAQeWYegqF0z5jh/FwuFiOMV3oh7nteiCQCzGOJqamLDUBuox6qN4ZOLY4x+vIdtftTZM3+hI/xBsP5Zv0Yo1QQ8jcIP70VItBoA5MhNnb/AxAJzAPDvNTmrNgIqr66MA4MupfWmXLUa0gZu6/vA5lc2V6cKJC0xIayi9bT8kBWbiIVj/9oh5qCV/g53tVrxQ6XfhqsPWYFps2OYNc4vTaFEdPWT7BVQ8mWoNfwWqBHRV5KIyHgt1Mavq2Z7ogUtQRuRyipzfdg8hKVRf1r94lYx6hjntvVTgLiRvOtETLWBq/baEcWugzzIzxpLreLZWuSpPIj+yd7xLqrhNUu6uWQIESsLW8mIPQPS9tpxRI70BAQEYTY+gGvz8N5xgw3s1lCLjdaQA5ncTo18lEO1pjX0HkiKXSwBpVohq/1PSTUKwMXmdemLJmH0VfWx+xu+rIySuZYabbIJZRmoP7hx4qrjNR7ZLln4Fa96EAuW9C2RGaEjJXQ9XFGPJDi3O6PpdBHFKaPg0HXSqNaSf5HVB/npt+uEgjJWHh/bMHlB+Cq37JsrxklflGImfRY0WYdtecfotfzSaGGJXYGPdY6+Uk5OmyIgHXhgmAvG8kLXWetUZMxo5SuRCpdfdoNQ68uHArCZBkTJ2PTWfBs9MFazROg8x1CQdlvnwm3GSYWGuoJ6WsgBgGHZAxDltrlZDiYEpBMu2yV+sJ/M4cX1Vid9DJbwSWuMXdX22YqljiMvUf8gp5OUDO8mz8uD4izxRrDBa0k3sEsOdiPjVk/tGcJ9d47fkgYdy3jBfKSNrrG1Bvdn37xeqdSxFcEYdoggg3n8iy8Wo4FsHQJwB3QnexMLFrXO/4o5JCTJYUZFGBlT9yeU5rCdnGvJZeE68PVJVTlVxezDz3hnIMuy+EiH0QoasAI2GGlHvlsfDLcad8O7RfbwYd6nlG5phUmqOOQ6xgKFG309xCsbKvDumzmZ8WpVWGCvxYKExEGktwqkKfedH9PbOqcLSpzIDmoCD+/IQv2ZSKB3WzF+bZzAoxImoNSJPDDeCNkpeTbu58L3n8UQ6S3SHPhRoT9QneplWmOAhQoLTGNmzIqMZUflkPDNZJ1PUr7qnLP3OiPWWKfVm9HIXv7X2zdP0oFP5yC5cGCKG83zVSeBqd14przJKjmTOJTEFIJcZZt6LLnSPjxZkSMge3GLpx7KkCao7yMOxdxm7gKn4l5w4vrz1D/MuTgXvedy45y4n3YzmeUNNV12X5hUuCnC/RbBIfxXrE1JJffx1VpHc2Z88roux91yJK3zm9FHPcxWFL2eElsDqvYgoYlrrxIDCF8w0L9xVCJ/K1wBYV6z0Il1liZFnrh7oC7QnCoi/AZVC5wzuseezcZmsO1PXijTQ0Bz2N8iU9xr6H+hAMrmj5/kfZNGZwfRbQEYgFxnYiZ/RcnKxv6L/Ug2cGHP9l0pkpT1bcb6HfTZLVoy+UXd2qckfNk5rcKMh1Rl49vNssVRf7Hd8AfivO3ESlJUSz4MFkdDmiOzmO3GCXh864/drgFmZq6f1XiZsaz4r6NbjOTOQdQJf21TOmp1+bfXxGWCL5M810GZBhc6YLVGxcWFHwEhf/d8HuDXUa+i2WUcclqdokltn6q1nu2eLoI3aiyuYvyr08xBNo8h6o3/yX9fAyxP9Mmwwudd0GpruWyd+6Th6G7vUmuZ4LHgO1XTEVeMhK5PHoDx4J0JDDEVGv2kJdA/3hHRZXcjF/n6qveOFmgDS/K9c7qgLFfacIAS+VHxEk+Xm3UKLvhMgQXSU+GVKjei1uFONV4Ye67v33x7hbEEbT78HMWliBfVkEqbEOLiGV/81jLALoxKPMEAZaKMp2vBrwAHmcYqvhnZC1UE6Cr/JZW6k+69Bgxp+sGI3BfNvp5h/8spmJqg4myTAcqzYaW8Ami61S7cR5I5iErsXlk1oVtuqnyJi7rpaa89xob7p9udfTmT/ikUfL49GLZpxgYnjt7W/so79IqGHzZIGIIARcU7ktlYCMyhTUsBzf3Bt1fI3TPdf9q89J0+v8+lSdcIjcojNA6OKoSVmZl1IDJQ1FFUSB5nuRRnBQad6DQS/gHAZxefrebtPJA3qGzu4EsFsPJXaPKGq5gQaz1ERF1dpdRQmIeMWxLcyKaPK31zIUjilx0vGc0CupUPlgfzlAnni2l49rX6nxM8NF71OfhDKFLBIggWSfbBKORmOU1ZvFZIkfWIpDwOKy78HNLKSp/FNA7paMy9pcCxu0OvI9kcWld/GnM57qRnliEKgPUeQ9/Prs6koi6i8QAOov/xlN1Me38NfUgZIe5LJqw2p4hvWm0D7YuqCoZdYCnRVvwVDT/xcUc++4D7NeXYhKC1Ffqm8Jy7NWQE3v6koJY2q+T3yGH7+2c4V5HLboKAmLx7LSvsCRDjEhoUARsfKpiOSAxmJAf2Evbi5Z/+Ep1UFmFxGIrCPagCv/jUfTrQ+tB3CCp7L8Av2vR9x+42mKoeCso2te6hJ8+VZ3cq9beFbpyZUb/fOOwBjlS3NpWlKwU97IdO8Ey8Cr3w0TE9YOTY8Mu3LpY7j7PPBQtP/lrd9WaPQ7dJuSMp+mS/k8i4fIuuDBwMIzVouGhrn9t+nvTuXacu4D60+FYx+4+E27N0Q+QQY3rrMISc3dovqzKig1eMkIDQ8d5/Fwo7WflX2uHsYn16sKtRiQyMIagTwBBeTPEsm5U2YYkfBomnnO2pW3R9IdO2QghSgaQX50RveltU68Mck0D08wkYGWLA9TrrPfi7gzxJAyiup8DZsUrLsipVuEL47gpqdNtsaG7GV3ShLcCPfEKQEKzGRVs98mluYbdgEUagANpVz9XOs
tElchJ43RRkpMsmXYeoSzZatjSTDwGm6vHVgHrUrCk6dzTrrH2QZehW2xpjcc+JA4qp5zvuGSYgvC32BceJXaHvQMhwHs/5DXsxeioT3l4awYPOiJOVDihOyPZYb2iAWOUREfP3Seb2+3qyytoILxhIPf+0OYjWCTg8rb3mV3iAVpk5+PpwxDdwXI5GJ4Rz5BbmrY1ze7wcbSJvhEzIWONaUROb4ZEvIHHbMVBqyw0ITDtsv1S6dzgmZxZgRcrWSks18w49DFkEqrysIGd5HJcM5jpNWJvXax5ki9ggd6DDzzLWI6zBqgFht/NoOMiNfj8rkz61v0a8ynunoGYT2YB75l8dz80Vz+NvTEpN38PUdnv8dw12YmVQDvlXDXrrc7kdH5tigy4yLb/X8OIGY98t4PchBPxvtl/lZ7tCzOqU1fIFs5XqG0Xd5PpslyRkcpY0e81mcmxLPPE1dXm/j3t5BQpLKlKku137Y/Ln8HmU36BlrBS0Pr3Q36f5B+S8A48pP/Tb7BNB51Y8UI7Ub1vs6CueReJtn/crGW78BnfUiaJClNdGhaHdyveuLOsMjuUxQs8AUNi/miOuH1LE4s+v0UG8A4sXVdldX+FRkRgmcg5vk6xKFlGvIC1YIybgNrUgwUdE2P8iesBha81++nZ7KnL3l8E0ngcicmo3KpRbm+Wutc1c0NQhEnnjo0uiZ4xuVEFomiMtwkdD7ZlpBf/c5Awbf7wwUb4CxF+ha/zpWs/kLZIPRTaA2PHSQlGH+jSjRovkiI/Tn9Ba2mPNsW8v94k7CcHcrg6FtxVHjhd7Y6zPjOvYqeSstSHVAB9b7mLV4Q+TU1HM/g4eCcgHxQ+XIjelvMEVb4Bgp6VnQojLgPDC90zpna3WQMjvfaSoARO3vM1QioFr04Z+lg/UUre7tL3t6QYh3hPDuVC0p+y8DsZVAXs40fPjGFEUl2S4UuO5Q7+opHhQ1whzh6HySiEdJDcc/TMuk1oOVBLhEvgpzCJIngjYZ0Q8R/a5nBTNZdiEJwBlx7zjyI/BK36wsYgUW/NxePqrXlxQzmeY+o9uBGDJ4Au+WQePJ4SI/N43qkCHvnbHcCx0VTpsF/KP+ShrvwUvPlYho36mo/LA7HoywPU5iePgWgXCmA7QS9B+D+CwarAEkUtsJveNSkl5/aBqTQ7kWqA7ZP4DSR5WRPSsBSVrIH2EK1pPpQsgBCXs3o9KJROeFtFDeEEL/Yigqd5VvaQhL407jWgpFgNFlmaKcq2PTXWlpgyUwmPyUNmVHmza8saL+x7X4wxx5nBsM2UTqfafXxuR87Jv8+i1zpuNu4juOCM+QdGW9M7K7WnjUhtxq8I4p4xWzjT+/TGdcDfLZk4I45tl78NBJtR/H8mKcCaZ18eEcXf+J/nYUup7oMZu5+ed8KR48m7omvLKoDLOdVTkM+/6U5FcHDPGddnvoo6Fx9WCGkGYUPVE0y+qbFHOhIGEKpXSaJE/I2qLSYdKfIeJl0RzyqiGSVAiPE+3NWaBAGP39rkU6qmT0JixxcnRlbDrJx9ilvQNtiEZosu+M1K/8BXdvn9h3mIkrkPpKCEAPokV6J3ow1edX/3jttNgVdrLAdg9G7XkWZ1DrHb5smS2vgC5Tl4ez6YPKwROTT2y81Fo8ckE9gzWDkdLGFGM7ZTgvavoDCouXzQpgwx1wGEhYYegGbe89daiHrMIdyM1/BTu8xewC6uvpxt0NlJtgSwhZTh4/DPJKB5CD5wGrRFU9QdZpvRrh2zKhTrICkjaO60UKkjjw5S74vEMr9kYOcO9Xsugd8NXr3lToFFQPUda6LnnApS5MmCRth3sg+1/r96dEKGj6rAA+nDoQzJkZ4lnG7GWoZHyfdA+gV2Up7lpx74NOvChoWRCFwa1Ti4FfQfOCZUeiJwrdSekkNUgzkDcGXNciiAGpJaihg5PMtWVc+JL44g6fcH3wn/0hjiK0+XgkS4YXGusXaWowk1S5Mb88s1ZVfIHrVGrhKjIwXqrbjGlRsofTTW3L/1YMAfduSgZsTfE7x1pqDKJS5/QAL6/R8wkDP6znlJTOjJ3Q7mKRJSb7NxO5lqAum565vFUAEjG/F6ag+JXcliFEAtjB50aaVJq9Xdmr/41U1W3ohT37SFS+tO2QhxwCxYKLfZRZ5DTejhYjGjv9nJ+MPVs+reOkdiDN9B/umM3Dra5BYIR3e3Ma+sh7s7A3Vx8bChA637iqIcQHBRB/KC2jhIOJ6TloMJKwq7kb0E9XF9mJKmCg3hmo5d4RJzHqbNcjzYx7Yk5pW9u2bnVVTfNizvlXChMV4Jf5oYhhhryI6ltlUEoy0dcnVrxqGCr3oxeTTUf5UBKirSe1/gYybkHmnwPlavGS7xGvudqLruLswtEhy+WLGktsyMnJ8W9COKhKX+yT/JsbS4agPalAWAgq0Go9WOP3RF1RuzHL1/Twfhb1NYiWnyHTV97NVRg59dnhMnGmBgXkg61GiZUapX50LNQZxzNuqugyEHjxA5KJfat5xMGdWLkGemJFwfcuskXJeWXikIhyV4hgdXzZRgUxRslgMfjuxQLvmkV0Mo5xvsDp3TMwkg1PqkGEkXlV7AWFN3s01uhJ6yDjC2Uk+o6z0pfnoqwyJnVgeA3WjXxaVtR7egPbF8hfxxAfwKF0smjTHmboF0kgivS8DFJLv75/Ucq8fbYoMgadUoqnmcGJcHbqo0es+Dhi9Xl7vCUVADTnhnuyVi1432m6bM6IF6ay6xJCRKjLVmAyEooegmNImVU5uh5FGxvOAAyQ706a+NJGgUoH7knQYOL8ZyXPTJSWiqNEpr26wUb1hbclv6Q2iY+PTf2KneMpMOoIfJjCN562NYCeMp+soVRGxbyic3Kw5CfLkEZdmVFpFlp/gH3QYYfvp+catQcIPNavc4lWGp4i1Acl01RNbxphG4D/KBQ3D1souHPwVFKgaiC/gateFzhTrPRF19IEVRm4h1rGFjVw/0h0O8psDwOIUQVE6yWSmoOaMX56IjcaA3jEE/TStAQxNE3H5qejiTpfdUTzQdTRY2T0u2OiDdBC+f2dMXvjCGFtQItlBJQTQB+5X1HSQO3L/UBu2N2IH1WfCIaagjNUmrY+DpLuPswrx3pnRiwB+IZ/JH4x1YV5Frju6HvMEaEbV1VMA5+tyOcIbe18oGEX+GHKGb55evhOtLwWOEnIgwWtNtEu2k6sjlmeiYYIZe94CyD85JcA8jNtrGoOwPIzuOXQDjq2FyAZB4Y5oHWAFNbhuA7DjS3b/0r9FnVkB4st7jizbrNtR1TYyoGS7+lR4A7b7ya/FmVLMgKfAbohRbBjWWpfltG3mUVMQRTTI/F+Gsw0zwd0Y+OeqznaCusBQw2Lc8QWFJAylqaDK8ZRwDapfYycfqT1j2SxjIEL8FScDvLoaovVia3yBwMh2po02KhuyvIc1A3KVHZPpgD8RYU4+EhZCPky3ziajZKg3rOGHuBzw1A6g9gXNpN0pbWWS/gEyhMSGxg4AofPhtBYp1z5l/jmMREAJ0qnNpj3vXsXXaAKjNDjJel
7acD0GUPaHRhsKq28WwZ/UuD+ILPjHDtblSZDMWR2a+tNY6kQm47emyolszCZo2fsc25fjpIzGXs0cetux4Dqzv03nH6XMdZwhuPqcrOixrdEEkqRPadaaRgfJymbY1HncC6myfR3zrPkEA3PLdw1x3ljW9+iZ1sPUeatW0c/nNggFNkllR4SxeOrOqgg18rwG8dPbaeun3WPVrvoIgd7oO/fUQJKSujMTWHZa8opUxebo6CahujaYmcGMC9h1ndg7r3k11sdoudFbd/0Ywwispr0E8h8XOUD2mVPlPesHFkAvglILLJ4WXRjSSnNcOdhwVilZTFgIlnDdimNm0JVz8Iz3WoP0LCp0+1sIDtpcuFVFUX6xd94DWvwsU0GstZp/Bd6TxbxfLluEDEZiy3YUJOsXSGBH8bcTJ1aZyu1woCqm4f4ogQG/otHbC0UKoYwwxNemdnuEdPF9Z0JMYKKo08Z53vle3XWrbODlTX+i9FAN5t6qe5RRgWuI6I68LMMB2ZBWGCwu8i2DinS1I9L4KgG0j0u7nA0vqcuJS/WCcLKXrgG20fgdRQWgYrhA0KHO5dqfeyNvSeU4h0pU754wmk58W07CK8z5cg/zzXGMuD8W+4PJtt+49U180vl5GWVWaUQZJ+MOOITDDV9UuA4tJ83c9vlnUzNeroPXVrPkt7kOA57ZNe2AkyfX4AR+q8paq/U8PwhABf1pXN79ERqG289WZdsb9waoaMeiyNYpvlwdD6gjoUZgOO3DdHKlYHSCeG87Wg9f0EMUkCS85KP9YETYylYArRn2Cy4CusH20VtLfLkP88l3VxQXV/PkPu0iwC3bFscgTVpUONTRqRoMCM2yI7MuLC9YPQTZsbFI4lMkpYCPKXaCra5lirPqucwJs1GqoCydQc+/2JuzNW3ZtuRYudz6zh1i3d9+alM6DMbSG08ao8AZwLW/KHbKBv5egSSHqsYB45H+aWJ96wANB3XanS5FvmKYQmO/mpndEk/N7geQJ6Eagc102vAM4FeOJRnmSoS58qGiPhsU5iDV4DwWJhZvFGSIslchcwjhuwOkvFRqlUfM8aQo3LWSXaXOS8oyPS86HHDIQtfeRAENPZ3OwnjkLG65FuhlIkOaCG4R3oThoPTsfTknjxf5z1JBhRBRCum8JRTWqHEYyOMHsnjicS/DH9zRCkEHdzRfkLOZoitOdpZsNaAYEprChcMLrLDZooVOj0vBgN2kEU4o569PY1x02SsaBWTffTVZvVnOc1j90s6YN5C754ZTMM04+J2B9V7j3BfM9YxQ+SSEcSO/QheRAjs1Fha1KUPNXg8lFuUJlOIQnWxvrupG1DPukFu0EYJLCUAhnBAL5/tXKdLHTKyRbm1BaDJSdDLcPSOngECoQH0C7UAOju6rz02WfgME7XNGRhmVdPLjfkdKle8Iw0cVwA7Zp2b5Qo4N99oOs61z6Y4GQy08B6xmWmlNccFSceTbSsTO+d6OfrK7pp/iqX4DE/RSSLhU/tvBcNlrUJXrr4tRK2L6hB8qn/JXI/jBPaO3TNKmVx4g4M4u27svPZfb/XozPSwBbi3qgsU3Q5+E1eZ3rKKolbViC+/LrGjZy1mm+Kvt7Ht9bd90kJ95+HJB2DhKY8jpzvUmXaCE8I6tnBvvao24ClQhwtFoKfsdVT1p80/vVBVyUNAb1sRPpTjjgvUr10ktSB1o0Kn7324E32nKDNkULKlBTRdNNAaAYDnQP/Eo7qukPuOjyR8oRJ2HU3f8ZvHTFXy9FYM+gxZj+wSDYkEch2ckTifXLHwDQaNSrIsGYKzaeJ2jAWsDHy1nStpusbz/O2FH46bUG94YJTVc9Wi7NkKNwt/PbmUJEds2/mP6wp3Z3sMRodI9NG7LZElsya61htUDAq8ucyuzD18z7n0wyJqKxSHJPOpvR9eAoSGrZJrhs9X0LKDrW/AuHAZ/cXQ1no/qjPeoZSJR33jTb8+4dUW8VX2Q7kBpO9ZrTTiYg3LXBPmKOGG6ODoKIeJ70obE3SBEB4AXJLHZ/4pmCvCAgABV+JB5v8dgAz+tjn/HSzi6oBGOCxqYVz7lg2JDM8d2LOQO7Z43ITYVfauEvbDjLM60S8t51edi33fD7bXGFKV/NmYy8yQTzsRnqx4vDblg4KV+/jxFSUE0x0M3RkOTy2FaedY7h1TIL/vViyJ2NO6eI92dcWLq3HQGio2RPh0MkEXRb2xtN7WkqE/MmGSlceDK5OEn5SnD11iI5y/VYbJfJj0jAIR6d4E5/oDAKKwJeufLHmIZZSKLSZ2VfCeHAEC5sktoU2na+vC1aAePQ9NeoQRegyDxHc60XsoW6iFXMkPu0FZ9Qfv3ZdKFHYHXn0G5pmYERvVgbctB4r7GJ6o4+eJxZDm+GkiC4Jw8wbZ5htmKiMAfhnWwGd9v/ihBblCIVp1nuFcnVQjJB915PyxvUE9wZC15lFUxk0Epa7YjhGUtVtjBdPIC4x3nIZJ8icVEUAAgwa7gBJ7alchWR04DXnJCyTVfaJWBNJz6q5oHv7qUMHd9VyfMdj6NTum63Nir/p7n3uLoJ0PwPwYbS83a1KNzAvyhdV9ypHLxGeDbP9LpOnmtUkC/TWOtdFKaZRR2dypMKeGE+0GZg/e5tjPvt6bhQeLl6KdYNIKCmMLx0rUp//YkkiGpJJtEPPNcLX6u7G2JYRRQPSzba9Xp+0S87BFdtHqHCvdxvkTvbOpg9xh5xQ4kBgwoXBCPlGlWplP0H+UQwB5D8lKIciWtdmVZLszP447LqeIrCKf/IoCUZei9vdI+bTHi1PthbG7xIDKO6+YJryCS0xA/4chsPnxVBRwjIJINQ9nuO41273xGQKmDdFEBs6vAVobitbvn1ZH23uZMESZX96o9XDuamK6Kf/KgpQfcj6tBtSX5QEV1CZkrzojQUO/+PHA6gUv2ZjqMOxyDZevUuOjUTo1gtGsY+w+kJvIUYSNg9w6lCqVcKA6tA1fNhvXf438haETgbYt4y07KqSsVXc8tKr3VTMQPJ1JLGuoa48AQJhRdy+BvR3rOvBSV++ZTsCkgDwb80VpzJAOSbqkCj5hu7EHLmfbvkbHZ4VSVluMY+LHxGZyowi3vqaabEBNDdXD3cGPO1m6mbdm0vxmwQhCgyHCcT1FaL//4wxyROIcOKe3YCpknT2YjYtcaX68pc+RC/MyEkAJoBr7uGkVxGJ3ZDGgecqQ2cjIqnH1ZUV0U5cEVx2L5jaTxw04k5xI0Ber8m+rJ7saZuPN+ygx6lHrMJenixAQmg/7N2VQuqmpHDxAMve+Anxp9hosyVm768h0LjWSUpdT29yGfMyfHKKWFjn4beQvUPYntPtGNp8G1nIJbzL8Fdp01C9GrnZjWCJZvK1sZB70onSsrWY585T8zSSyn6N6betbDnr88jIz0ssSwQ1UDROSfzqf1sV8+aKNFiMHkupDbFGZ7FZ1ndTgCHNaFIlU6Dwy8ZJO6J+QFNE5U+KfJfWA944DwusC/BEpsdDWiiFk0ECYn6t54z/BVFEeSrRsq4TAXYTfnXV6yA5URb
VIzbIVyIAzZELWFWSTd57L3290vRBSCJSjkji0hpFHM3sCNZPpIrN6oK3YFycolTa4TCJrc801u+kuTZTVKS2Y/XmXvECwCMWnWsDhJwOahZi5cmlV5EcIpGq92gtZk+w8cmFm2I8RGs16IO63GvzA/RBiucWo5n9aBlcnZ2uKaIzEcrD7fMFvxQ00PGRkOCxASoZZxphcSWPfHFBgS9TEb2lNWwmxq6UQoIztgE30WGcJdoN04rJLKowps50xNxzhXkne/rIf7U5NAyy7ZSME/CH3/RvchIFNPQKqv0cxEPB8lDpN/NO1p0cjjqvqEa7xaZakbe50pY00WW6kK43s8cXUp3zYpFirE/K7944O11A1RbitQ+c9TCD8UUfB31YWkpgrelvnK/DhlQEfouj3IM8ouVMaC1DivFeY4XBJk7bVzOYwacQbil2pS2w+ij09l94qjEHXN6VpKFnOEao1GFXR76vaUak+CvKG3I2XjlkQPnwU1rs/y+6bIAUCQ27Ek6wcg5y1l55ay17DnSXh7FvItOONpLqpY5Sezkd+NaKM8tK8MaBqb/mmfCbkKtOLLoFs4x5Ndnjqomiz0iWzc7HMxdH7hZVWYyo/UXAovQOKSUtzk8GFe+HTMsI4mWgC190MpH8CEEXG0GI1MfREdtr+BaHQeR+Xv38xiiTFOUOyQLIiQ16eEGnISsTegDKImVYs0BtwslhG/taCz0lr3rTm/FAB6u0aw/e0gKi/RySn65kjWqtcqRzQT5LyKEdWGJI6Z7+OpOS9hUWruXezmdJbmVLng8yol9gTsm+cfK9PaCWQIFue3fB+NZn7cCkyWqUvTaJuyTc2ESuyn8rXB6k2RhDiDTZ3Q8YOnjuzTwOcThJYthMT+yS/9atI8q0QiJWu5Y3e4BhkZu/nOmri519zbCWxBbt0U2oHW+oMAmhowXCKbOt/tdM3Y1tgUUCflonBKpHvU6U3ZCdq7SwJ7jDPufsvbJPOTcGiLpTCjA7Ou4g0G1NtYus3jjworABGLURfulDRHiVblBdEaB4dX6N8+CmTZ/NhT79735JRrAzlBaHsxRQYIX2hCe51OqzlsNpBjPaDi/gL8le4JHoAdUS1XNNkmoMsNnYYrBquQebob3HpYiChA7uTe6SCWTyi9aLN1x28YiEAT7JZj7UU21eXn0IlvDbhy+0gh3tGGKOWvnOweVdwaxlBshDG8NAyvaQ6MSAZgjLoDaenXpyKC5quYWpJxKByw5Sb5cycLfU+wf5cHiF1dlDiFwt42l+k7G7zuxVOhHGNW87pg4j68HXzg+e4LUwKStit7LA0odvycHWXSW402BVqAsK5TW46hg2A2jT3kjzp6icPZP1xjUUmPxqAusg16PSiJTA56zccK8b1xHdvpj+pO6rvPsHjiT2lUD8DucY3fdiPxgotEjcBwVTeLDWKzp1K8s7m1hHVKtnrRqcRlgzy91296MNte85PlYV1iiFiATvAJsh7qLhI6DsnqBhsQJgmbdOHtgrBnpcRHXg2tgVxbKV7LE+ybuJB84LDlnZhQ+ZtbbAZlgU1VWjtG3HwrSrADO8v6JyE8EPdbMWYBfRQppTmmEXbGhbtx/h3UAWaTvtOjbazFqWbvk8qY1zmD6c/NeFCDeHxslrGlKc0SfvXUA7yHlUqVZ8x14WmG6NXiQLNfi/Wom23SGMkv92EQ8uG/bieilLbrEq6U3m2N9q3hyb+EBrfLmAfPHGK/2QbYa1R7tRuFn4XlI7Xz9PuO2TidNgpe2vfqEoP5T2XDsKg+XuQO+Gm4g4VX4sSFUzpduyVFWShH6AH3MvW7Gg+6EkZoMkr3zzuXLHw860t0RU7AaRdG/eoyrWgNkOmxvTANm9ClEx6BrHCfWvDnSkwB7euqrpJV8fLIaBSlCznaszkqy4244x7Yb9ysS/o9L6DEHRZVWfj737HblwDYCV8qzH5gU/kXI3i9+HnmmdcHPpHnb/fR3mF/be1ZHF317MXD0yZdB3Kuw0P9W27dsdXqlw1O7lVP+YlFMch/sn9mwLqOGLjdmT2AuXUo8Yqv2s6Q1T1suwnwjBE3NY8u82vqh5G2M++POyDG3X33xu0HmTrGKIWtr1OAD3VuFvfoUAqHwGwLYctVDYm/siP+7gDohJ+6faIE92xYZZp89KUfWftRoBkogf5NmSNZqF/l+9aXKrnepP4nDEs9PbP6cHQPg9I2/0EXIReLK32XKE67BT8MWt1CtVQXOx7/9wzaXzl2pd/t6TkN7bWcaOkg6AN0D3Ld8CtuLS/Gxv4Vo3RS1aorr1j0BF2OQLslpR42mJYOjHNd0S4Jd5mZYovju3fVlegfMAxEQAyt7bY379E0RRwnLeZUXgOCd1qdTqPr8lDhJ7PiFx/UttYlIythOYvoJoyUaPeezq0H/PPZCStqDXNbnga0eobIl1sNQ2K+pEKejL4h5MF9VZiC+QE5JLOoUbC+MZC0tFiLIpdNbjBbd4csPI3ht5jR9DVpbN2KlHlSiMf5oiY0e3CnqS0JZRgaHn+3MZBVPmd7yh7otO8nme5NYX0cKFzSVChjicW+FegIwGXBIQDqnuWr5zPbC33m0Gh6rZiZPF3ebb3Z1FbEtNNC1PmCkMYEUP29ybzDX5quuNfqL2H+hlgkIvNtYzGCv1hnPRhRCcjD+iMI5SXMYQq+9BB0jTURw12evhhSdrMuoidAsvl5a+T8Yr7rYDApt4T81l/cork/EbuGrznAXC903Zb/228hAYsjcB91mHRaOrd0edavZ99CNZdIyhcu8x+L9vXp+pqc7Yl24olI/61LymCfx7+OqCQwJ9GJ731kVt6X+K4GC5aiDyhRrXD2cJ0ygRICzZpd67TegiihiuWKOcY3JdQ/rlYjvs1yUjVbMgDfBndtWAzO2nTzLhqiJsRbo3OyVvfaoLdSgliLH/nnorb3pe3vwsgowu9MwbZ1HzEhu+njq/3TuEahhv9N5uMYBDrF3FasqnMyWaRSp9SO0dXU2ZiuLzAJtdMPi1dXYKRJGVP6AfXcGud94sD+qkkWN5dR5273gGJKc1Wk/KPd/NouhGRe8pL6u4G7xC1MvI0IID8xj5hfCWb9i8PlBfSd0I6qTfXKeP81glBC1pZVKb5ClKYCJpYff6nR7/v29ydsYYxqs5YOWYxLG05oifisQ7gyIumKjdUllSGpcF3hCSacTaVhFlF66dJ6seKWtnnogsRwEGy4loiNKzs4kJ0pqO0IPjDjUr5MTwvMugA0jHEJyowYOHCv1Ga4GmIPPmpl4lvyLcqt3ANXcTJ7ok6+2JQ92FiHKY+fOqb6u/y+KnNDUV6FvYtVxXSAGRTBjUhorqguaHUJq7kyndn/lpdao6K+HhE2qhYH9l+u//AWgmkbnjkENL7jymP4RRUOKRVPOs5O2ZdTkctUWDTOLncJHE4we7i6APEzfbQQs+ai0tRRMFz82YuZ09MkViJz3qW9E55L4Cild0sRnLA5sjddNollpJXaR+IylOW84+42K0YilrkLLQRIam11NYU8tKltkrKWdK6gCduHt/M3
YFlYxRRjLSD2j1FqE8ApLKrqRrEksa7S9+3VNHbkn//3NsOL9JaxHnpkrtQQ9xFfBITwGQ0rQ1I+YZspVFuBHRk8kAlck2070qWuECNZff23TE4926Ky/nTzvemhoHBRhbFNO4ElF8bLWybKvPWLSCBGYqRngBOPjdWpMNQihKS8V6mOXX8THI5RYaFt2kOJdaeudP4gDdGmrR3NiY+H+K4HyRQDoi43sr7Va8HCfUCgc6slfMbbXn/igWx3KJPJjyBytreFPHhVm5UM6x/LwgqkQKLZGEMyLRpjILHoylLNtPsZ+xsk5qjuYdnTmG755KtPbQ923NYCy9R6eY4zfGaucmL4eMt68uZ5CqfvLjkAWr6/epBXwyBOZXxIkQWnZeVxmGF01tSBoj7jG42jkIggFp5BR/Qq1ltV5UA/ZQXYTL1BqNQgV2l5zTtZNOOOEVK2wedW1tsonQEQ4XiRzKr0fkZSqR3BOj7ACXfDMGmwS4X59kQystTaMInfuFWzia4uYSONvZcQtQOwOcPL3MwYWE7Ig+/MlmApvOZv4vZZmNEBxSnrMF2jP4UfP8w4SjcqOcZGMxEp5LT1Lfa15joK/jKMF0t3IZ8Iwd0DOFCZ3QxoxpdrHZyBbX1FfOcUdAQZgFqnuzDtlTsuw01FBWSNUD30qRKrmNpTLGhEBp36trunJZL2ahIl7zyL8WAxJmUN1/FF3qkFPf7dETEJX9f22kJOjKaP42cE3JPXxjmvlPZ9YzHCyeaDzp+NTdB2GefCCamNR4JV3HFVcFMXNqJ4Hh9yStLAczsX2YZ4JKJOQ5SLIUfrRcLrqmO/Wj3kTPDa3rfH/9NDmEeNnnJb/Wee/hBiCnyryAYZgu9ugc1qY26lQLewY+GMOkGbGz5HfdzfByxVgaCm4oAoZJUsvG8vI3pVZcFRULP6hgPzmfHt7zQ9+ck0/NFKGtgAx6wVaIuUTJOgEfgvCjikFSydff+GfCsogXsPmpiBAwEdp1UutKr5fQcjAWrhMthbQK18NuR7lwjVTpqhGrgRFxkQcNX8tSBHuW9BFOL2B8ks835oPih9Cxp7sKvD8YMERqlIs0BGiQCp65e5Yz49lITMSorlfy+KcUpVuAMYLdbpOp4msTYEPIf0l8TJVXGbc6Yr9AWoz8xxzxIylXl6VxF4WkhtH0U5Zj7Gba13dgtVRbX4vsphAV5RyzgRKW1wMjYeK9kkEQ+squ21bgz5HeRHQiP3159yYwokmAO3EGeAQWD+VYR87qlneZxaRdyoV38b94fr5vH+v0ZXrgC6RsWMslZaQGqN1xEzCKvcXrIFIQqciCV8d1OmTYJFw71kn3cBzZDA5l4rt7FVKxqrvYBFllfU9dIwCVZZ4YiRLYwSpZm7yjAVCN/ecaP5Zzy6CGDpGttWrsKmxB6g1AChFtFmKFBhkKIXdED7UmRXNJeCbp61zdqvf5APXIR2eWM+kEEK65AV34oD32e6gL7bFc2Ljsafj7B30aRYMdzy7IjfhiQVQBf2CTNQCd8PWx53uzQxnNPrl/VKkJM/L9z1bsMYcHwjN1bk+q/Wa4bq9siqjU0ZsKDSWU7i1nXL52l5JycaRTo7tvUai+2QFS2Dn4F9uIqGK3kUMCNYYBsPNemxBqEI5POVFr6ajMyS9F+4T9w9wXptBztb+lFPFaWQ4Ma/q6L12VVOh3h30B8h0AL3evKu809yU4b+U7i6Bscs/UREDWraoEs7UetLSffkTB3ZG9OH25gUtXU4dBp+CiM2QSBOBFAF9K+BvTgM/O74+7PheIZKsBWUY6emam1McPMdYLlLzIl6BdHAy5uLoXLE7NyZw9bSJKbrXUs+Akd+j1TMytjC9ke7YBcUb9+OuUVOxg8HXqHX6SdRef8ERFu/wNHXvglBeUAMMC7dYA6AJfFt7VzPxpv3tdvULTm+Px3UUjxp+JV5ubNUQO5+6chjYeIyWan4/T76SLU7tI2an6o1AR8iUzgapMAN65pJFScW7Mv0aBI2Bou7G2NxXbMkusrxWKi9umbLCjW4cZb9WzGUY8XcVwRlnqqmYKwY+Zfw1smlVfyBasNI4kxPIOC+kpurhAdujUKGGTHWmStRPCcG14SrwwUAst3FHyZKvsQGo59GHkS5jur4vPTQus9lWtHAtSwBbY2N2WbOeEL8T9x+FZdvQ1Lw/g/UzTWho1IE4PeZXlRp2Pm0j3dLSN91ao9YmpjtwRS1QwGzrhBkDk18PWvbkRBhwvW8YMXedJSISHnKvCzCAynoYZCT2uBdlAy0bRSUj/3DT7NZ28iRd6b0E7xWiqlO1JiFZg3eE9tQmfDhYaTQLTyJ7fb9AbO4O1XCgxIQVmB4KABTOgPxF1Hvk0wab16UoW8W6uLUXboJDtapCP6ZhGMpJgumHruxmrb+jMcLyx8/4u4f8Xe+oROFTCkahOkYe29VDRgnN8vVB9KTa1Sl5cazTY+ImVrZP7E2T461mGb6Nu7M6s/YLlVKXu16+nkzwQ2x6vYZ7VWLKaMux/yhkqLC6lOoz4srWBGT3DQUCiX16pyn2iXg6+3TbLj7k6fsvqP59ro6No0AuQZq55ksvot6+33KXo2M4/QQsCvAroHsimaeweOgc1C9xEd4tYMMKOEamF+khyICDXCLeh9G2p0lFmiuuGRr3nZziwzNC2Ei2Cg1VU78nob+rgLEXFa+V3pXgzuK2NbkWb6TkUePlYhAbfeaDze4ZjBG9gRKQrxhLa1mWWGjymxP7Kunke9O6i9QkbYRxbKZgDsz87Jtbz3EZeF2zs95mGm1/RRjy7E1o8/XyzB6rczulg2jqA6DpIDKVhKVFSRPB+OQHFGuv61WEuhVvOepHs1yiYWS/yTLv+exbRPi9UJdO1KazmfzECtNchfyJPJhdQpoSPrhBmSboZNkJaQpIzswCTgQrOo3Y6pjBWIRFbHFnpBpd3Vc8mFFh2EVyM++e2LNEOnH2IOjNQF0V2odyuJTX+csT49srzDDlOg5MaWqAPCD9LBecRjAj2VuoGzGOKTLs65v1WoD9eFF98K/8CLdryzpFuD1b3sX4KFM+MaITq3q36IEDdmfp2uSe2KFT3hPojt50ztGIPBFTtHHQ76AI6pLbiQg3JGt7BlNdPE2CkRuXl+GN0UWhvJ4DzMZe0KcveQlNwP4GOoUutW0fww9GxHd3SZBm+SnTHaKiIVLkNVGUfkcmopFvjMJS0FiSoiohe5PTXnCVJIre8F4oaG4ecNMiQF4tSjmUwRTDIIKWApBb4LE+xq9mtrT5aTyYcYKkGAQ7cRjyPSUDbQMYcgTRJkUFaV90ehHpvJR4c1Fah9AAqRqapZCakmCXxUVzkIIJbnXNWkqjcBOb/iSRhJHziVl058/KUjMNIVXOcdAjmiRq6dl58U7E3s2YLTurZ/0MzUypt/SJ9h+PaNqeYtd+1HDWKB/4ZaZvK4fHqLoJXnAf9ZHb9eFCN9piLm+Pehexkbg6eeAyuMF1GiPxXjnEMT6Y/eLuiaviYDYuNYm7A7LAQzLClp86B3TUmnhCIPJAKG3CS1xnDX
T4Z7UShZBKCvTPZ8Mep5Lpz7zlaecFMSy8JXbADWHGfMa97EoND3A8oT6KEqqrEsxVWWd4PGza8HsfoY+kZZx0wSYOJjKtzBsU2JVMn/jamqK6Ee2R/dNxsT6JYPObjcEwycMGU+m+JLapmLo9I8Ugiz7a8p8pRZkx/Ct3hTzd5Nu8KOFpF+AugO/Ew3niJxELy3m5nDSihJS1vpay6KiNuXzr/RUho4R7HWJvp1sfaSaepbJXhZrvHATx7j50AIIZXywXZd6aoyojGg9X74p7ytRhxKzgMFwBC78gA0PLHLydMnKA2d8519etnZDxGTtPKABOxKgcn7KeSJKMmPdeCXiSumXxW56rmdxyrJJbP3PwThuTYodXd3RSGG5NZgNpQM/jIKmQl/5XOY5QW3U4Ib98S0HbN07YA8obDHSLlBuFdwOWlqqGM2OkvI1e7qnFPd5fenqJxRSr3Ob7vqaz2IbV/aGMcETUFTt+znfOS99QjnRo5NtqzM91I0Le/p6awFyWpohg3PAZf0hAO4HjouiVTiw2u01HykyfmCVs6wP+j/nhM5Rmo5t8T4y9lfYxl0/hgTHVXAisE34+dZsI2cMSi3F3OUzJ7srM7vy/NYTaIa3aeGd73LFk3lXr4LEQ7/qXzu/XqnyrSyqIBpKN6UYtQnkrDuiVBswefzgfEdOBebm+591Ri10PjLMWChEOIQF2s/ZfNXsl9LZAWzo68kIMG1yz2HHtnYn2dwEMC6k6nfRNUMrqAeGewxmzGEwj3nmk5q7GBxaKACSt+6XnCgQ1ZzQGnmU0gKhvyYXV0Zd3R6xUULhL8WKgr4TB1fiT1lSbV4+rVueupSKj29Pktx5gUu1bc4my3DemwZcIJuVPtjHS0SEziOq+21hub2eTp9nksjW2cMeJTznzaW/7JKDOJR3hnxclbori4J3Ci8y2k/oXOIBPaXiDtOE8TYeOoMG6DqmyksyqtLgSh05qD0+ATM6jNSzS9NJbO5dTnTfJYmgM0+bW1aDGWgIpe7P9rYyOJg9wrHk8h5Ev840HbNdGXSAp4SdMy7fwHIIknZNf9iw74gRbLP/GGVPWhHDGBPF0O0q9ca7jLqUCE/4iGpgvWx07Rr4os2rM46nR/5l5J0RIrbYnsXZ8atMOp67RFI2UOkE1YVImt+Nz8TtPmL8yS+9JMLn3NsF3UbiuVIUasG2bM/JeKNm8wzdUdngTII1A2guwY2xc/3oOhF5AMC5wHx1XvC41GOdUjc1/BRXpQb++MlDKll6GFfaxsk3LUgILXvXMFVg011a35XSP+zrCA8arwZrev42kOeRxQ9C5qIRb80Ck0j8SqmO+urRAMmmGdswc7CfaR+wFTIZU+MI09e0pPdB+xB3eaIsdisiWCP3/gNfXiEx3OTTlwaKSIZwIZdxbuHe76pGlaclDtLmRyhjzrLPRw25NxHdaVu4K7ugxAn8ZhMgaslx+j9DYTS3ML98h+cQFFTMhcAmC1Rvgaf9YnOTI5f3IBYFMAdVPIGl2A3GoIofQUfDeWPQDZ0EXhnXeOExBc0CZyLlXNpKyAp+H6wA+32NomAMFfRxDp9G7obRW/44s+bhNbxmSPSV8INiJu7eNufd3+4fIljPaDMmmt0b21ja/f11Y0cvvJTCtilv79jjKQyd/sw74KikH6viV+LjPStSZQtomYyldg3iA2PMRUEFMDU09ohawT6UDM4nsxwQKEz4+hdMrweuIrODJbf9asuUUWumkR90pqVwNMIQpMv4t69TXxenhhETEM0wf5UY2ezxeOydRAQM4ryh2+L7zTBkeqD/ZXRU+1YcJG610XeguBEJ7oKlE192z/bVbJsB6tPMpXjfUBbiLQ8hNxFil9jbiwz7Idgjpb6tfyKKVcNdTGEqWOsGst0eR4i1+6gIrwSl/OL7YQ7fhVVR9M3nc/YDwqV2Fs3l+Uti18niZ3iCqsXUPWecYJefeDOQukhSH9Ll+SbcJJht14I96KYoF4i4qrztbztBC40erk/jyM+hsDentEtVDOqLg/ecdLnrTJAEnWnxpZvdymEFZLk66S83CZzMnv2FS7IOFJAu8+7+UrEU7S/CUu+aZH+wCuHixWK6iq5Y0U3yr0V0CxiTkQWGPs91JkCxQlf01PuNAzXQDjzM17bCWK+LYsgkIEJYxpv+ohbyvT8DbXDSLNZGkxdHxYiC+ZITaGg5DCRnD3bcEBX6zbH3nqKnCOD2V3RedhErCxQt6vdXXLTFrvXNLgYvm65VmAf6pVH+gutSjEei6URst4JHEJUh+pMsMC6yHKbXO84dI2NXIm5hi93lcbwwAhYzgSic0rfLbG/svYhNTnUTxgiEidg59bmvvvCS1eUPI8YMUT9SEnhvXMl4nKzkH0jcOVP4ddckKrWXNDW9uJQuD8wOpOShNRJN/0G43JIhbGTf+alU5m7CJMfnDmsvWyHiUNL+is5PlMpTYGqg+cvYTiy8XeVKFL9xOZTu7/SLD1ZYO145fdjsWLKZnTBPZdIUs4+wcRd/cUKTaXa3URejWWfdvNG1T8JG+MhELsys4WVGndlQbp8ChxhrxpyDHm8d1sz6zTMqIN3EXYr/SZDGMwGIUywqimIPn/fOaJ27Welpq3cMW5i3g9vWc850Jah6wN/d6duqlJJU6BL6BvsLOjuZbxtOa5+B/DWZ19tXLmO7uB3nexcCYxHGmW33Zi4KDbEiIGWCIgNQcc0WVfzUjoLy8wIja0+52s4JJ1sYfBY3cfCtGIPvxnCkSUmhp2sAoNTGIM4WDO8atzF4x7tNhHrUwJkY42tT4kZrsx1YjTZoTU6UPwQQ3Am75yOZdDpd3AdAp1ppTskW+2nXsN2KSGKuhJHGeJfuVz9KS0igByrMCh+CXzgbMODtv9Afki9Y7hGEBpVhwsxHYmg6AszAmuIxg/llxfKG9VUHGRFB+K2+HsWZGAbnJXkbkvosgJrFRgDsS3rKZ5YQ7cs9ZD0/KjTvECcZZjoVKp2093IfjQEGjelaJWEVscqpAzZRRjnaBrBtQl5FqV3bZMP8J5Vu8DO8fXXhsfhHkf9lYUJDExFfwq75yzMLD+kG9kPdwLA9Rdi+arPyEXkXvAtipHNNgTMedvB+GthKAAhd5OZXYWI2Vj9wA0HKulx4wMO7mCLyYjxcw2WIIOvvUSkxQ7pTZfajgK/gdPn7OU042OYbFHDMYX8BC8LKM3GiL+KOLEIt7bgDxklmZ9Y4I5KzphsGiyp0GErOQx2bdkEYq/f60xwIteDGNxbPN419D9f/BcwkhrBLkLkzMm1qYlMzWyCdCvFtm4iaX/T4HLxoxgLmRokbzAy6vhEQbMkG34bj098rqc6KsK6+mkjdcEVQ5GcySlrQ1fHD3+S/aKcALXbkSoxsqTzfiBIivCP9mKWYm3VSij1eMjICKR4iTtn1iYRtBVx8qIBH88aFUVHkUphHGClQfjSMXVKgjtAk3f1355uSvWekMEtnJjOUYKNB6fuC1ElhOWVNQV3GwaljRjL/iCKC+r3eE5+B5Hd1AVdRbug7cU0Nz/DQ45J71Q
Ic5BKGarQJwBCe6pXfuDfOaDLbi8SrG60u0p3w3uhBhnSFgIErUb95VJHJt0sskFT1LsCBS9flYgkya/CgxAmrU5gp+D0A25T+osY00jUnrwQYql/DlM2Bu1oY5Mre1TaAHvwcL2wDcFs4q5K0ByAHCwVlzM3EgUd0uS+r9n6aAzlR4Vp4XUz4p6Wx2lBUVHh0QDwbZ4HU3wV27YPclcJTMCv0NDrWDYqsw+Z8Ccom1cp97YZof1dFpvrRnONadgd+nEnY1ytib9lVIgSUKLSUf+yBoDy3L0FEsRc5zzwG6t/8ma8fRu4/vDTw33qm5RoSviozctV9EaJDb4OumsxIcbBBpcoF8G/U8stWyTTKn9Glmd8bhHR4E0yW+UnLOfEnnFeM8+v/oAeU3faTzg6W0ATLq0DpyPjuh9TX99ICVgFse0HnfUhqIvaZG0J4dssMg7gjmyC2FcJgJLVtm7TND+yyhAoEFiPOT++FYU3op+McXRGVNRFfDPQRGT95LgphZCJnsh1zZkGtgr9odAWpa+82RBd653N1UqjdwBOOGzUWRKT2JqIn5W4dNV+n9OZ22O9PfFd6Lh2fh28bM3S8eH/+/+RxaTUywKAKo1a0zLP0Z+N/b5/l4YbZhm6Vxn4rev+WVn0osaDGIr63/kKsyhP109KPAGU40IKHfBLasX7/Ue6PRiEggAAAM22bf5s27Zt27Zt27Zt27Zt1w1xgzzbafIr7vcXRhIlw2vQ8eiQX6SFhMBen9mqE7cxdYlRMPng2nGmsUCijTUAoV+j7bvt4oIXKpGRs63FUjuzgb8Pq2T/nYsVlJ3c01Rq29XGPtCWuZ1LdOzk78yrR7RGyNhJPYV2WQtk7sMAxPWEJykovzhI+d+r890INfzM1Uyn+VH2t56HUh9Imow0OU/9RwwRzMYS1++P4ELAjfeT7u7Bi5J5/pR4qiqTgspy6BPJ0D6vylIKaIMTQ2ODmeY3wBDDP68fLEMEYoskJ9s8ftsGAin0zBsFXoIa0Wu6eCvQjOzwY6wJL+WO8UHoL+R+C+U0t8BtK4fllGmAx4zxCTQNrfE9WKXohq365/GVYeXXSIpNjSIRlwiS4tV7nGlLlfwsmMlxTKtqT94BhCmoGG3+27W5EOaZjPUE5gknbBRfmMRcUNnjEIb1wdSOB77v48dxm7E5mhKnmAga8x4FLS09f4zZ1rjc5QDo95ZvL2GI+bGRUwdwCYfmczBAbpZSbDxxdnzzEEvbneFryhmtQ7NW4aDmaC6BVDGf0n89J9/oXORnDm5FPA9yHgVfyK0/EgN5XpJMuESPSfVOnAYYq//AuzZbf1yns5mHAbb+dBmkVdr77afy1VIghRLfgEJoEx7EWN0fQUyE3B2ENvd/98t4gKlX9Q1jb1EAx4V80WQvY1Dk5KRX1uc6Lsba/Y9lMvqwdhfPO0bhb7yCTFvuQRlnT61I1GxrA3c7pFTyjs+HslcXgCcNjnWRFj9viPB+Op8hGX9CCquKGIibgTBPZwOdmy8JqS3whB/SlG33ANpQYjwJk7KR0tWuqEDR1qn1AxemgfIORjf3srmQBP1ggpbrZ+Y4hxRX4MMp3ho1R0Ec7S8JwZfyH1VDWIdXLI+AZ2/WTtx63SjYO107Oix7lFC0Zj2+AMqrGluR0MG4i//C3zda7Bd1tr6d5n6dhaFtrNWCTOBZamPmMfe5QFnq6bXYXMzrPTn/XJLexZOiLu+cZ842qIKInMLxuPkA9As4yPY4ErPBEmRD64s58/ZDdx2qjHRzy+UVXuxefh2KWIzO8RxIWal8PSgUN3pAyderL0+qKfojrImsoN4qNv+RuCH7I1I8iASwMAhdSDqq3PtmplWOgxpjtbx9wn+yq/wqYJTheiPnqatfo6Rlrrf4yT+47VhM2BOAqQj2pHaksnercP4ZzXVL5ry0If/SZt4JQU/jTnjCbEzfHD6GhE9ucsLSBD2ZwnIGTNq6a4Oxc7DVPLelT5aeV3x9KnuhramvmqrjMw3U9oe/WMSSgfWKREnm7+90hTXpxGWtB8b6Q4X3uIX2yLynvKN1pz3yyacHBQ5pylV0sZVjPXe0gJrIEz4uNbQf8p3x+Plc/tVCwR0ECCYr03n+GebaOK9vH9vVIOwaB1sTAfPhm9osq9EW3aHLOG9wNEUH8GJB7MFm7Xt71dkj8LO/0CuV2OYaDphVlf4xXeRWSiwI6/m69qllFC4XZ97zjo1wl2dwqT5e4IsFKYGK13PWLjjN6YosUV5lsx+BEFCfRGQgeMVQAteMzk2ylElkDZNRHU1PJVshLnkPQh5msSb0/FQFQzvw25dhWFWT54yts4Z2Y5vSzae7O64F367D7OTvMQtXMHxgfduMtKj3OqKDTT4VdnowYzBFTyNi0lLLrbZ0+TSs1vbA/2yqYgpMDFZ3gfOJTy4x7eeDK9YWOhmAAr1Q9yUhVbqZ2G9X1o1HSJrgHuBCY87DVgMiagFkwlYnA269hrb5CQPWlGfrG1SyBgK54psQTVXAFyo7gpTzVIyx1jkYCGXhZ1IsWLSAgqR0QSqLjHV8XJopOqSGujAXJc6tFY/5yLUt0zaGCdGSIFnrmaAoUd2/OGwJUlsif+X4QjVx4OpDq2yALw6SRJXn2UQ+2yzurvexVROHBorWs2ft40uZw/z9iwqwRHF5Ll2sodcs1LehXOeFurDQ17wuuadF4LWi+TP7tgFyvL3ZRAZiqRGFov8dsvd05TzXbyF7406ZBC4U4Y8vceoMW6ZjhxexHRmJqwmGbNuLApgiX1ln/7KAiSn1Q/uuUg1piKjwJhRXpDidBN5th4T9mGy9VL8aaJaAXJPE+QjQId99yQxLT2GbRzb7hDtA0TN/mYCT2JvfITa2ZbB1Cefw9C0GIXbdW40FP7kzYR5AZ3wQOpdGuJ44OvwdNX7ivJ0UVKBfOGgm2ZkTJd+RW3L3v70CizA7dJVBs2ECjanjyKmI/EemGzUbdDKl4YMV1C3gCj0j/Ko5PNabMdfkoJOwx8pTcSx/i8izlwfFGyNS2F+AW2S/LbXm/NqxjX97NJ9gj70gCoA3KFaF0sWf6qDVhJgis/RTxYgzvkNcmiJMp2A9JeWG/vDFzXZR2kpxFBG4yKS3Rh6snQU/3+hiG8JGtYGjdfqmy/dPpD5eZaaK6zT+8gSSlhekuOiPuNB1kLRl73mZ6laN8brtuPcRLWp3TyMpgJmxGtjSNwP1KiGZvWrhJwFCdIUopPKiK1wEj4EtEjXieXcqeZoAGRE0V6dADyY4a14N4/BFVg1fo2C9b+l1U4hHF3F2ur9qZMzQ+qyzmTaikfAtZBiSguZE5On7CvM4fWuJsByBHveqZ7gu5YQw1u2bJpG80c1c1oGg9GXauG14F80Jsan45QHsG3/4G7uv7vfcZdt2xClQh0svSSiaBEDCm7gLT0cPR4flSab9EFaOFcKcs3I1nvT6qn6wRZjImTUPcrWN39Q/IhTXKZmgHLtJP3xJezOgwSLP6evlh+Pkl9Zl3n88stFXWQMFWPrV+seGzBEB/05vgEdpvIud0Ay6Y3
KjvDJd95oh7SJwVZtieNbzsU0rYrQ4zleyHxj8r/SQhRVXMyXiIIAH2TG0eZnhef5z2yVFgke13BqxBDlQMS0Tz2XYC/reUSNH23SgnR+wSnNx6JAxWwCkjNjSmMi6773dvVfg+aPa20dIJGy32HzmZWp8GyiG4mhU3t5mbNfv4cnLbz22ySdjz5QmYh1x+peg4O+DoubJ1nE1KThHBRNekDa6UUm2F3e0NQgzSxFYo5M6DICpdmZJuOJ5q578XenJwOA3JPe2zjQz9SpPRCjDqzNFDWfHQd2ypGhilfeS0L1OsHHSMwKbfa8lDmiVOBXDbFoGRwYEE+Vw0bMbwFrft6s09eBJ9FnlPFHMBG5GQRQ+gyqFnuMEDEGsuIPCMZZTi56GeVi2XwoEkpsqaCkjIL+UPyG14rD9xL+MnJzgYCiJwJBb6HRbEzMlJZ6wHfgC7iq1CxLNKYhyltw9qYQFNHXJNxUtGVHfdrFv7wNxVMoZSnUqueq0duf4iYr0e7JtG72xikIwYZhJQ05Zy9r9gnoIOiCioeqz89AAwZPcO8ulMY1395MxZONqlSAMEFYuXOSr5rj9GXsC6K+afw7vy/VSnC9ozIkVcRQ2JDXU8R69FosfeFtP8P6L+C1GG7q3PFYGDCw+tc1abldPLZnpwQyz/UThcGZ+gM/6cbGKwG2aoqUvi2jBYkT1wcNkYrDPZ8xgJUhAkSkjgikEbQ+s/MEUXAd8Qapj+28Uul4yMDF7JQQ9w/vkFGG6w+pMiKaUqloNMWthDlx473wLlRmk1nwxGTuJipsP9iCOrlTmO71NQS3wRo6qZ7TH8my6TvMLWHnRyf+Jttx42vPfWOqFjx6zSDzzs6lcQ/CL7ymEz+o2UwihaziYsKeKyxCkOr8QHrvfUwVnQvLNjweg8dFisas2K22E2mKJ2+zgW7kAlMi+wg5gKw1Zw9yztr0D8DxTKCYBRJhuYZbGMFxisE2FlZVLnRUEnqv9R9R0YYQgT1uhpWlTpHhiZocVEvgRR5Ce74LOeGjXWr3hnYfNWYUr6x6oUtFqOZ7jrpW5emqzkVZLbnCzxavcB9SdPTEC2Xd+n0BqYlC6q7X5I8cac04SQ5P70hjpVhUAKrNjykPsJoqlab0lLeMbUi93XGKWnOod19sXkovyqDnS+Z3uW8l1MZrwND3mM47dq7Zk/nFbXlMlaIlEK3xyXwPiSHLlio122veWm/JaeUslWXzGT/JWrKuHhVgYykrX7DIvflkv+Ldvs5dv8eNm1KB0jaclikEuwY51ChcJ2/Ie87pjlNJuD3Wa5j1Dg/T8X2fzm9pmuNIsoLKNR5mPOUpsLK2+s4TKaec4qUi6qJvGcfcT71GX5roK07IFhAuF/x7nFmggmQESK4JgFGbyNZjjEUqgDqMct3DH7kR/iPhHseNSMZ4iYaQNItxFtIn0UCJBa16epzQlJWmnuSKRYClnd3xrGf1zic24shNFnxqyXoDYptQCDRma8VEkuRYTbpzfRAqax7mRpTZg1OH3DaICXTwCVkoy6MfO6yetcDjk6JCEOKlmUMX7voFBDjNRJm/8rfeo8dbTa4NRzif7yHRiOYvSYbDy6nEU8lMK4ZJvjzfsMuyCbsJfJLU0BzeaBo+ZBmL0UNK6ceTSc7WaBm67QuafmiUA8kqgAXdGxC53i7XW1Skmvad086jFqNfJe+HANhmABxgBlD6RxxVxd+ziIJln5+9n/18IdeV4rGS6sHxfYgtjwSP7utZINmlZrooE83OX+S6EZ4LYRGR4KVB1hp6uFxSmKsC9tVihqJ66v9y3nj9Q6B9Ywsx6hNy51Co83Y+EYFjQOA/LyjAPt3erc5EECn3zhXUoejsTE5JS8cJxugLm2KYkPbuUhgtQOD7Y2F2nW1XalyC0MlTLTl69AEQDv73GqKMNN1GaqRpfs7zxQKw7TLTZC2rn9fc6QwE9vUZ1+M75HKqcS6U423a75VzCSWPS7sE/kw0bMP+Sdr2WmV8U0Vw1NiuiZC1FSokOXK+4XigsnhFp4mn+Us+ji+FlPLgsS6DIiBz7hH3tUZSxTIacAQSnu9R/n+gn3mzRAIzCFJqeD+oLL/rvcLz8Ek5A4P5AfXvxsFkSm87xS8OCesDfOsx7+2dFXtbilC4kQXLeokIxKxvS34RcjxiCr5tTvjsGxpwB1qOvEKTHsWpIT1PYjBuHbR12FHm8egBZXkXB25XiguZq90R/G8dqAHeXpS52TPIohOapMqHvw1qrbBoq6XTGLsPqmQBfyo10oAnWcUgGDdviwWdGTEVi1Usi0J2inGe89VQ65GShzrX0VRJre1krPrxrUVmz3vlwI7cNxDDw0/WTUIEl/SyNPt8ezOELDzrQROnHrEYC+xbxfgaAeOcfL15M6dBvDbgymsYCE69szmDGvCmiEYpCucM8zAloBjVw0/cjseu3svAGMEYWeqZgPDGqztjwAszAeLF4F+dj1zg4iBYBA7Y/3cNBkY4EvmDKu+TamnMW/2p8/bREtVh4NubTgOJXnbnsjdPJR/qc6rMHy2Ub7Ew91Zoer38eIdlEOd1dEN7MJTSJuvdK+GMl2MTRdJsTJUaZ7nZvpa1Mndk7mnmWLpJIDlEIunXwh5hn+2f5g79q3mI4GLj9TqE5lTCk+4fy8cu8TKbR70mLJSP2JKxtzsj4+er0455kyIEqABzvI8WKKYPF5CwHL1n/IeJ4sKm3EVN7FepSvTXJ/+2Gu2K5u2qS0Yt8S3EiNJWY8rrvCXWid1AHGuZXdmuMCJYg1Nx28nY5EL8dn9bsKRB3YYZFOw8Svkta3M9mE2YMg8rs1kQEwwQttRJ8W/qfFosWj4IO+gn+zvfMFf7zq/lNIcPEo7Sa8WYnCY5CjPgMQ+m035VYujwOBQ4JA/PZtdFWOdObQmksvlsGx9GcZ4R084YqgAbamwOU8iXV7y6/yMRJ3Y1I8Ii0bBXL75eK11t3R+RhICx95L1QJW4YReId26IPVXYKh7/+TbwDlCGa3oPu3mJGqPMKlDe4NpTZKukP3uD9lx3EG7OuN3uOtb0l1lpzJdwMaXAb6IA3n+DfZm8770BIJ0xed/5FJI8xK4qODcY4dtHK0tUg52lgQi7l7AkNhJKR/hh3aTtPP6nk/J1XubAGmogfat+D/L3+KDyHZADU0V3ZlXNGN1vLa39xq7RATWDbsbGR+6Zbetc8BvXIjJ87PpCwTsY5/w9CDX6GonXWpzho97vRWxjeB4KPp8Ea9cDBBB2sA6vG8tTZ7wV1RalfhdHo+ofW5tRZhDn7fioQOWjAkn6HICMFyMLCS826dZviJp/OPN1UdDgOq/Pu6cvdeYDrktG6Duc5bLCSWilmn5zNHJq6CHvekWzv1IIqWDNvzmA0Ptvs1qJwhypvcDCkWNN8cBZ+D1T1TTXW36156XPNOgd76PmO9+Gm9yXV6QJ/lP5tAWUiB8b7QrkXnKRWUSDxB4tIoOPUozK8Ot5fpl7i6FwMCV054o+N/PITviMNK/+Ai7sUDleB1IDsYaJBagGNDGefG3qK1cmpHK/XU
rNGCujoPCWHramFbTNSs/FIJKVqkO65yx1t+5vVB93j3kZqaQ9VPhf42LuxJApfZAWFrqrGQvfI+KvS3K+RNL+LVLP8I+OWrp+y2dJyOcCuO242tjz3GHkPQh+slq5QxcnZWoU0ism+PiGGF3dZaOvfb7yCuJSGV4OfydMt0nHE5DeDhIEyOkwu/rxKAcyeXbUeIX+6JYVXeTVqx0NnZB5Jy574+CXCq1dUzjybrSIUQbkQd2I00T93u9BCPCkiQgvuPseD/RwanN4vGhzpvfv87/upUvS3bdz5XIUZa8qUOrLRDEeO5HdaYM1aue49t0cqLNIqIeSPvfTcJ7IhE7R5fpCHaN/4/BRT/PHDMQ3IfG7KkcJ6EGj8NI+6HxDzXtbgdguUqXRDUZKJW/4d75OMBSfRn3F6oCBB1go3cDGMDM7WWkh5omChowUdDE1ntjby7hKqmRstYS47PHS+6X2bB/lvqSbZnIvubfLzYjmh6KTPLgzac2+z8796SQW7fyrIOCooM+oeCUuQo6PolCctT3zmkFwQWoDZhYYb9p89l5BLOO2TadHleeKaFBXy1HVKObxNPYWXPc7wquUW0OlGzY/qbHcv+fHhRKdlJsYn1BYxo506sYtUA+OsRsTVz8IliKjdPVNxLskOnWNUgKcmySCcImDOmiEtBh42+vTzOw9wz2UFEh4MexKUp63ENEpm8q5aip86Im7oAT8FBLWSN41DdmGe3SF69q+JCk+jrtl5cDgiZ8O4VX7vt460OepuqQqly9Vt3GsFYboMvhw+j73whFuW2QkZIDqK+V1amwP2n/V+hZhFQp2nL8F4EBmbgu6h/DAl13ycDv6VnEUEwbBuGTFFIWcJEQ9fwE/SpOuzDtNN1N3YaTfhO08/H8vyasYUqXqea8E35lwDmDvtJMpuwa0p1ggsCGp2piY31ZS0xpjbgwA+LP7YXZGrLjo0NC0ghArthpRh3LzP61fOvHDalyMSxa9n+CGfUQyaTVuBvryQTwFKaR7k4jnJghFCRuJ7PCvdebN1vmFyFi2dUcXIj12UOCT3v+hNa54HCUwbFM2g/HCkBZDsHag9em3WpYI9u5dpQs1MbK1UhXEDC5ODg3u73E2Za0fmKUl6a+3busXd+V0yMoPm4OWcLjdJu5NONdcwn3F16otq4rqSdn1Qk8crOxuLu4irkVt3caOnpwygg0N+3r3YxaY4Wvx8PpNXSGBC0UAY7isPSgAvWaaGm+kuokMOxMIZAVBACgVQDCCRIjGRnfVgP/gHJVJ3nUvyRCBtPjrcdgKlKznuX9d+2mpA8A6RpSvsGmmboNqD2aFI/RuFWIl1obx/SS1WIOJKm4x6gj1YdJKgDCYMC467ovFdfMnu/CKr3AebJE6YqPzslUFqRDMdiB4TJ5zAqd0fBzIhQYpPwCG3S3xsJmq/EbHSs6MqbyhxSj9XqaXlLy/HV32IzXqhv2qe6rHV/g9PvxiAgIMZkGvRJh+dJoyUGeAYL4m4O7QiL23D53vu65CwYuOXwEdRdi5RKedH7Yoj6dJVQOsNQPGLH9Zlu0uV7HOfim++Ru9JmYpzyiDaq5r2vqOBlmEgV8rlV+HNbPxl/FSVi3Iu07lVDvqOylbOfSYqZSLALixfVUl5pNjNDNkT8LywXAysZLVKIR5Ww1n0MOGdIkq7/ZWSboFJjmLgyxKZdvJUf211+9mEI4X2Xkiok7yIXW6U0/1QO8rkgteAfA6/PVOdpJpwhwRk3w+13rNWycD6ND4pjr2oBn0YPlDmx0PslCVEPF6soyetvGSD/vnWOHKnEySRq77ewbNaoB68oJHTMvIKIMmBgwsgi4I9XVJAPs0NwiaIOOMkjMBDKUCjzKLfd+DDpWgHdOLMKFqmuG/h73/F2vl1EOtlxI6reX/3W1yYIpvlfWKBLOkPKOnrKThYv4WCBY/SzTb3EGzpNQ5W39ayRBEkpsmS/YM9tAu1ToA3UHNTbhjN6buJK68ZVEIGyaTzYQap62b7VeqtjvNdEJBynRJi3YuVYOeN1553E077jlrgQbP7ZGyCLNIkXTQEWwPfigPcAX0/cf6V+rXWtN0fMW7tapp5XbAfkLw438pcsuoo+KlrWbTh7uG4bxLlcBwjUXdFcSulDl95ZwRQxu9nUSue+DgtcGfk/RcKB48RvVxWZpcYlA6ZWh3x3HYbXTZTU5x3frodv9qXQ1X77OBVy0ucGiu8nb2/xgEWafP9MMmbVZE2G6t7tPpq6aDK4iqqMfu3btRZAW6hj6JFF/Xs5p9I0Xdq6rEH1n3wLQGPOmdK+1HnQVD1c93Yx0MW0Wdgr7reCn30NqGdeKcTR4t4XTj8tOAuXrtNoX44ZnAYfCgkd9yCE7xBNgqvAqA9TTWKzSSTlppdrPk9YEEOZJekQDh2ioG2UqdjUT22oThNIjczBH5PgcL631X7vguYzGlOC4sMPZ1XcvU5dA510uWAAxkJphCXLlfg+Ccie1eyHNjwOZ6lz2k+7P3tYpTvRX8s0Q3WfOz6ltuiz9bgSm3okJ2Egr8iZoYGM5rydXmeJ8zT3yT8o7lzwBcj2e1Oo57+Nrda/jVdGdg3vdSrJLZYXlc1VGYakjsaHFMJdKQrxoCL4aC7ygxQXYooT1utyQKta7dz1eCZvjxtdQImb7FzbGxKpuWjpFj37hfoezNGnxvsWsedYBRtHzD9BiHWWfeXQ4dYnE3d4/5EHWG+O9q2WCgg7MUbniapTDmsQgq2JbkrlG/m2EzYnSeGSctg1e8YmqM3MdIxmjpLsKSlhL7WxHnYDzZ5dfWXUCvQtXsj6j3fb3oThHmBnW+7MWYasZsLz9mnWbiJeXUpWG+1W0qb5qz65cFGcl3Qsv/y2h9KjlBFG0vFblOisY2vRd4V1EHDgUGL1vbMjSNgG8lpSYM3V4lbp6Je5RAmwp4e5Z63bJ38taR8WQu0CUNtqCDgAJ4Rzt1C4kcajPimANDd+9eucw4LWgXJf1PxA2sQQY7TnGDbt6hs2HE9O7YIlZcIF+zP1x3Nb2Ksfom1Fp5d1au3l0fls5GKPau0/DZ5Kq50SNQ4TQLOCNQp1K1PL9+Ujf/F0jYbAN3hegwcAw0o4wA1VMqmPTB5J1r37lWeaNmFFEI73szCtcs2ur/xPmMcjLLg9gGyPHIOiT2jXTSDRIMsayiuVWXTYgOJeDT20169w5r+lF3a44i2aMHXQ61OjehVM8dUyyPfQUyWmGzV+8AXiqNQ0Ix6gvq23Je7dRKz9JMN3hrEyI2/2cczeaJtANIsd92BmIDs0OHH7eSsp6ObiS8V4K6xMXfsUudNv4GeQatNjkHgU8KcO0AjgNr4OHFavLzHOAmcgiYzKx3Vi8OqMihS5ehcBtHpjzzNP0ILwsEEhEwdWbIQP9GAsyhXY2vT2diKSvFbFo3LAePNJrhFOz3bqk2VHrwjp93pEeArfwoPV8ACe2tHNwkCYKKSwDPOWrZCr97t3ItateTpF5Cb6r+VuoFqXVEmT4aABk079MRSfodN0uu4ZhsUtS4dJwJspjRGPZzQ54BvIhN9
CslAxsjWuqNhFuFNUtxMOJdhDE+QaSWAb12qhnH8fnIB8f1RGJm0d1O41yBrpJc4Wvc2xQaHOJuv/iTiq6UTANOfgrl78K4bw8c1wuShl5W0ij5grb6XU1BCygDAFL4JYdnoGKjpX9qeuTANvn80EVRhB/dVURUGxjbw58G7P68+FRePHqLYc62P/vt4gIQAUb4lzCz3+yzwImPVCQhBeibY2nvGp9X0wbDCWJk8mzxdOaQzW+PFGAhAfktTA/tru4RNfsRvAeADSH4tIc0GIL7Q4RTbyHa/IVKBsAS5p9jurT7d9nKv/000b0tMgHLKQuJeZ8QPmvaKLNztyJ2/53G+Y0WgDz+LCeXFhLac/odj0rRsT4imatageaDKUwI9Zix9dBXRrjTX4feKxJEpkO6j7eH8gW8MxxLokeJNhJPw7RyPulks7+KSRi6P54vdCa0SjkXYGwkjMk/JdKXo0qYm1aSzNwybaeDDJn5SYciaH0NmQpiuVkrzsUPNf88GWZe1zLL5Yj71Oy0mzFEIpXn1vuYVQX9lS6/J0vFhxNwaq03/LLXOJf1pEFXgRv5BuXt6ccK9oxVfiZZFkvf5rN8kF1kW2GfIgS5FoveQPtv41zVfPGzCY32JM88jEJVtgkEKPFD+8h7jSl9hNMWa4qxG1BuapBkkNxzkBV2ucUP1BAz5KCxIdcZs064pvFT+bUfSH/xKU5mNvSMW+37SCdUdVJRXdEGfPanazG+FarW5Y7B0L1LCh0TYb4VHrZjWefwNlW9M7zpc5qx/XNVn/QzToEpDdYjOlNQZeVOzbfZj4C5KT3HEklYRFgpeHmOAqsMLsG5o4ud0MdHHyjdWgZrYtfYX8BzeuhBQULUSnrKKA8brHYjqTcQxf49pXKNtI6Hp1QWMW4W7iESNB/5SOHzl8yBLSCjhkfrF33lZYHO8VEUqqjk5oSXQveWOMC21W6ZmzuihWCQYJOuDSaiLl513f63xWcGwveva3qZ3zMMUPD6wQp4MZWKNUBU2eosJoPNC2HxcX1cR7uER19By1pn0WkovRAboNLh+YNlE5YY9/bTt5/FDRYdpwIpQp4v6EapngXVaqvkFdhRXAADZ54yaJekOJjQsCuVNP5upcR2KYNGZpd6Gud5iurrKqndUT32nUU1YyMrW6OuCMh93BBAVgjWmezLv2Minl+Q1/8q6rWxX22+Z47fX5YqrTYb5gEZQjnPfS4D4/U9i9BX1uoAkGO5+17/t0ZXZ3TKFJuDOd0j9L8YAlt/kp3/bhzza5rY2aUxrhu7CvyUh54xdpfkb1AgFL1lUj2LDkoLaXTEp61n7i0ag2NqyAji8dJ2whLyRslWkxNWeSe/gpQpEHPtzDTe+WMgXlQqtkrGIC8P1nlUWpnNxWxxJyXLdSRWnkkwIGN6uB0Ac3Y8dHQ6X3bRDxy18u6OodB5i/ljNcJPjCqUp8JLvHS0a9vrryCTQoK4HeMQMEsg6t+DpPNAZSqsgiNG5p2xnTdn4Xm+7hvGXsi6HIHS4jwsh7fuvpVtNswhtsvLMkJ7h5Zi4gEWtV7NNWXkbXRkwN0Ywwu6hZNSkb/PVp+hYclvQVYQAHs5B4tYhTrBCQAhKVvpTdNfzYevu1y7NtPlQqe6xaya4LKDmFj5auNkgb/CFiJdZOzCqftv2Myqun2E/ZYP/MuYVR3BhAJlqTbMrj75FapnzLm4IsX9d4lM26Gwd7uymBRa7EXFgSbquCUHeip67qa/mkjRA/sFSZIvUfvrNVX0sZGGAMIXw9fa+4MViZ+8YP5jwlgpzamG+LpdAr4xYPQ3WNY+TJ90cw0A0lFFrkdZo7+JrE/00Xc/M63skDnX8d9ZQgcK+dvA101HCAHFY36zg7Quf9UQbCJvNpW78OLmpWKvTilNsZJ5d9srWlVj7H0PX0GuubjWs3vaJqwawHtl5nPZFitm1mB84WOWQqvjqWY8fstq2RWT3/cAdE6WW/CeZIsx3wMoZwZFtdpd+axyOag6zaKSMayd3F4Bt3L3OSJpperTnTHk7KH2iI1wh4D38IOBvlAsYe2OJmTY6iIhnhhn6CdgE0X6zDZlDMZB8tiSVHupPLAMjoSrnQZDNzBv0fNqtWeA1pb1aIbxyXNcQQtwe+94yS6B0TSUikmWTHz6YulqqDThH2jRm26KWV0YOg1MPIeU93dNvGfC9sllV2KlYUhuvhXSk0muGakm+UfdxTFPzKrvchcoKwAdLYb1YTf+lB40VJmaWUi4a5UgauV+hFe3XKrJwD9Mb6iW7SbaLJdpkBsBwcEMdwWBCZSCH2vgZstK0uc5gdCzF/Du9LQ3iOFRztdmvdZHuB1hiPXTC0hanSkocLBaoM2sxTECwano/CmsARyfsYGu4s1VmGx656FLfjersyDUqMYl28rhj2n3YEHa4BpeP/WtWmCs4OR+jWOsQ1TcFfJ+vECQKgYZZ4kiCDuTBKHlxCVcWLSi8cNlnZrVWpV5ocBgxX3GeM+EEpP57+S5t9bxwI0Sf79vMk7oLYP5Wcqk46kP5PEi1oZ1z8f7eARM7vt5DBLrm84t2cpajAKWei0mK1gy52mARWHJpZtD2tw6MFBUUtOL5BsIWmiIE6QIfqAs/GM1KCONq5qNvs+V9p6cT2KwKF0JXbksbDsMvHcwnugEFnG98ST4qg5rxNrUG+6n/48vgLxgxPQ5LZhpDCHEOuZEl2IHCqca5wOhaoxWwhAF8IgvH0qzNzR205N4y8RvweBnD/pljh+u1CRSeqctyHC3noh5sq2va+GYw1nrQyRrIM4/hWH5hgR+jpwRlhzjDAcBCd8ryTBscrWyih7VUuGZFE1ZV+yBVlEchKqzxkdwiBVhR5rK8EAEpIukwzu1paOCh5v0ItVtQ+u/qO1l/B5053Yngvz6NaHty3k+wu09qUKIOrXahVnWeX14X0UgRFwsUC7eshEdDmdQk/6V5ALhtmlkKgtHyoA/7HmzZdk/vOJWirDQL0kMr6K8dhijGvqZOVph7E2Sw6dba/QoSr9sSSoXbBJGDvTfyVkh06dg/an5ZdhWICM2mKbTuagaOAWVJuKi8swJCeqrJktFQddQbfy6iDD39e1fvcamXZB7OBbcl6EaKa2ZaFnrbfuNdBONQwUDAJ6+3e1+UXPmedRDccKlefkB+YAAf1/e+08VIDJ2o2Id6/FOtG692jX9oyOHiv+DIAbAWQXUNeIpv0ZDtrWlFvQEeO28j/gnukrq6avCnQAX7N9+Qs69pkn0JwMLFyyrMvD+XjnjTCogiK9h/0mRcoxzGWon9OjMPHSMgbimfWTBES5z+T5GOWuzwXidTZEL0YRYA9h2wF4gZchlzBKgGToso+zicT/xX7RnL8fZzigZEJ2i5BOHJ7NUv2oV0vBmK36ss4bww+QuMn5GgVJaUQcerEZ2WvqvumacDMnwDyTA0bZNAKOXT9hPAwQpLNWq8XGoSQqzJrvZCRQjES12sD9XM5e0ZzeXMg5CWPUXl6dZEjGghv85X8vwyZtMS0kyatDF/DBS6DY17DR9tsRaoukAnfLjYtEy
YJGPyTQK+4ocef7ZrT9udtEh4eRbvX6f8Sa1DkdGjk3EeAthM9xhUJa2AvXRfDH4duZmzuVnKw+f7X6Lq7ghnxxWHhGYuDpHRgzh2plnob2hPXFx7TC7uf7JOQaGhSvO5MTY9T2HxMRptAuTO1tvweY6Geozj8ptoc3WFI3q/hCQBASfi09NQ5qMCkNkhGKNARmio7ulPU7Z8mvoGQtD9JK7oLfXU2bB1MhgpACFgHFu7JWOSlU07ycu3kL127Z11flT8V3UMGBwAEYn6pLMt+xP00QM+MA6QF6RJy/UTtCzUik2v0GybeFt4WoNKc0ZzazeQW+Amg3n1dc/qkPR54qAgs9O7JUm7Aiq+MuyImYq+DdzJhOvvNGFD422kZ6hLhzfb3RhPjGbSIYQM4zBh1GdYlSIb2HT5ivbJPXLIN1HlHSsHercI+jYRvMHaUjg8f2gfeQEcb956f6JcsBB4DeuDhngNhHQrPlRvAg0lGzSRdecvjVEwPOV+HEZxVlBK/vi5I2/NaQiD/s27iECy6euqpk9ySbNK9K7M4AJsNb/UyhsmEEJ6Ea01DqqFMhlp08L5hr9q1CkSv9QrzUW6TIUlUB/tT4yLGvj19Ya6Eeshks/QOt95aCEWQm/M8ECxK2pMRlASnzFkTbEV0hGmHylqe91pC9e4PGxQLrH12DMDIImx9qSV0aiIzpDV1JDoKiym587jnlf9Mx3tV3yJytLTjVysxKOFPdytUDdH/4zr0nIl7Uz277UwCGli+3nEilsj2ze88qYA5y9KsxHcw7XiQMmTO4dyNZdO/CVZ2yY3EWygQX6PYA53gny6Ymtd9vF712y2t9C46FMXe4R/e66kXAAj4hl4z814MnO3weRKafDXq9LNQxGgWC4kYfcEy6QXZBuvNkePJ+IR2JHn56ci4nWCHX6P1HEXDOVEkEujLrHuQup1BuuD44/AoIbjpYLVTw52YO4r3DVaZIKmsINcZTcwEQ/Vxnb8N4R1i9gfE1QldP00MJTJ5aVDXvNTYRwGGjuuYWpNuIN4cZ7AbcFdgr/141hi5P7iUpjYX30Sde1dTuJIgVoayVKlzlTmgvO0UJFrpc0pT6qIIaH1a9II5enLGarnTapkL2MzaZDXifazeCA8NOvuZ6VTpcdjlYhOaq3G9aP+tQ8AM4VnEL8DIO+9V4D2W1n9I3PLXtWHM/xso+0HiX0ShC8NpFE105k/hMGjUTg13EzncxDJtmO2L9xgifjxmXsZPArp8VDutHxK9kDv8FkbjDTjgeSw1k98YZuv3MqWsBf5FOlKfVY6Cj7CH1KE9S7OGfs3zyvtvYFNoE2mrczd3W4hADmiCLPlIVo17sy2/oh1ds3sNNo0UrkivyY6WgNrr5TPh+0rYlT4RIjLnGFiDXY5q85J11zsxCnnjF0BmcBqkzBaaRkW09W+mOBgX4H+791E1lKT+Ny+XL6zBOCgU4SDv7SRc2nbMh6fx9OEvDG0CtM7b9g7UhOXbBi80vya3REDBsryyImTxqcvnV3PDbtDWMGy/l2waT75qE6svfJM6aLMimqcWcZoc53+UwtEMg/B1wQR6KoE9riRXw8HvTQyKo4kFnAwd/3DaDCzgFVbG9ZGK7o5QlSdCeSFZgtzX++vNbTsREcMrmvvmK67m5sjKWVRkaE3HZLH2DRm7tK6HOLDfZw+tS/Jpguqe6u7pejmdfjBmn/5yJQ9FuLZAtNWwnbtZ7lh7IlCPlz3Xjke7aKb5nOXgFaAz/zjOLq/jzhH5afFLyCQKnMfHwGbsA4OmVrVKRRkD84TjQo1ljj+0nHK0CbuYqJ+gyzMJuTYQmgqx17rZtaFrJaL2sf1X0tsFDrmX0ToBI2nuwCjsF+pcmxgM6BJBcNujZ1syHqYwrQjj3fvetzxrWr5BSYkb/gXNfvQ+BCBwCVSlRFZD+xRzF+B9bXInoEL8izvye6ZcjXa3DJBPvinbMycanXEsZ1rWLg7X8w10wC1z6ExglrrwrUm4JxHDmyW9O3qj0UPLguGEcUIGy67m324Ie77rLrVMmyyW0LueiZo5xYZ574neEQfZvNxH5f1Px+XdDuMMJow8juEzCPbQljfrJlh7x0Io1jLny1nJqTfoJ/9jqp0do3FP0nbZfwJyeDJstBzxT1ZOFySCxEOyqNNLSbgR6etDeXcCld3YWlq2g8WXYv+We+RRYGnJlFDaRU7mh9On0vVJbVyWzjdEKQsUfTxAUEQ5QRyyFGYTAJGH2rnE3kCQBnu7YF07r0T9n/BsFj3APE1TAYn0hTYm7CQAmDf9e0XNYYqBBIm2fWkiPtaIo6NMPrKegeGZODfUQdn889Phc0QGs1NMxZdVxEZQ9Lzrdeawr7BC5lg3IeEF5GFz16HZZ7RaiqyVlOSsluNerBSmx6yo6yGHTlCWmbM8tuaN4qTVR83D9O1gIgwxFYriQFOMN8DEOd3OrrU60Si9YNL0Hxt2RsDNusjZXDtAEREYztw5z7RjtApwysgikBGaIh6LmB7KfJg6T35+0bB6Q8OlWpJQLDCSOU1u3t/E80Fla2IRTNbKi8v0QPArBve5Jw1qnhES2RTOQUT/OPTEmj+O5G5fMewqQwJAYuP3LuKbU6a2k2vX8qa9eKpngyY2580dDz9vMTibX2ozxl8vAMN0B7y20nnNqa3SYDQ6WMiFtglfz/ark9ObNP8+VCr7cmRHq1c4FHY5r/GfXqWSN9fsOGIRjNTSGuIOUyeudOMwn7LbAarn6oR2+vcPM99iGQzU1O5pxmHB14dD/h9d735qbsZEgUZdSQsMU/8+ZHqmhMGcwvxh1lUmP1DwXKihf4hgwE0jJEiS66YtHtZvVDkMlhV0usAU0VV3A7fbWM6o9PIfeImtqcsJtA67odzaE3q3ODjsa0GFWXcce1Jc5Ox++hOIyNfT7Py5xcFU6b9Kw37XtWkbk8DtGMyUSzFat0NnYxJgWt3ZWuddiX5EaAKB10IzsEGj5WXzX9lfGR+iodVsfefp6msUUg2yo8XaA4o+nYZxpxJsNxoI1zI0VB8577zqGhlbXf2SLfU7AEIAfgrpSKgQzarx6cMazWMHS9bQFQDDfpjJ5xG6PfWjRC5ElGiAZWI0a2WsYRfYhjXFQou5JWIKtjNt47WYGvoG+JmGGUt018SGatf+W0rhDTPKPcZ7Foq6P/8hOq97Ee9fDMkCGvF06sCr+A4Am9IhjUKBtSZgznrQ/0hUarM9dCNn2uwPOhFpwTAs3BmSZYAj0aynnxghq+WlHUZLsGfFqNKijS/VE5Y8OmxkniAKfRknic2wZfKOnHaDeHOgH0YJ7wwXUsRPuw1B5e6JSfVrIx3Nv+Al/lGyko8f3RaU3/O5m3xk5tJCxkTc8wHdjE4SlQWNdWf8dgSocW101wdjG3PHofj0zZQCFwst7Ql+lmSJxS/tl0UJhEGUsGUGKPrxV54lnS/yOC9qwwUtjUvzmmIoUoHn58OWtrzJq+6qtEU4rmIQ3gaDoJOMho9YzaIBqK4JAyboCJCCXFX2RbIg4uZQsDetH0vQV5QwKgcw9
Y2Z00CXAj4wYJRQuqGn9EViHOLBO5h/4nIF/MGfg2xM4o3LxEjs0XqCW37DHssjU8si4BqwnCPmJ6wnbEZ9CbJbuN2y1Bi6UosYZkl0+DbKgdpzLvlWl/OyzM9Fok7lvvegCiQ97DKDBmbpFdg51QMlgDq/9Dw34qDHUPkW1OgUwgq8ZQghymZkNnI5ktc9pcTtCTgWRm62Eu5PlyM7pidL5c+x+PyTT6EzzI+FZnvJKMtX+3J+TBm+3ShTVF2TE65PCj8U+d4zZcggFFnTz5RtEuXgeLyCUhhP3vykST3EaT9NNPJdvPMjJnC0G+x/2Jhl/fzmwLLWVi3vxRjfk3ZAIcM1DL6tPHzZjS8GhHj3jQaXy/zIeZTfc1AgXlXxNxSQXYfizMM3me6iY93vT/7s56FoArmRh+GBy5PvOf10DSOsDqtSdQnB5oi08kTvadzX7G1TRzn68WVfnrmp35HjW1mfpy8F1gvFrkSSlCaI6kCeDLaWPZZCS4tGKvENFKDcSbFgdXwxAZk+wonTv4OZNe771YbJX86k7cR+ymYh+XEgdtp33VoqujQrCNQbgrvA8DA1V2Ly1nDE/5n1LiaCshe8LYa+1oPKjF1OJToAVDKmhdug90j4YCU9r53JrMCLEn1CUR3HwyZfQIokTcBpMIQ8BBRROAcP037pBFGYrD2/OM8FfhYh67G0mmVjgoMb+P0zey92YJW5EoW+EfyB4J9sEyMc+BvfdmMgzVZ4azf7TZ/3eDGrgrPCESGnf7aDxhrSAb3+vWQzU6isw6CTpAoH/5tbBleidzcIM/jMKhvvs3HxFpJXskM2rzW4V0z8gPGpaHb5xfP+fiDunjaM1e0r0ICsH8AYMkamn1F+O/964nVvoglrD6t6DfNx1KXep6eZdaH/n4YlgyVDeXPF8b1R//Cv0fOpnTom1O3LBl/PPgG7SOv8BLDZ3Rjc3uw7OfCp1LfkhvkdtBd+T0DRVAfysSiJB5fqoVDYILEjoSUhCrY6GVQQlQa7eHwQQoqwx2sXhL9FUpyOD0mSMQO/Nb+RhFCHHRax9pMuThfGmkjFzB7vyA+10QQvlPcYZXo28hfe7E04rFHL5/0vUAVc9kgGphR3rirR4iNeeLm2P75Qp5uWPaM2exJCFWG3v11rvD72M+qgk2Lng3AsL2+1i+h5ffgrwp/l4zT6vbnsm5fhPTxyaAtAfPDJ1JsPl25QfKUaJdnoDcTPVKpdSMC9oUIU3vzRzOJsBLSQFOisrjSKO+Hqbh4vjXYkV6vxk1g1Vjo8i+Q+R8VpSAD5rG5CeAOQ+hFC+z+ufgQTeddSpAZwXi24KUETMQNxkra4KC/GdH1RxImvNAGGe7ksl0Q2mEctuVF8tnKrjVgWchuwl7ujOQgmOth2Mpkn5HaYD1H8RdM6/vVadhpB9DH7cI8JMB9oaoZmvrBBKk5XETGURA3D0FmwXX2+uOXSf+blMc/YazcptOXpO3bC9kSqmHqilvomOpbHpECw1DdAd0HnS0IInuKQEq6N5FaDRIOtURRt7t1OL7FYkoeaF4wgyfl6tYAT6PFLFLyMPBWM4jq1WDp8Pt7oElWwT3tteMurCvZMFFE8Y0VrlYTRLGRhTGr+BYV0AKxg39QvokwCbDrAW4uGsWKKwNTuReOKI7hkIU1iqf59+LfPK8jJrtgFEr4QZh4l26rzEw7oINGe151R2gueQvCunXN6qjUTC3QHB1ti6pas/XIzC7SJx/ABxcaAS8A+79sIKoC2UsKTLr2ycH3936FO+5Jjgu4Xu1kt2AHlGYdYgq9nUUHHyP3AWFKYDIT2KnbMxPe1ZkktwimVLT3bA4usRdOlZbXVxRjQ/qJbeA031rjVywnEqGfHPYSHfI4ABYyT4IzBg+oRqjN4vu5aE3z8aM3ukYU1wwNbCCIEILL1cRf7UFdzDnhW7jO3m+MehfTu2L8WhJr7/SUjLdI2n5e/4Ll0qj0H6kfYKBncSTjNqSqgNMmBa585lmK5JLYhDhbg+24fygXyreD+58hzm1rcmlV50/EzmxHpMOuXWMwucR7mDAob+WPWbCD79ZfOlf6oebiC5prhI6n9IlqMHBVK87IZeKAat7nMeBOoY8Kgcbmm7M5NsxYgkijXC5og6sRT+4I7UhQGTRcGY3vRcWzEFq57m+AJXfl4Y6i76tA/6nmQWPZVV6qTtfNSP0z3lgj2gqMmySl4S6veKclbYzAkF7OXA+/xnFZmiZ7XJoOSPXH2ip39q+uEeLG5ZqpYpwoSTbSJ5tZo+hx6ZSTTkq+l8VfRdPShOdOx6Qhxql2ZpM/ZMI3Q7kG9xibIeSdON1478+uWyAYz5/1Rgx3G5sZEkoGRxte8aq/Tjwmtm5visLEnQp19HNOsSSdcCj3rG6VJBZwC27Wl2XJVL81mSLFUIDsUzclrgrJDl99BL1cxw9py1qI4/jW5/RzCQeDsoQDYrv86tblWZDr/xYlqXdurxaqG7lCPQhN+y5zsjBgswXOeomZBmuuq3OmS6yJeTcS1PVr1xQKEsf9iM63kald+x7ULse6tpuNkcPIMiXMFdE2vViu/cRpc0u/nHD7JJFEKWyFTlkQjBiRUuAVGOQWzJNEv4tUuqfzpMh8kVYQfbT0AkI2eSvMBfKcnDOMsGrj0VoQLuCbGO6/LbNjHD5yKX5SIr3BOIKo7KEHkz36gpSpmrd31woPW7hZKjtaE5ylvj9Mw1wG6zrbHUWSyKBW5XTPvJB9qo12Eq9hQiKIig3sb/6AFQYVT+aN3k3YYFqFFz7zyAJg54rVI4vMPjWzHjAYrKsTWPagv0JIxqvs0zezlLhZzUxyHfdVbCqyaMbk8lCCmBO7iaVJb23M3uQynAemptw/6nIKSCRfYrBt6eeQxnK8/ZKZZ3sf3F8CFgYu/sXzDRdfrZVcrqmpxaOdY9fm8e1n32Do4xgPv0Es/KNzRiNWzZc92r3GxqfGz3zN7OqcIzEYzfoMyIhW7GnGcyMwEgY0dSshfrZ6ySXmlxKK+vLVBQd4d5+kR4/YeZ1yGdfUxbmHPEK4DiHPwo+r4Q9ScaGjbgUzEaiSJlhDC9SoMyIAjPXYtU/ozjrAV/7sArUWiKxZoKR/okVfrZG4jfqVgn3Eac2SA1V5+kkp1/HXg6ra0cqf+ey8deZ1K4Ugibcg3DT0DI4UBtkyECYggEKcmnWNoVzye2Vj6n6p0SrA5wEFUYvLnMZYbf3UhrGcMajfS/OlRa/+fhNjznRuVkjyS2jNa4YsP2ttukxjvRnsCv/shdtbZPokfYayA9tVyhdLiaMoDvZjPozJawuMQmtjdW0CeTnfYdYZSQVETTC6lgxwPiq4sBhBcSqEutR5O4Y92f4hv5tuHEJ4+xKh7tx+jT9CYXlC2hQ2ITJjSIFgF7bnNi+bDsrDMREQqDiH+nuLcJb06snE4BdzkU8BdaGVMz1lqxWdmmbdCwFP2zTan1ZFFqMpLO0kXnf6sGVLryaJ9YaTybpu+oyYUr2GugfayCvR39vr/4Ftk127XB+P8vRh6K4dYu3rEKLON1aKkHuOp
lcgNwZi6MZp0Q2ndDecEIEePegnfJe7jiKw4zia/SKvKcNQp5Zf8G6V65WUq2xXnNLi1p9U1EGNPg+Q4nR7qhvKJmxrd7jjWxTB52gelnnNacOFL+78cgpCZw0VQNFV9HESCDi7yjXfY7m2taQa2AAmbhULV2YlPyPmsP1mNbMRfxJCSoIp2I0N6pAi4d99jpUmQtduhcsffyMN4DOfBeQtJoRXByLFgWuYulaLkGQ8Ixp3dFEM/e7EDW9ji1RyTu7liGuTiVSji5c5exPn8yDm51a8vDfshbg9ptuJLkxUY168mAsnai1eZE7wIMTQJYIHmJ0MsCYtKCFFf7Q64HKVqmMhnVfGG3epK7UMQe/+e2S50+Jdb6lhKl0RTHD3u5T3waLPVJVcDlSTUvRDth0f4g3Cbo7txm98bwNsupC48NYNrNgKDFTz6Se//TFYkBcOnFEFFlc/F+BqU56j+5Yn2pODpUVqF+zt/iZolXqwztt0Ahc4lVXh5DeQjt/5UdOjQSW3kvnCE684bDrQIEKwa7yWRBryN4GezIAFdTeyVBhewt59ZsIg9vqy+xwtXh+2UlN/kTbpo0KdQwN+lS+aPeMzJ0QnvpTwDbh+daoI7Eed7rnf88zDESXC5onKHXVOzUpDIfMn0eOAQTNOaOXqHJM15loxhGXIFaJ8eBuZ/3qAX3Q5k2uy+Gx01MRlenIGbLkefk0iqSOnFoent+C8fkwKpJwe3Is99Hnf+oWm2CedcY/u2UrK2fydvhbfzw8iunwcV9EeP/diIdqhLpWWeqYpI1vt03wWkP+3BtUFVnvX8KxtggU5OCe1SmwoiyNA7fXD+WZuWVnFt6isc905oyriYGb7L8JjrV8XupdOKgxcwIXmVFeKXK4RFjRPiDahWDIQPloFGf9Yv8yvs8wS+k0SUZj+fiF4aeFEXYhROGUmE0NkR/achop4/yJSqD/uB7NStT8xnbqDyU3f9EpTzbBRgdyktUu/XFcadExVAq76mQRH58aCSzhnmrfOucmzQ+MbOuKPNqe4EAEtgQ2t1zwDBcPZzRoXOUMWBQfH23L65UWQMnUckmhW+P/6f2N1X2IWMbz2+Fv31LW57VCr6p4edg4GL2bWj2ZV9H2njbyZlT4LoMPa1B1jscGMgpAOKA5kzssmdcpc3bw6k4h209UGwb1keOrHKOUJOhik5P3Wwn8Jj3fC3evRQsHisJgo9U2Cllf2EYXua5eZa9UkJ0T+Urihq3AmWWL/kW4PhoEYAAAAY9u2bdt2PrZt27Zt27Zt27bdITrIwcjUlXeLe61oCgG/sxUiz40kFngOiYH0ewNIaNS8oTjCIfCuHuHhkVCW7yhZd44oje6WcupQsjqOcotUG8UHrq++DQlfU7fHh+wG5A5vLBgbAHWGHa5QHd/IH6suUs6U4zbCWk9fD7ga7CMXMNOb3ptACowyF7JJn/t+pudfZMsaZj/nq+c0/2patXMAAzcTk3m9n0ON1KGAnLLE+8AO1S/LVTEaqopoI0QWFr+CgxGQ9MQ/4RvAqRJKvG8lY12dKbLhWzYs6ig2q8bpzU+3m3N9VMwZFliMJhj6lSSuQ2OwWegl29tsOgLDxTe89eZXY1aVo+u/PsRUhfJtl7RdZeoekbMhczaxL9jSF15GYujX2G55CwXh4PwDZhv1+jBwzIShEczLojQyHKvVKisigV9ALhKwQiJ/97kgLsbQA5WueOimdhguO4o4TJ1dynG501XRAD0BUDCGV+id4yiFmyKuz4FHRv7ccvJ86oR62i9MfEeodLejhpAy/Bsz8sIHdITdMvR76ipANppDn1ZgylIAbDEv3/kdZ+qfuWYHaXhX/xavw7cqq/XHKKXM747AdnYSsUIv4cZaiKqm9AtFk/noFC775tQZLkGUHe+uf7sHLl664Cj35HVf4jBe2JbpzBU8hOMw3U+AmPeBlLkKqIjaaV0FIhTXNNeVo3ariR48mxO6jR21F+O50u8yELzg1Lv2MwBopuQmXqbKAAxeBUghZHeJUy+1y9/UOgI7FRbUvrmFc9hMz++eU7yAA4Tncxf8+wuBjx5hBeokK19QJzdp+plvG4KNdQUhPZJMQ8BofWdi7JbK6QUqi1ee77MPbacZi4AK6StwI1rzkwLGytGjC3IK2jVoFjwD7IrYqua/SVlOqbUsoz/s0KbjafqLYVGq6mK6P2IpktHPK+1rrYuWQzVay9Zj65oVSogERbcClWu1Vf6N/IhLxvxCEe2yTHaBFRPrzWNvQ1UzCm9AMN6js/tMogjw9VJUMUQTmLMWMd7V1mt5PwXusR+kwmtyNL6ga+F1NYqIUxjYwopq6Yf3XjdnriVtTCzwQr0wjyZ7SXR9WHK1cPLE8NnST7Nl2Vxn5nsiTCfAbEgLBTwu1QrJ7m4KDujZPz5eD2kqEVs6kdwFoYcqnS1UOPthjbEO3RE9Vue49YruVAHZCS7Uesq+bpyMh/IYpiajU7UxbmhJ8wD15bvF+zc30UGmqrttC6R4tjjM99Gagx22WHAIV34lDyQvMx1fPh8NSnfclW51Ah/cuEj8eRy7Lkw7txTDON9XAMGVn9OvKpbm52TxCeWlPyENVxVagLCu+eYVaOziY3b5MjXcBKOaVCCM9HXov+0rWA4BNEP/YzLf8xLktvMFvao2sDwf2p8oiPJux727l7NgxslreQPMfXZTH5Lq1+PcC1c2nEHf7NMbRYrtoxxdlEzCmQoZXtPpcdlg/ghrRj7uQ7r8gnvVlTedC6V3K6a3MXA5GjZm4GXyZ2rchghmSGrYZZB3fqlXgnV5Lz0LDn99/NxloID40PJwjSiOb4cSuJmlJbEmyi8O97RxWDXuyBugN2eP0bE0uQkf+nykapObzy4FguYNhGMYeos5feZZ80YJjwvCRJte31Qgn+y3DmjncVcSTN+eJwehp2o73IsOq433HsnP7zJnRfCNBWkRXCMf16KATQ5zX78K6plYSXptCwXDOMGEEL95PWryxF6OSpL/FRw7lPPgfe0I1coiMXuNhmdddCAbcMVf+nmuk2kOeNAixlSN/swq5PzRbgQ7ncACr+l8BVSS7FuuNCeAMEDLge2vA2OCYrL5mwSJPfCKXpD9+Dkx6154CL31wcKpoaK4CylLbdCnUfn4VMwIpCestLJKjidazI4weYki9AWw4dHkMFBC75IItfMefOQP8Ekgow6w+OSeUtV1snrDxpQgiVK9Y1AlUSv5BihMuKyALfgPxJ0Q/pJnSn0pNKRefue0KD/sTnCLWOP4bNK73lhIGPBsZKqlW9pWQeqpToJ967z4SFrGOGA71mFDsGVHxBmq+2f/MmcKA87v9rHaQYSwC60rdgbmtTae9F/kHRL+DTvF3+zFJUrcTEDMXR8IPKsIXgRXfb3+i905NOpZBBZng8+/LmqwjDEAVaEkdc/ND2532JC1fc5Lc/k6mJatnbtmAKKHoUYcZArtX0C/hMuqF9yndQoGpHZaYApUgHRB+jeoDQ+GEzkoVyCCDw6Ca1FTloRNh+KTQKVyyEJh7mQd/jct3xlQIBDy
/s7eOMHQ5z8+UMR1m69jbizlUd3hlYeKsQZrgnjq+OmfPco/i6IYJLOG5E1klh+CqTi3T8C53OybeGEEX8GFawG1KwPUQnXRY4Ol+PvTfDiolxBHnXnSuW/QLvEzMcvhLmpMSi2FK8GBQaT7GFdZt+TFCKq45IS3yaRR5EPHJOmwEbv+eujBR1S7wCLECJyKFqQpIHJPMknQLnnSKqtF6R07xqMmlVTKYoUeDq6ovGUe64XlxD5OulVQyhSDnRMRZiXriweUPuEsChi91jU1q7tFgtlX2Q5SDT7tf3Uxk5yOw4VSRQnxBm1EvYskWqkzlRkmBDv2BnkigwbdApAxv7YoccgyK/67w6n/wltb+EqDgmFvQkL11wzxv3HNF8K3R5X5z4jSmwCoAa61DlRmYpXIpNNeJp5FmkAhXrJlujd193IQtsMh0BQzU2jZtx0cvibw38l35QKtPs0amLlzA0bD5jdJP/jE+365CpCmjsH2a5VKfWUiQuzSxZnbqqMXCX9NV0/RP0Cu9V+qWiv0lHMmmmf2lqIxvR82UixEuGyQUCu9fB8o5RLQVQI1rieILP7tAt6zElYhzO8krab4mfJUvmcFjJR7ZCru5jAlCUBsI7H1JA2usfqu6BszAalmV379G6+hKFjm3jee1xd0wbSQbxRr1b7RBGVd/2QoidPed46iI3t5wMoTzJ2VGL6zwtYDytdg3h96/Dy25Ngh4m8pOJXxeyvAWrnUGBVpjFkToNJLrbA337Clu3JbyYTfH/yetiKlH8MJpbr8fSWYQAqh0SHVOq4X45iNdYLCsbd3AqeDIenDlBD3Y2sjDr+j7K1FjrFdkslZ25ZUEr7wYKOAXjma48U8J3Zg4K4v8XNswgiqlmQmBy4SgN+JOFHTMnDi0FdptNkMrxIf7vYsgx0HVhTzsTzqlc7+QFtWAZfOSN2etG16lYqsl1YjH1xfo2zJVNY8O4QrM/GLxnoHPgxZaSoxC1VxF5rKkGFS7ZObCcajVS2XLMLAAP7t1BFRB5cazZcSEXMKpyfOLRWF93Ax9warXvjTPdlhrp2MOi+/MbJYuDzyOMu5sepfV/x0zdKNMQnxfhoYiLXOgGlFtBvWMrr4IKlXM3BV7LkCFl1BJuk1a3fzrULVEBbB1YdvycmOMQUfLWTzFq2ahCmk3eDzVDx4/jfulA3j7eEVxKzcoYZaAn/tafybZMRsyoC0bFUy2sf/oeFjxQehWNulZ7AlmhOzQrVik6kw2xqwBKij+P5412NxlwGaXKe34KyUIXZhBZ0qdxGxYrZafJxdSG8YR9bUhd95uCd7iGMPEAoQlxwylyrjlN713gH9Zh9iWvi7RWObcWl+JNh2ZXSacr4Qi45QatoZOZzuXe90Q1yxg/oMIbFGCrJvxcscyR2F0M1D5nkJItDcWN1DmWh/Kj1sHwo+EsrdAokseuL0VHIohi2+1a9MbzV7AOQrst+xQxeQ9tX7rqOGVDVEBeAkNSvrZOzAlHDx0sa/DQGEafpmfICpuo8ETsJUBSo/yBKG01Y/GNCXSfuPFEgeqK2l7Y634TI08vIBtbgV5RjMVIVFtqK/Jh6c+/IGD14xWg8i+bRebx96HHEbeQti3tgKDQq+GJW2wGeofu1F889z5ve5vLfX/6BFZebMK+Z3GG3WXjLkesx8UfVPawUGMU9oqCHWsSN7meZM5EYdyfjjV6ktLEdoCuTV/e54Z88i5bYHuJw1ByC+LBrDuDlJn7/IDsmTP76Q3FZIGiUnAYRUjl/Nscknjeex2GoBIWL9XcHV9F6WgyaNOiZG6b2Kz14oA1dF5rWGa4P8drD4Isdb24QdWWI5pOfCC6/yB/VoUaKwnIzwjfyr1k1ZAJ76n7mIZXQRwTcEVznK3kEzFE55bXn8imb01iDEEK2zGXvLQL6b7Bt8VjEIPdazx61iHgL4EXID5i+Uoh3dmhGxYumYZT4ICU/S5E/iq5CxMW7hJy3dL2G/BrrDniXVMZA3HiiX2NsRk2+kK4FnMnrrXF0NWMjlIR4YO97H2jzUWz554FZQB/1dYdXBgh+EvlwBmsSpTud+zOg/0pYn5T00bWKax160L+N+2cPvDbMVsX3PgUHGH4W9gkVNRX0N2XC+ADvLHsMBbRHLu1cHxKTvhB29q0IjGAXjLvxPpsMJRgruPShcElsnbBDgl8L3gTZaEN7pkFFElRxeZQjlWhXmkauhGqUDKb1BOFYCzYEipJXRPLocExHDrGkLZ/CSaACSlhMbpe/JVsKe7OYK+Zx+Qp934wCoHGnWvOKg4q1VeV8t6ak0ZNzDlQPXTVnFblPy0UnPiFl8QTVwJyCjOBrw0PL2Ip9fUMPA1qcBWanqrJ4zUvODXyrAA8xd/Z2Jgf1v6utE8kHF4S2PA+1/aK8RZjlGUnV3NgqZ+iKgatqbHuZgyhshQvCLL9YyOxqDdgM2qDgwPvk3r8POYjzq3qJTvgIuEQuaNRG7RPz5ELLrUm2VfNwH5VmzLrEfKKi7HR4IvsU9JE5nClRsXZH2f1gXPJlmkIIjkgUii8sWq2J//REJtoXzv7+hlHDLYj/pmWWfg//8l3cnh0i/zS+Enb1GiHNVwnPKmtVhSWOmjhGghXaNdgu+7hjbB9ZQ0YvfDBK/xA3WaWi7i/gaZLtrBZ07ABrL05ua21JZ0YoJ65Wdp2uAhR9xqG94jQMypo7mah2S7XmEZuANola/nRs6SOthyQZsSNWI06ckHvrUEjQ1zqMCNPzq2Id1CZ2Mr1E1CsNOZT0AcJwCq91vXVcOsoUxWyby8VrCRykjva1csf37pI+SSfZNbbZFf9T2X49fluZajuXkDOmlHn8MITcFLF2iO5iTD311rrUkszMqGdQ1jFENOfr3HK9JBwInq6zVzxMOzTnsGgnDDjpP6jPJxDIAzjhyf+4LiBHzSkdnao4u1XPQOpAK1IFefQohl9u4ccTv8MTCr9AZNr3Hk/ZQ3NbED8IOgoKWYlLJJq2H1IVRpygliSqg1Falu8dBIZ2ICpUgXfw0P4d0XEvkt4qCrcrkyZi8oPTpUJ38mZetmo/k3qcLtCz7xCrfyBiXy0uoB4kA9xgX/jw99bHZkM9DaXicU7R6KR2C7YO1Jh30SEGoNT4aM7OD5kVRx+JwSLUcP/y8OByQz6PZOnKKrrbJMkQN1FHMp/DajiQWIWdqomzwuL/699n4EfDlu5YLcZg/FZYEbPLffw1ysZvuHgEc6r663ZrFew8KIuDRrzMHNI2fZWse/HtsnmyU2Z1kVtc4Dbwg7k8kf6kPH9b1crXHEkVmfBwdq1rDomjmTmBEWvfL7K0hGpAYWScVi5Tm7QFke6bspHuOO4bDEn/uyibF7YyC+12VPJb6bWnA8ZFj073uJByP8ED4ZGAh/P4WwEiK0muC9QH5Vrjj+RIcI3hwdR+BxfelzCfGhgtSerQ21VuojgGfZ/fHXddGVLjkKD4ml5i9Ai8F6+H5HrqKvKK+G+Ui0fWO/GM5s8wWMdjPWPbSqhr8l0jX+CfhxKX+XmfNzUlFWp1NaQ62cOp1/EK9JYT
yo+WcHYTstuwHi79/E5b4l7VhNb3y5e4D7oBKbNZPR1XPhNIrLBAKyurKuDscmpYXfKWLzcXbHWbs4MbozlrRmM8cigQUW5cnd+6v7kK13Z43crblx3gF/YpSZjgvRiipMiPXud+LhSOxuur0WWb7/jZURr7u+r4n/VEb39gwKvOIGlJUR1hQ0V9J7gERaOiF5Y4eWRl1hKmXENHTVRZuWMnbCSmNXk5OluK33BWtNMaBmXUJwLv4Cxt56rBK1zEmtjHo1yCQhbvb2APwDXCCM46lrLeYi6yufEVaRE8ccBIo0eRsxVjA+0a3MMWVh/pdWNj0pAAsChOjG8ORGKinb890bbBBiqgdMYE9TFzjaEudHCDr7v23o+s28LQZJwHqFzn8m3WBm+YINVcg1Q9ZXgpgwhvdW9G6KAvLjKqt8pZSSbWPCZb9zeQOy8AVjMyYMXiJsBka1DZ1yaE+PUdP5+xbgnZ0YkvlmnoiO54spRQ1teVoDgs6WaLMed4bccr00CT8mE4g6jFGEqBag5H/EcOF9d1iA9z6mz0r6dC8fEMuhtEbeLXBNEpC9y0Q4uLi+G3T3pIwR7gKcTiZs+SJlUAYIrDkEXiCi6q7+zBb18+VWOOZxSzVS8k6+Em/ZIWSQ8Cfa8zwpPJ7hAIymRUPXQB0Nev1Y09GOFu0rhCtC/PjfMezlFrW1plk+oqFnpATWga2qsE+AOy7XGv43KzAxnx2g1RTxjZgc1ZVn8j2qRqktkl4PFfBOyBt1HjaOm3lT23nGIQp7fVzkmuknckxEQoBI7YRyxf4dMptj9xCDXxL3k5L9BarQoQHhiaRR7pt2oN5AHsxvs2s5OQfHwQNx5HTs3zDEdQVvYeT5aiiy6kDw1VUfuLA+nGFDnZTKPI+3eYNFfVVk+UGwMSLiWWrw+6R9Nvt06fHcVtOm+5earWOXfYXJl1AoAdZ8qlS+SGrcCqljqelylCJkpeRNwleDbEBYZ5zXm1Pdm+LkYvoDlKAUCdKsUAzLv8e6dNjtOUsRAjjfopx5JcHaE7m1Ea0eyc2S+qbbYcXTEMDN6d9fIU9FmJxjTXymPnk9vxYE44SUgAeg3kecF/IOwLTulx966emmSnundnlcRm1BTFhc4LuE3uahr+l3+Vd7HIVPPporjBCd5cAyrKvgbdepiAix+Fh5vcpBrcy3DsvAfDYXpEXRZjm5hEhJX3nQcKeiHoTh48DqjJebPQoAwKQ04Iqtx4QGkjf016u8nQwdjYU/IvgVoU5NNFT1IfVN9GK6ZHvRORR6PzjJ8VJraroF4GNtYjEkxsI/qrwPE4bFobskSOTv0GE7eNUGgPsOrxlkF7g1ear7OvEbArnK0bPZT+tSHciuTcZ3WaQ5SXSBEOzgelkjHBhzEUH0W+clvp3vbidl/HYb2bxuDEqZC8GV9n6bX7Jm79sncRW8i07909k2oB/4Np+eid0fMn7FFU5L/G8uX4bTPUeHNLV9VwPWi6a28pIW3KsSMA6aGyMJybrR4ks+HaCRAkas7l3SFrZafQU2QJ1GXkkIdsibBwXc7qVVikmRna1Uca4dvScvhJVs4TZIIwV2odwzrqqjvEdGninpW6dAL0bOeUXwzKsPZ1BU8IfPnG/J0Ax0StU/Vu/wtt44sMpyDyADIXVKAGOZlEcN8NULLU4q+ntW3/bFByhU0mPvJxr8Smoz0JjMXvPo/SUGsz42woudNXghe3cWH1bdqa36vLCE7FfR8z88F/B/JImnX5XFckhV7ZRuFa1xTEevj4nyCJJApGqifbyvHRDtDMxb/Ol9O4B9wha3ppyWvRTd30ZWaLvLyInAzxVx6jrxMB+aqIcnsNd98uT/fL7LmX6vqDUC1nSGuBqcJB2tdwwGpmn0OmMmZAC1DdY5oyjTd0/2i8nC+6ypLNBzoM0fkvIEt1Ecg5q7IT97JyIpciUTQJkN0/4gMdpvYrHc1y0gWTTUsAWC8YkHeDDytX2eT9M3rTOmcFc5Jk5DgVxtGjHikHugwg3SE7p4ViYDKkIq1B4azegV0xQ3Zf7WtdXkEU3sbevY7m9nI1jyRMYUn1wwTB1JKorvM4E9bgWU5akeaYvGQEHwym777EelXL3Pxahnhd3cNPkWQw0bmQ0wQVcfWoURzJUUt7ue/IKEsrCt6NMl4GOGpB5o4NeGe5NAw99zntLoSP4mR7qqx742v4PZy23n4U/PsLf8fjIY98Oe3FaYIFb/e2SZyX8iWEYny6anF+o9mhIGAMNuB/sIiLRtPjJ5vsn0GxuTYW4oRTbyOc2bhiIKZNqXqF0SI0LfzGM3kWnlmkJVZM9cuhYyIkCioqTy8QjcdN9AlRGHJ31YyLE30kfykIe3CQX8JeOXxUG6Bm56Ng1W9bTqIb6KD2lnzx2PCVUJFeQICE0EHym9Y/OpKGqjwPUvOikHLuyRFXHmvIsxTuzgjd4oYcqvAiEV2j4u5wj/tZGOjSXCAvmXn9MIhujkIyeUbh2rcsX3u1hPIzfgzOBIjFLk9rL5laa7VripH0PfTESKl1LYxs3wx7NWlEDxt22ZdjVLroFhQNcEsuhrIjHAldF4Y6qlwYwfEHdAjwwwNuO30fr17XBZ2psBbHXcBkwniVGvbXnrdWbx6gy2VKhGwQSOnbC72kC5rhIt8zZ9sujb78uOAEB3MPxE/JkrLW3iPk3stqkKvvO4ATq+mSRbZikIEwgrrY8rK8VGaL6NKUSDNHVte6Cv4yEY4jLr3PQ0CHdTolrLqWan6LTtEC/NJRZOUtQflmO65UJ89on5ZFhxOfQ7LzCIBpUUTeJ/hkemYmL2vjmjwW76Tb8YRqQ5OaEfArk1c5gSgQsytrA/H8iEfrAfPrAzJkwegU7NN6jQXQNC/EXP7fsgCDc2mC05x0JLz7oYNkgtU+Cr+3qkSd+n7FCFCVsP+hbrMu74GjZ3Ai1XqdMcRei0/nf3Prv+htRwtj67R+Qup4ywcWDJv86wKZNlwULrSjYo+aIl7j/qhGNvIm72jyXlM7+tPvzBPtsE8xWtBLy9eRM1pCBED9H+ZGl4BNWdv0j3JOGBA7QVrLGZhumpfBB6tSxXvezkHKkJu1X5ZIBKp1RbnWAPWodlrafKAeEWtcJEvVJx9gJjoPkD5Y32VLiRAa5HyCf7VuHvutPxxCciL1nc/0vRFwqpDBjhITqjEZMiC3SJ1QNv4/aD8m5cMPT5l4x7oBlZkqpapTno4Qi69kvTetua6KIPlGEqo54a7jgKaN+WZC33VYUnd8V7gctmJrUf9q2VtZhZXOYxu1foDS4xqkA75JSEwEG012IbLWW102pRg2eMXeXC5vp2zMHYD+uzkFaB0DPVUmnQK+cwZeCGBgP0Ywl8V/ozQeFplLcpaGJgIhuBGdnMTm1zR3OBZeEdCa8csCfJ5Eu2/Pk2thsRIaKJmM9e+fc7mU01jdHHLtZdPpqIE6mtTuBDoG5yvwQ7MeO08F+szKjDunH/y28UOT5Wj9pZ5+6ge1EsJ+0JLqSb/hWig8hgMS/fDJldjEKZMsISfl3eEQCVQT6aLWhuLnveC
RcZjugf0Kx/Pj+MqHMSSAIks3tLuRboWEbB5w+23FjbvqOLTXdWBzRIsoyz8lQu0uWXGUN5rhMbvQEyxkqvdbGINIJN913BgbLoPAypZkEfmmYbh09t7nNW4dzjvJ+CTpRoIc57nb234h2JDjKYPaEVPGoL6Z4oUYVIpNhdjWHkEjpa+9aBgf1+/ZiAmUP9kWO4quIiwTZS5k7EignbdvaE08S2ZWyTnXO5S+SfRi79ctW9SZLWHSzpfZhlhjJr3zTnaebeNEaGYrT/JtbjKGcVADSGxmxBzfr3vD4p707tWF6t7783PuJCnj9SBTSOTOqwcpR67832nPQmTVcFlT5Moq9l122vIKRexDcIXuUasxNSadPonbc0uhr0G0zHNY7eaGL9ygzFner8iHkN41ZOKccYQAby8XOs/AutQwZ5UDGFyLBq+KjxUEDqMbz+febLrePzvzQv8nx6VHW5BGOQXSnSJy4PqWxUINXV6eseaD4ZfxvkORPIdwyE1F2TsE332rK0oXGeCzQLvXlm4HZM1sZPm55bPtYDQa7T3dycRQeqd6U18j6tLMq9/VZEnuOWLZWlXhICUX5vJRiCWrCVTAw+MJ0AnxdU5IHiXsW+e+dG+DLyRJaDJA1s61D4w3SYDRnEURT0CK4qpTqYy5wumDgIJr9G8RjOW2Mf7ASd3YOhrDoHDB4GbToakJnpu4W8UUa+dHbgGd3JciDlgCVEdJjQ+LAetH2Z0718yVGyshbtZnyJ86SxVzRu002LyZG4w1efSBIKup5EYHISNkHsn+mg/yQrBVuJTtCaxGIqdvMm1VSJxVQj12SX8HFIZoA/ZhGd06TvQrJRQYpM+AeseWL+bhUxzbBoqMkSia3M4hzk5+9H/jKSj9gvQT3IlJLcUyY+AI3HU4qz/iBEzN9qus0CT6E1nFKila/hdi4ZtZ0gPgKHq8441xQaUfbCrx2Ucdd2+9cs5BiErN4S7hWZPBlgBLbt5rJdj795DviM5/u4SCS7JYljBf3SPGuiSAZMZTuSoJ47IXKGHYb9KCbsB2h1NQ7oHs8LpcMLG279Gqh4aq9wL0mg8FGBczoY0sN2OKLs1PpaT+C4l23kYbrG5V+Y798YybYlMlf+CSSAO5sc769he6nm81Tu0BZuvGJM19C4VtSENA0zH97K0qRttKFbC5lZ8IzLw714J68g6n8zBX3NY2BqfWZDZSBG1PT/AG0UgkrxIUsrrBCAvlP99wekYPQ3PustNxSl9VNhfxCm8sRQEK5TT89O/IJl+hr7I8qw6mHadnwBlkoTM5XgdEa7+YXrXFLAAMY8SOCALzw921ei4q71TUiL9uWMHEpLmn/1gLo4btkiy15Tb5SGnR+UYtXEWDYlsiXF9CD9zAZtGYf3RXbuh4LjkRawvhvXZE//QtOtoKsgnykHTrwQEnlU9taRYV87oBG11t+uxrSEuKeNARIl+n4o/0vDk2xD7zZ0b3rQ9HliKL89WgF1jko5KMdB4dJ1zznbx2QBZVb5yAMGQctXRYFg3/Rh7JyowPVfFWhO9fKV+8oq8k3UWE6aBBDISAzs2yEoJ97tGPdrMVdcaya9FXlnNiKPDqmDUL2+w44Szx6R6TqnDDBxsz9bhHZ0vcQAFzLg2yt7UWhH2SMJbkvr+v8qMZ1DeEYnRi4jGSsui6QTtlBMQVGp+vXc1MtbIUEG7htaressttbty2sgZe5Xr7ey0BG7zTBWSVe1hrOgAnZ+V65gh77HRSiU/IttYYEHmwi9FCReFGv3NAlB1RL8Qi7tL1kfoggzebPDsD8H5lFGOu4iL3ziEp9IkBfnZf7LgDkT0+Z5O2hsFSYBVUKPu8XUX12rpKSfB9rPnWe0ZRyVxczgJqm+f52cDx3BcOtZOFmVA8mlyMkrMJSDnsOsxy/iFdbgt/lG85jdh3XM3hGpB9zijw779sW9FS3NgEje04tWxIyYh7C4aLjSOn5pUvVD1eV18S+Be/2TG0rGJY8GvwYR4Z5lXxvlcU/aTKU7nceDTi2O3x29/sXjcysnWTyHzfeN1WIgqkswUWpInhSwQxQ+5r02/ao2znXSvtPUWvxUCbKBuatSydKwudkpsZZGTKicBbdtikNu7mWxgvt2WkHnzEGk4niVcNYIvZwUCN+W961St6aw9Msf1vEFmhsz+Zm3kgPYFx3h4t34YvVYKeYypwS3AsPQ+vg9L6uarAdO7GbmEOgkctnsqZypohzHKxulS1UfZsDE+XFiP0Bvc8DyjNfS8Kry3NxUDm6LamVwQMGY5nSf6wqqCKZekaWLLiS5InhOuso7Yplz6jN1IENihvkmLkXODyAv/tPP/pRW5bqlHe1tEuB8pOFDSuxhR3O6xqN21k46Qd9cOZCYgnYTQzsu2XHnYSCyZ6kV2js39SNg5XrHX9hJqGqbKfT66GtgFKfHkfAjoO9VnpX0RDfWwQ70f7vvKl7BPUM2fpYwbghTQeVUIpKfHq4xNGx0m9ITbYo2QbaIvxed/wLliyuZR2dQb1hMocB/XVoq3yttrMaXt+f02UC0yynxrJ8iVnch3S18WE4Kg9tGg2c/yTqGXQvnb6EfxvoeEvmhTIWSWgZ4URJi+BOWCsxi7k/Nue2K2HOGYjeQlouSU59jRyowVeAIrPtMxYqKh4TaYMbILnhJIbmhznQlolGErP632HpeTwFP/Yti7WqKQX5Hsi3t7KlHe0OQ2Dfwz8VD8kEy6t8LUpZGRsu+GEQ2dlkhZj3/04HVJsFA8N3w7X7VZcoq6CRPPpaV9fQy03myrG3DrlNDLSfRkk5xpruXLLNZwtUi0mn4oMg4O8uLZ6g8+GkIDF3Xkz8tQ6lmcfoIkh3T2xysw+AUsEV35vw7vq/1dQ08ZDiPb7NmoMf5uvaCL/v7o0nGica9ZccXjDbTqdlxITLb5xyEPa+veRRJODjiOcSWXy/aV5HNrxVLUZGt4AyT9TaGgMHOvvPw0qouZvRfdhm0beYHV+Rx3/joQIZIK9xQRYsggvzw6YtHzHI+wZKTuZ3oVm3PCGw11JMYYYmwoHra/XutV/RApyuTZ8gX1BfUEq1Itz1JIznkCdaY/HkL59hcuyt4/JvnV+vwaZ3lbclraI3dbpILd9IjMjIyLfpb9C/NAD25bHxSS2X1I8KZM7+YO2RI17egp5knpS9xwY7vm3Mu0vdlfj4PA0SNk0b1csLi/kuT5cVUzYiP4E0/t7m8awRCzF86CpsADytdoYCYyutCxIrQyeToSmhFgTfl0x5VRGwspmzrJGCd/yYv18MOP3VsCremb/DRe30bBCcoKlxNbjNlvZ82GMUqwWXmdDdLaAIqcW8VPFE1LyRY4fnayP/2qbNR5xZZxDxkRMk42l699rKKtmTmytwhwr/HEqnenWy95JJnSRjf8QBT9ShbEyxQQAlbgKsUklctQVxqLMm9SGtxAyTlDPdIWsTW9Tlu7+ub1G8qU7ke+CH6nPhO6/thyzwE0hUHiffS2YUQ26AHQpRASLoRuBYvpBhiVrelOD3lSRLcqYWa97n4oZiiV3DJcdxepjEu
N9R+URp7uEuA8RXIUoi7SOG/gE6dk4t3yKqDjORcK/gIufl0433AEgCLCHJXCeeiRPve9p4PzHw88Tuz0nfQoLmoet/5r2F2LG6Bn6f5S64AxIg/vhWjZTQQHAwDojF2psbW9DRsOwhMchGXWsQ/Yo6nJzzR8Qt0e1P7ZpUK4fMcUvUW1rzyufOF8V/mrKSjy2hlqDqi6vcbe3G2CBBaT4vZaOjNx3u1AASM9gHLFwmnO3/RDfJkKV5pIeVRMSzT79irY5qcggKSx9AoMQ2JVSyfCI9TJ+OsNhdnxQlsiF7Im9GmSx/rjZjDeOzwi+tW5O4UwUE6NfLptz/Ht2VvrxE8f9LcQuUe2dNLhYowRzX9x6x62S2jStt+okmZ1Q4lQ3r4WEmWxCx18S3k5fWscPIfliVJT+4G8jvEg0a+Y3yvdP+DpmtGFOmf59D/Jlirr2Jr+B81LZe8ngpxTzgrVmtSthpvJOD1aA6TKNpMvp4gmGWJEfKYn08GW1veSUZHhR/0GFkarc4AiupNRIHOdxLrNTLdfvVGlXwaFcDupmukWedN/gRaD03TBkPTA+QHTOv0igdogkycwnABfrW9JdykrwR4QU2lgRZaEqSmyX6TeLoOKsiJD4dHlcHWbuz31aYB5CyVe7qgkNMZoJqbWA1E/kR0s2R4J+dtbsiNF2MMwuS9+eDICwFsu/gI7IjV1BL5En7iKSfqxFfCnmTlh+rHeOeq7lVfULtlv5WrKFnlkf8ESU3qAkuCZvIO3coMDbVU5iHRl+GqgIIpAzDTSM5K44nbZZOANXzxL5PxvvEk0XAAqPYmEXpfC1WD9zCBgdrQpNfgbXbJXJNb51A6FyvFSsSQI4PxSPQsrd9nKNmbZo8Wkm/iLqVZIi5bJR5jn1BzNlexuVCVi63BwP0jRWlk2Uo0pM0wbQUeg6mtd68vPbNvZvD6jnQ27rRz4CoTXJpA+aBNh6ogNvBRq/ZVw5J2QCm1ngH4yH0rLEva9hK+aYAX0Hx0ea18Waxt4lkzcypHNzgoKVLunYKRfcrmTmT/nqOxvuKCSQ4NuQ0iLMNvWKYAL37491rKenk2xa8KgswxyXGxcq3fGRaOJZfZ8rt2gySwBdElyMKCFPmf1orqKpVvUlNjo7VLiXN/zu4GTOWbPvQ7eueSfiHmEGaEKG/TX2Jvz6Hu9rpBPc4IN8F3PF+nLgWfGfs6WquDjX/MkS/1zictsfzklfOdNvJXC1zBHRM3zB68Qcm1TGh2C75OdY9Qedpx1DZnEt4WILgoWYDMZsSwSKbeVI/8MvXNjxFuCsjwg0adU1tBfQMT232bRoyELy1/w8HS8tfwzfA71L3W+4gFV0grLhP4HRB1Wdk2w7YEVXuJgyMnUzFXihodOTjOR4RmFj18OFGAPPvusPdsYCWYJV9zYapI67gzXLNC4eZWzL0gWiHsjHEVbJNG/GxI3gLJaFSUI+ersdBEeQ2+UJgQPNrJsDkZ3lV7/GoH1MVfi6k07Np9jRkJ2u1xbmvJvaW/tWYWGfB0g8OCBkOCuziuRdtrKomT2UmpbnxE64f/HFm70cib1aniXQ+nbSltBiyqJ5RNZJoo0VYIgSPEP1jWcPjlpJgab4HNdrvpJWXy9C3Jv7V/8Js+5qei+qqfhOLPyhQksIgSFg+RZ+n2NLOjcIaWE40Nl3AI3a9zJ+KAJUSX85hBMF4TDDoWTIHzJTMphsyEpMTIgVxsROaVXeDd1kOmyGPsri7eUK1eJjID47XqXFFPSlMXw7XclL1EvEUR15symzyMs3ToCr18uOtCBEZAldIoDjgw9RLxVxOftX6isli8Lj0XSBtPTeR7FKrwUNjmB1/Gk78dszGqFKTRVzhxTJbjgQ8L1P/qLaWEXqyc64EGU1nIF/wSMW9OZbDRZjzo1n65mbBOJ0TwwVcqxjB8zsbMyZVZAM57fBMYrC2tzQpxlXCybTFES5O9SPk4DlFQrHB3xzr3R764hDB+BYQl5dFed+UIFxzr9fjbTk3MLzqsFym1pLpMELxB9KEfxn55graykfcSyKPNBn9+nEPSMv9iK3NFmZDqbLfs8wQulTrbYMaeskLnxptfyS9mumgv5H1MyRlxXVgaD0IDXBj5gnHdrVocI1r+nR53QLC+Ag6dQJm/dmE1wMEbaPJ5ZvJ7Ii1u5Zdglig7zMWDa/G7Kbu3SM2hZK4kuaB+HTW4p9XmGJSvUNWSuJR9ZYtfYfNXEJSt2ulinhjYSLvUajmZ7SDlzMmCA9nkHpRVvciYJkDUCHHWJE4ALOYh/2uS/gOELLtoAe15eTixvCv4QxLdQLrd+x+z0KL1o9J8kGgKC05obOQhXjHFpeyGkDc1J7qV74PxSY6uOJLKq8/gT9KgzG9UL1TLmdFAee9858QW0vWe35t8jaHfodKcgIQ3/HocTQoduf1Kr4MzyW44dl1+wXL31waIDp24cebRuChhBrEU4gkqM+Fs7wYBa7WUAcPp+XwP96UGcnBQp7aEgrkK1bwacylzhstBX3GO37Pww4J3Q8QKN+aJGfes1l/NAjxk/NoK6vKk4SNT4yvBUZcNHcpk5O5xD/Gbc1q085+QnEjUtRqxEwlW1dxw0Mz0Jdnsa+ok0fuIDAtvIlL4vA894WDJQIGMnrPptsAcGA7vfJNnK7mo2ZmGzoqvjvX97fTS6I3E+g5RT3Rd83VChSdTg6B35FtOW5kCiwVfv8ukwZe2rToCoLq2sGIXfgCE/Se/vkNMzFTQJky3TFNDNIpa41/2ZIBwcgjvpgKAalkd17vFKtqbsGVklcJ6BlJogJg5cQHLKIMz2MEU0GLC8vTlun8tPcJrROKeuPqwr1FtaIqg+5/rB+qfITsD/kz+SoZ7S4RUddCrQZ77FxkXA8lyPeGD/BRm9xrTE8HEJI2Ajd2KpbY1lqUs7tX6jAtekLDcaGPOaa4lZEAYQqZs/hXN5rcNQUWuVfcR9gAnb/yKVsVqhRMYKJ4IXUIv3GHpH4h9xg6C/6ZQBFFZ6KlUPjUwADmKmCU6+Rc1UDMDi6JTHu5xzIUD/UXw6HZLI33MIdSGuxnGEEv1U8zv1DIgnRXBEDTn2dlgewZb4WQS9zOXyHdYIvbgAvSiXVnyLoOIHAeM8txgKkcaz9zUyVfHjhNRn78hc92CAdTQadFvo/uWo5zg0rbcajMR/uZRb10J7RyJkJy5i7q0jo1yKXAZ2M4Z5kRVaGDNVRz1crwdy419dRFzlWmSdAhF2MxC1TASt5i+x5IkIOhgHmUVXPaYstrTAATs24U5xIKa/3XQlEDrmx/Bw7/PN+OlnlNK3kQqjbrzf18bcu+pFX83uC7c4V7ZDa5liN2hCjzkXmQtiQSJ6HEgeKR0Vba+Y+P3Yy9wGC8Tb7WZctp6AHwyuUTZDshdhKssI251M+ndXRYhlNp5ATBdVqUM+HTpYi4mp/PUFX09ID0StYEqmbfduX4vAVyKAzyip/nl1SI51gGgVrmAVlH9EniguFaw+nF0llN/WPl2oos7UZqC1HIIDnq2NAYFaDiai8/j/iAbIZVhBSHDpW6
KfDJO4kBHaLaW6wMjfN04xF9gEaZ90o0F/EEGW1ELVBxXo5o6sFTQgYLbj2woKlWmtRV0Hk5xDC0KJkUpWNqCa+XzCXzYr7zGyqUnpikkcbpbLKOYS3L/Ap9ZqJVZ3X83bjMBR2UuhuMlVat52en2G5cN+PK3QB4BkzAZgmB4vPMrqDm4gJ9CTimG7IrOECLIw2qOjqCSPk7To+ZB7vozFWFLhLftYsdVUriiMxlipgk4K4ceYCnbVQa5iTMT4pNIhc6PeYY08FnuyWU8VPMzAZxejGTrA2CmwAIiMLpSVUrmgsJYLwq2SCuVnX49vFuWbgqY44PIydRruEJLT4xJEgMT1RB56XHqYYIRGhwxg9DS+VnIOwUx0lJT7i2fs19+hJAYlooXFu6BSRd1UZ9STyIyRl0ky5E82eXaUqRhXfWoG2Bu+9Qlzjn+iYK10pQ0ymoZEQmYrBOtVkVsTobSNuoRYuIZyhG4rpgBmjuo/6AzZcMMf0CgHgagd9/NVSX6X1gUnqE0Obq3WdXrrizJ+WW4a97sTnadWXV4VbwAYdG2D9Vcj1n8su4q1rSrZ8jo7UdqNQq4Wjp+a7w75iiUqDiEQlQcByo4aBU9XcRdYxsTveH8TibK2diHr/IIKhR9sOwCcuVfd+n4IQgL1eBjZDN4KQ7dgY6hUmLcM6mnDfzUgMykNA8HstEkavqioYFVjn2TuQq0DT3NmTPiiE58RwMHXlCfeNHv+I78DqLKKlj+e34x8B+nTwqf6bb+x+UmPI6kG70VQV6AueXw0vKEUAE/E6kmkYdZIrwsW3XWTKVgSkg4c/0otYiq+6yyMddiW7INwDB52EEV0hwWxhHJf8aeKQnVmOMEZFsyxUxe0cjDCNuBkvMvEIK9XDmBJLDlr5dWYtl3Duk6w06Xc6Q0q98xDjLB2GuFL9KXXoTkrk+36c/w0OlqBmn4yN0E7d6X9O5k/CjRFUmHJDgqXXwQiFj863Dk+MBtqYNCgQxeSW4IuY9bndjTIBvCZUNV/PhKQeo451qrNsLnMCEQVgni0JLoPQjGq3c0AkL7b1/ty0aGZ2G+U9YZh423GjH8yIkXdtvcXt5yRvzgaku6gl2BHeyijfpsFoxsGHGQOGbwiBtZ6/Sz6q2O1vx8NWjrVSQ5pOUZISmLWv9CPJP4D9kAC12HN6aTQrTsvCS234IqMcQzw0dga4Q6U+1p3wOWzLcEXTcZMYMF6i8dQ2kIPQpMlx+ax3nwp5cnsm6FIKsqiuJ4wK/CeDFXpQLbkyYxXLXn2n1iqJUW4h+TI6BPAtFwe/XC1H6pu48nrAIvND2lPtQE/TD5YkmVvKCaPsMx5qOfHA5/T/HRMQ4t/pM4EJaDWrKVc7KxfLPfX54Xi+a5m+geZXuUxIaKZpgv5khDr6p90UO63h+cNlWzt7UTEQQYm/5F9sOsx4WBIWIaNrt2Dp4b2vNJc8fUu1pJLkd5lttOxcGHyAERC/TdB/LpkWA7OFQZ8Otujmy1rhQ/Ci7OaHtrSgyxvqFucQB09i/VaHQ0yAozIo1GHtmrf9ZLczEJVQ0CkRpcP3L1MkP0DOF6pQrguFQvrBq5A6rqnc9s0M+mruGzJS8trtV+ZeASj0hoRjxFb2H+MTsPM/rGlbrX/rGeL15knOs5l/0eXEut2QTPSuIoHSP8ZjpJ6i1C5vhWqK07FNuL8qmFMsHxA7WGcSs712jDMSS9Sx2ETwoQHudneXRpeCDcx3AhEantxT9+AkjAfuX39PBAjWlv2SWVEPjk7IbdvNDq5JdeAn18HI2/H708FZ6+/uAhNHISGSAEIPiAEomIZlffw94zgUzvHFSUyHE+D3WEVy5f2Wylu1Heq5hOQJboqNzdiHMZxv/sDNvEX+CSvTvBwxnaJRt9fZXDdkb0inWTcBoLL8GnIFQAlxxISz5Oxw24GCW/my1upRh7djKBT72qVwsc/rE6nqsJnp1SOgVU+xBNsNkSaXExNQ2XxQJqtwQN8X7116zfsMdqr4SADs9VbqmoGNHmbn8Gtur9ntQFzif2bY46IkixPnv1RoevWkLmImIE+QyedxJJL+lKTJXuNOrEaLA9LVow3+duXiOrVHfUYNT6oQtPYivHP9b6eBC2ms1S5H0jn1fkMyi7ClbNI3VLQqrCMyDKSu5YJLv/MJbPPlY5KhumyIBBoJ4FM6wl2XOHwZbooSP34JuDFA41bXXQyl7PELyxp8Id7Ym5myuuU/NoTQb8JXUpRKdq07psiXPXeuqLrQObHoahRfJgK7Ks2Y4tTQF3/+Z2P0f7xwt4twYnsYaKTrLjXQ1VYEa8bCCcrCOgQELCGq/edjxa6EI7708hzDNVFYmQIBW4dir1mnwPJJaglENCepDTVwSjS/4+rqMtyXVqD0OIgxtUT12QQ+hRwGauedp8JWrOLn+0rXXbC4LohAVEFxTjPVOhtK7Rdcu5kuBDvh3p9ZEr1oEyShqLrxsjZZwGiwZtjaOvk75vwUUFoPJgaQdR+RclWdNZvDwJXIUW7Qb5REZk9iidIHsJRFhYtTCkb9MtSaFaUDUGcAUoQ/x2QUkCg12h194UeFMIXh9YU3KWAxq5S49lslo89RzxAw9suf7mgVeihKwsihTpm/SYFuID68CDQw9wRMS90nDGXIJ2ObYEiko6XCDhsbbc5uCwBMQYXqrEUOKQkBlT8T9i/ju1V5QsZswjJ1g08JDyk1iLqZh9+1qQN76kX7OPQg9E3kndQ92r4FZA1/JGqVoNQDlYU1kjxeDDI2jJxKId+CD573by+JYpFDbggyUJ++02Cr9Q3B5Br/Ldj2G6MzCXslEdjhxKDxS9qi+l9zFNHMHb3QHWJRN104/trvGpsW+4Hc/A3vltp9s+KzJXNuDoMbWfupam1ZffkfIkE70hVBRFksqhNityyQRFYV0lGMOyBbWqxXukBMjEWAcS6NYIIC5sm/uSvthvSB2qxgTKFAMzGx5Kk5QW2xKORjbBHQu4IMLptpuQQQCMbmOKTxp3E7b4ZUL51Wwv1UpdV8RH3GcMnPT4GnHQ1tK0r+Xmec+aDc5qLxf4CxjcavVpfEVpGJQaIv9ltIEteDXiiSHlx8k6JxC+nfHyEwcP7dFkxj7gCPd2yHgqPDi3e56IplLXH858FK9hhMrUyccNyIXOnbMeAD2HsVhJJKAMUSENB88RlZ1kycaFSbfmZ9A5FGXhe1CryFyTeMdoa2Rs/CJ6VqrGqHeWYToLA46SwDj0J6UcuiLoZ0gGTLXb2KcUInNMX16VNJLJSyfv+xB3euKjs5OmHH8vwg6JME8G7kW37Hj/FVH4akOHDF1Pg3gtoSDZLKDBmq2F6jkKFSHX/di6K5Eh/bSyxpU4+9HvVQU6Y8TQ1MmWfVl1JBo6YSw/AMGonGi0aXJIySHemM0vQWVLCtWu8V+M5Myb3p7/SKGsETnUaFntA6YN9Supxhl51O0atruHTzMU6mNE/kHtNr9jwTqmm2xHUSrIjfDJGVCJDwATSziv0dFEWjm7uIkoB6IoGUQVjMD2tbR58qjhEoDb2g8dX/pJZc
4Rlifv13z60UzJyMTZAiueX3KeOIB4jbog7cDd8FnHijbj90YKqIS/jf8kVbSawR6tJuTtieZMf0qQVwSBdRsuO2C5k4EQxhqXiv+ROoz8THI7gFxn875mKvY5cAe5jIxfw4r19mQhg8tBP6HcawGt2ODaEA/QoHSi8BAeTi8HW5QazB7+bhxE31XHsopexQ50zvTf7v0TPMafSHbTJ5xRU8XjIw0ZXyRKOF2UQgOaVV26jofBr+yogie+v1RSrr8+64645KqzT8cUQXPm60yHUlaYJbnxINyfMrZO1myRXIYFZxB2PDfhxFe2mu1/CLdKKJhXtJ0Sl3Az6AaI6ife8qETRk+XTlL2FSEFhzX5qYj6T+jD02NLg2syMZlU9M1Mev6wYrVT0Lr7j3EVcSmJ2Rv1rbi6SqsG3Dcm1Ckl8PxIHv+RNxkuiW7eBiYV4RXxZSw9Ml+3aGFIve/zzrRmA7DvCdbCXX4Ozv3sPwo3YndjXRGjZju0X2qsZsRQSXEJUY0kXzzocBU7JFvszFbAvjan9OJoH9llcB2qcl/MW/qCY8caM5+OIeUfFygg79zk/DaQn0Het78DyRyD9sGJNy+3XfPYKTbRS0Ka+mP/QcQpKatXZw6zRatCXbI03FHGWyhirN/D81hRe6LJMMBVZudK2kp9nlLTIfbubFvYRKQsiyX0A40BNUiH0s075vX68zF2zVzulWW4Fvv9yOGqTJT+6CH3LdE/o8TJKTYnwzPKtjv+Yb2ylJXNlOL3Hud5jL5W/cQCeelzAbdHoTFUETIGxiFKfV+lo45S1yK5BD0cQUyGo8GxnRiuN1QjODi4XsD3gbbi1bOdnEGbTdtlkZu9HWN30ed9G6BeDypfO2QOlW+l6gNMANzOynMS8swp3l1IgkBXA26EbhvOi+n96LFuO9EMsGOGgwnq1nnJmBzqigcs/Zx64Pgm9puHobpNCLR72D0CK6sTj+h95CiOicBDhcCTZvvKbJx2oDEnXoSqxa+wh5R3XMJ3XUQdSvktc0+l0/l+eZUEONkmjwYS0k0Cc37XUd3OD/vKI44Oz1BVHIxCvwYM1q0CpNuCKbWXxDTAA98XWUoU/B6TSvIYjNNqQtoGZJGeLdOYChcWlDiAMjBuO54hW+KCZr46sQAG98UG4vkJoFsdPwUlHKHx6vV9xptRPQ2v0pO002QNwPSXXaifL9aPHAQOMJqXO13AEHmd3xrxZMp1nq8oPZ2YX5v2iVFqfvI01+m4c98UuQhZRHP/jNkOYsu0f9CSaF1m3WrBlxN/OFIh2wrfGB4qXrIvUZ/ygmxWczW8ekal4hWAoOeys0g1gDR85D2mqunv5OUnIIFCEfn4k/GaYESvrO4I+fmIrd7lu5+4hOx6i7z3a60oxV6D17B1IpWVbYNmOyodmuAUXRESlthIchptSVLJLIEiJKMaqymzB13wAc1D0AJ+pJUunKdcvMR0hIBZZ9xYY+/Af4ikijI3POR9XYtkHcQAItRITmRq6OnVbGe9SJM1tUUmXggTpEl3DkH43cAO5zitIyRHlGUjc5/X9RnKQzHruz+r/9PSUpNxxR21+blU7t5AYze2uc8YzhOiOINl9+7T3Zp66TzVaUIH2LcicrnsJTOXg3ONuE3HrdPCrlor3p3IXn8mbpFAf2DLdutO6Ip23Ha0F0DIrLiRBcfAyfXTIYef+F+JcSXl3P5AGrB5zPPCkyeg817fg0ge6OgJDptdB/3E4W6cHWZbaP3dY+zgP2ankPJY8iestlzrM3P1tmAX80ISSDMcHo9jkG5aN+7BcVSGZnvyBH0vMX6ixi/+o90ezAMxAAAABjbtu18bNu2bdu2bdu2bdu27Q7RQU4ABTIfuGalczGu8qV6Rx36E7pQcZ/nqLf2aw07+8ltPAqlCXHnYpj7eZNnqCRNVkguSCNATnH54Mw0KysC6CPwSWJ1KIYaNZRKc8CAbqJVD90dSAe/jXh44Jd7MDNnIGlvg4w6nq4elKcSiumzrAhWcTNMAsxOScL+IzmiOAMz9VScx0WNIMdqxtXAqIUkRth81AYdHOUPaXQYySBN1qXWQ9+9rh6LHlkw1+uff36qVlVFRKQW4E1bEsfz0VHIQptLGX8TguXH5Q1W6q2Z74U2Gtqxa+bj4nhyAgjcSdKHg3T5WS9/UwAVogWAqYQZ5KVgkNh2gMjRq2n+S4rFp4AtkC27L47SoxB1OTo6Cl/FHs+fwOyiJGUTSrdmdOrLKz8UrDekR4/i3dfAvUfb9PLRfgI2+K7USvHLxHm7yMfbBYySF2uhhqn5LYgwiuwVZ5WgdPvqB7+HXNRIj4DKopVd/nOgP15yZn65VuedHt74h36YA4NsTOvAlNGSkuayG0ocSBgMW5nhaS6IX6adwQp8UKgv/JZJn13UFzKM3B0/f0l4mr2RVfqO2tQNolpAgRNtEhtVREqUeFHq3qxhvpHFO6Wc7UNv8EI4try8K/2eDHqVGrSW5GT7Bag/xdDxROPZEA3mz92H/tS/bflkGTyT5yAIjmQPlbIXXfltNjMLSSXHBwp8R8P0q2xbxoAAYPBXwLUR99d61DvKS633qymg9D2Vij725XDAit2sn4ogTbqn+Yz9Xg+vqX4wp+YDtPf6bs7p2nSzfEZpWN8TgRdH58nH7vCpyxZ+ZPoCsx5slx/V0eTAeToDTS6EILlsD1qVhs4Mo1xDkTkFVYoKtHsDFDsPmrUOZmR4ZI7jSAvONxUN0V2x4gm4aN7PnpAMtcd1gbKzxRRiPqssWlDXSkjKkq+3GPy/pYCtM9hpgeKKhPJ0Ex6OctNHEf71AE0N8sdLgIzVqj3cxN8ZwG9K90KODVwVz4KWMI7XciBq4bWk9U+d+gMOgyuLUW8DLCmPgK7mASEw5sVSTvlafjzVVZECVC6EzLjp/GIeVfxO2i4FZpicwAaFc9rQL2Oh7zZVzVv1xuRAgjJ4F6jn4EnFZRRC5+33ZFC0UPMbNmGpnzOyjg5UceX3QC7nf5aW8aHamXAV3ZjgcFzpiUDSqhGfBRenF2SkoVSUXLzTxG9MKIBQxphNiiVx9hbccFocsHQvYxtC3fasvN8y1P29R/B+3/D3pxhSavNazG3ghRyRNVqHyyYS/A7oeK3DOKR8O97ickdMuJT58dJspru3GmiO+KHxdVDXklNOLxdYO4MscAxMFdZUl73ik3j95GxTVgTQtEKfuxQ+HPQ0sOOOBtXRfE/CjzwMDhdkFlsSATs017hNEKUwpdIFHl63wwgYtMyUt82y5+M8hwDCE1PGaRhcKnOLM3y6pxj5i1iy5k8T1tIhwIL7UsVPyC8hUA5DqUyFTOtknFnaqUUQYBrSBUZcSU8+ZHwyrb5PnhlzAXWXYpYgVk5PD0h1m2FOQ623He9KBj9LmcP7UC5ikIaMPzNd18y/PPq7L98LTq8mgt4VQtgVKurpI/Mzv+af69CVJfF4a0jK2gM+E1iEallSxpqHtMW0T3pc/FVA4pAFl0HMCm/UtMEsWEMsENCPS7Oq9tqYM9HBD4BG96VSl9C37pHWQnZUgkaVNz
8w64qeEDP62eVEhe9Popgbfse0X02itstrU4GCuqYtByhloGgNGAn4CfYmljxdIJ90157Qctp6Qgyv4/UeTtL3leeF4YsDHSM6ZhKIB/HRERpNQP8iDgkkqa1Se6hq5p+qsURFZCSKWl+t3ea4P2dmz9ABVbV9/PW1qteSLR7GZWb9XYESlawld/E3g7g2PKmMCw1HtFONR1C3DcJD5s8k9gPV0kkU5OZ+TirImL5Vk35BsRszmrRsuver/Dmpg/C+f2Up75PXZSCqvZbwolX1wcdHGl5cxfcDY+NE6xgGAh+lQs7AYtFnxjq1lEn1QhsKasfWtx5PHAXrsBls90fPDeX5+nu2iWif6RN++TRdi6qYwnk1iv8IfnqTOJPdNuYPCgltSKu4T6gPomyolYfWP/GkKZX4Svl2Vgsjypejy3TkMwNo/D0JilgEw1YQTZ7SMlFzE2nYjphIShXNWUFjd7tvd/qFtLchMfSkOhH4XEz0YYUM6kkerYvjTagjcyM/Gq9cMCR8QCIkpE7sIk7ql/of9dKYfgRGQpFWcUUSxIbCSmBHbqQe7sSULx7QUqsxr+UGGvzOPg5kLvOGzuKStBSqb7gqp6srTpvj6FSX9ohgYb6UJg9Mbpg1kjoARBzl/DLM5mmggGhAxGDYt+0W38TztchrRpC+gvnDr/sEVUfoQgOhnGHlk+RGJNUQccU6H4LKDAAUdYm7HYGA2Ug/iDpmrFiHkSW8raWqJx039Xar83qzp2SYVMN71HcsGhZiiVlHJp2SDNi8TynKH/PnON91DXUAaOEXk9yFBHTomkfk8yFNK/cwNc2NMgd3gcAKn4/HY/uFnaB5WMy0rLOT7WVWJLO3hD7aYC3sChYwzMSoPldPMoFlYYf9l/boToKYCdMfyOXciU2iz9xk+5mT7bVIZ6vg+PKHLqqNP/FKPsw46wR8fl6cTrfqcDIAcY9crnjbF6izz3iday4PMaUKv4UNKEZdcG9NOAKBUHkgrrzoN6Aj10uJOH4SyjO28W0u3Yi5oHIYyX+h5Lgd8tyjXXvUW1Y6SBftm7IBOSHDKGCs6zJjsolZo1a14Ag0nfCheUVcROGJGKIuW0Ro3SGnDO1FQTfOkV8nbO6cyOTzDC8VWtCBllzf6/MqaTfFtfCxzQeBExncI3k6x57tUN5SiQ0Yd5N05+MOKGXmMYHG5AwJ1nIkB8agwyrpUdXjnHWbswls+k5LqM4VdT0003O4YVcrDcknRp+9noKGZvy1lyuXvMHrVkI6OFd75soklxlP78BgoCGQqlf9Bi9WxrqltFFFzc8ZD8yJLkP+rSsoxWEh4vtjdui3gS8JaZyr/ZnwI45oVwIpUdkYryJOf54IlSXvNdJAMMJidJ74kfesEJBx8k0Kecv0iU+ynlZG0BpUvcRZLo1T48NfiGwPa6K+5zUVI6fiw6kP2Lv41UcskVqI7JpNbri52z45wqEPTl1xwspPCh6KobyHwr5ne8Z40DerJ3qvZxs045hXCioTtUJmHXTzbJ3Uk3NphZjrphioUZGn11dCQT+VtpyZvtbwtZIO8+jCOFlLP1MP7DJzm4WAmMpwiPU6jM92Tz3tKNxL0APcFDbf2a1ypT+aHrofYFDHdwqBnKswxOVvFS75s9PsHsSYOU3zmqZQBXmvlNap/7hU0ib/LAwxLlvvR6cXhVn/0mKYNWSfowueI8mLgpFKFdUXEjPwo/KmXL39k9HHIPQ9PEn9zOTK7+1to6hfHvEEN05IUxgTIVIR5/SpOSbXNOBa9hJI7BKcQ/uLoixyl6V0hGo3yDdIqP+Ol/zqYb6apPNQPuTnqQYxq+HON5xti9s3frE8CutP4PwNASZNQ1NzirnW59wBmKoogcVfHKdz3Aj2i6gpW0XH4F56RkvUOXj+eyosnivinIWg0SQwpAnsWLaAx+5v0D7sS5NnTgIera586NTAyFCjb3Z+xp/EFT5Nf/rBsbye44ukOUomj2jtzHyCMZyuxKVYaPekEolOzSSAYXjrH6T1zDWWZH8gqQOczhzUFo8opGA6JA0+u8iPmRm/jNJuwFOMvBKcEYBABCXC/3Lq/UWHZ+wj0dOm6n15iHp4cNd4ZbmV4srfZnuycP1TNX+cNKnFtQpOR2aVCDHJN1YRtzDYRldUSeGH8oROcYNY7GaJdGSFEZCA8odXIOEutGm+i/ggN3ZlHAeeJlm74nHpHTxaTuOHEapBf5IK2KT+ne8QU57rMEhe/gX6Fr0RXIK0D0UhgMryzvlqxqggmYEr9cU4t5/3OyWBexEyc6Md59vHwgZi24MO+PLZOaG2sWBIneqHtNWdsFtEIBjOnCUf+EZb3w+05hMKmsLf8NyGwvtVR8CkKjXOLrdq0RpEg1TwqxDM1QvESIdV1ZZJVU99yrySFuD6U5MIj3scFSFX+NS8Ql/xEOYzTrEBeBUXR+zhfllrmhIjsYVvLAA7TuXJ5kNahBpHDGjfDd8+j1sYsVjsviAOPGMdbErUdDo+qlSgNadkmOQlqBsVwmLN1yZDsXr0UZkwNdEisCVqXtgv66l8hyrXL6YaHraMCOqVBZXQxlkcsqGtw95/WvABxOm9MW44/FhbaWlCCKyXrVj3zcNx4egviwcZnFpunrryNh9K/hpS38FVO16oo7NFvhCthHIvuToFhwIkjWvkJEuS1klOpJb2NNiKgLdSXX5/8T182L37lcYcFNVDjvp1/2i2JlLmpZp4YMKQUbvnOp1Wiq/effrTVJzLpphO8s9iaUotnphZTTNKQOen/1IJn3H1Fqigh0vDymQZ3phaq5288NDI5YoKQcZmu360Buh905jOxNhYFvYyGLMA4p+mBRtQ19vkwU7oVVdHtwiPM/Kt+/v6vA0l3rbEzOAf2/zscj7iUe3oVOaCZx7JzbcGiQmFgoJaxF+6XN/7V86xi3sZ3nXATRXvlXmxHJdurOCUnHPTsdSNAmAFuYJ5KX628ULhbENbQ6HkqLdHl08DQWy7SryKF9LgpU707TMv34kHCOcJC1+nfsV7iwOfOXWZuvm+/kwwRm0TKS3sYXiQpxiRqMX43IIHbX/jBXj3++yfJGUt7xVjIQlgHOeyhSD+eV2/KRB/U6W8Bkf/g7WCPG6/mFRSf1/az+IXSAUuaKi12u6BlNmxDgLFGLWFT0o75ZZMlHWWRSAHneS3AePU69a1DlqXQajzKqCyBQOkhzPfTtoLfSosrmGLYiF448Ut3k1N+hjVzk6xO40TCzb/i7xQKL/UGqV5C/cYGvvXrL3mfC6a8mBII9ySDROlZrmb2z2BSCx9gBmYrKFO7cgM4MuWpfJr0/yHJEpYhNoV68t2lkZo+n5GCBKdZBl+R6AylpLkj/W8Qr/1iiMgFoYNI9PS70Lci1Dn8dbF8OY0TgrvWPMEcOfCRF8ZqtWp7rnmIJ4mUf1Le7OxJPdW+152FMxPHJf0L95W/PVHc2ueVrXNUu9Tc1J9xJpDSJYLWnjlb9PDW5i6Z6iKb+52CDC4fHseWAjf0K/6WfChSo0NXo9t3O/lGH7K++m5CZTmha+rV5aCG
3WXAQP+7ZeZG7D6dw5WFcrxy0CfwSp3cV96w0vXy8B7S6wxlHlcuQdhwixhklulxmwwmASX0nmHB0E6aXGdv+ZU+3Z5L8mt/ItTlsmB/nZ6X9okeBnVfavvKvcXRu4h6at/+urUlNEHYMN5wSm+D3GeUKldWPHFKpYdXvLF4RUW7NgNbDPZppnC1l1z27rKgamiidg3Jwa/3E3km+6XV22b27WqTFTNZrgnNtLC90QCjv1EodsWKsR34Dxdz78YFFxUlnLl5eaVjGi9UPCIgaPjESW7hu9wKX8tF9Vm97sxk9kcNPrkEOVbt/jg9FSIrqx0CWJS3z0c1tx+bsXjnfnkLnWa6izthoxwgQYsg8zdDnLdxPcSOeBS2htpjIJWUgDUK265WWXgyYhcQv9JPVGutuyoiohC605NMfFvdKWqgiymRQozcn44u+uDJKYKF4db5jGUB//HxhM9E4IA1CNVZKigKAHcyjzBeccn5g7bLdtZVoGMrCsgKs77YkZUdLsdE0dFknOkUljtPOaXf2NTiioR7CSXR1pek+BmqLLPSot0fuSnbeLDLhz4SPImjQWT4tOt3vO5HrsZTgT1XsNZ92R3Oq87nnKrmijJUPkOJDCUt7ht59xBZHbT7Q+71iDH3xZhy9TDaFL3YpFMm1huHi9xEBfJAgXYBM8VmTBQVHh62em66I5i5qjR0HPdV3uPa8PCvpEYukinod31IHUhafbHA9EnlJeRGWCyOEUT6q2g1bZ+W/BmSjsp6+IE5M2Yrmc0FzmpbNgqWH1yjyR2ON7sh4IF83L6nDW7FKEHKIvQHanUjz3gcpaPTKJy58YBkV0IIAWPYM0pRGDLfdJU0AAnOTyYhfV+E+rH6T3Pn5WA2utbkozTeXlSKWaWSyLFk8TMsZn47vrlsDAbyqsGkpDBvYihwiBUInNgSRJOkS/8bZE2qWtcbh4mdM2aHon+39nXs0wIx8KaJZVvWgHhbDl3SplzzSlBwjbdC9O/KETV+SNdq1AZ9zkgIBoO4XHQ0MSh23bqpYhwxEgAe6Pe9GPBwiThPuc/dK5D0J4/XWEmCkJsFmMnxpBElPgdHV21kxMO1rm3YPmn4q5/NTSZTED8Y11WHtuba4MjzcUQNQPH/zysh96BzzS/BYj7fFfXMXZFfpicKQSpiPRAQ+ZrrPmpSqoGCrVcoJePPu9wdwcrDx0NLwNpq5S6ehQJ/PNIxVNL4gVmQbhAeYpmIQCNCdN+g+R5jQoLEQqs+8fH54gyQBlXdbf2iIK4IoEwRCkn2pM2Ym7CqQUPz37H7OZNgP8p3ySL4le95kOOhXEqJKQt7y/MDQZRCmSmRgTEJ0s58Riu3yMUhtB6ahMC/C138XRzblIe3oyZ6knz2ak29TlOiuU9iJm26NY05sZmFLBD0rmDnfk+vUTwDQjIvFIJFohRH6nlKI5MIjL3I6n03d+0EO/7v56dDvWuB0o03Z4sQwLO4fgR7KPSytYdjj8W6NV1O+R5nzUWD/Bk2bKNxnUHZUAQLx+4/HTnFsUJ3MjqaO68lGatDFnrmh45e29eZBu7sGlE/3Z4zyRdpkQhJoxgDt6KuM88DqKnoxpTiTJ7u6O7vCSYehKBewbsPWJmyOnpsIqJ8uNRTFV7ufpJlnhv2Wcvw6NQ9v7aNGMNJpsXe8IWfCOhTaBd4eb/NOp2Ns/IMgzR5KdZRCKXQEsCxMASdR1T2WkZu/tm9FXwtOLPV4GV7Hx9Sr1B2UJ8YMUjZxmZeb4ZHMXwzuuPWBew4q8fPSH3uGMdRDhaFZFvxYaux14HMUucyicaSgKhfJqUAg9I7SLbO08NZee3KJ0bCH3zo5IwuKhRR0/9eaonmM5zTHVE+geeP3U6/FuCnivpnDTH+03jR67ZO+y6FE1ZUdKWIlPMZu09y6ET7O9Qkq0ZHeLgBe572/l3EzFbOat49mhFQFdaMnb+J2jt242GhGDnbXeCpRzdyhtIjppCxzyVMLhhZlo3iP902/MpLVlH6w+ejoCyuEeO/PmMpKpuF9ZERRbcKrOt5IBOazdAIwg+VegK6Ez2d/gtm6AMlXIMFF5ZJyeRPdiLR6ML50MhNZbKlnM3fNU/j5cY6m98UYmlcKMT8xRp7QleoLgmelMmOAohx1dKZysk7o+ssiQRT5yHeLYtNvA8wtvnt12Gcf0FIbo5BbMk73bW6VsX5BGMYpXOWey5o34Zat/W320ktSrjGeVv9qQAHh1aqib/Ojvawop1LdDkq1u/fl0ZTsfXSzduLsQ2rTNp8kp+Bb1KjgE+MpTgLDcgnjBSyVdtrzjTd9Yf2aY3Mjld5OhxOVLvHAiqgj8LbxYAPtlp63VCeU06U+MPvfmyKEccaRG2CaOtmYYTd2ErPAJRJG270CxhoNLcoyyjd7WK5LJ+KufplePwsmuvHAT8SJAgCdZQnCmMrU61IQAnuHnvOZj14IzHRDD6JPs7XK41TdU7txd0/NG40/PrLY4dyfRvlhigyg42TLgD6u9X4MkQhKIeQ3nlKRVeXs9rSouTxXFVXbPn9wChXVBv8BjoiN84IzgS/19co2yFlwdeEoSVK7kssMZLpU/8W7vp6Qry4J0RtgcLZ4gsOdAm2jTOoutaUmHPXyyNfSj5P3gkEWSwqJdNnXuCiFNLADjlEFPUnoUoujqnGCnmZRW6NL+M+UP7Rsji60nte4VwvR4/zvyhvVB49wmBFBEEmZ+/wmZfEB71xTMNtybpH/9J38uaQ+f2wIE+FTHUhGDoqezBm6HYj72PNVRwRVP50t/IgL10xOwWPMJZUusnZBs8c6HIerlLcMnEU30Nv9UAtFodsH1Fop2FzOpWNNXsNMEfifqSMyMBa7pBrDimx0Wnw3FgIufafKNWzmiT5RmhZnEJWFvdi1nbPFgGB2h1y4tTsFZ+WqxHw0hQG1k1XZ4Ahjs4/I8GMcsBDbkrzlX83y5QgfOhvYnsoPiYl9BBxUOid9Xp0ds4R2gG2MBvVANMBnR73nHD9NHSytBHYiOnpVF4K25MLf9/o3LfJmlBhQxHjBu64vl4KkdB57O+aYXHpx7HKTAub0zwqW6taDt8lkro9riIskUKqlnluU+lnIGeizL4MP2piFrGs9H8ZSI5k+DwwNDCWVKvnDOJBoEBTXcRG3foC6suEUowK2nw7fN1Q/LV61RL8QCWxeuYP8CtOqR108OCtQHTcL5apFkKRachaDHZGNUkLV6VL7xvwkg9yi3Sifnaaiuh1Q1WZb4+rU2lFUIkERN38Rv6B8LOaLRKlLdmPLHrWk1/i+mLPuYojWk0XsNGht2lLXG40GH97du/CYXofjBrdfxydmBi+0nqT0DqYZqGVs0NfM7MI8oUL//iVzn3mmxzAReKKWPpu4e4pCYGL4wW4P2I3y2j0eDSJKKTBuotwS2F2PlFk+wJyTMJCOVIGnUlVAKayBe/E3siWoYGqsaldeGKV/XuCQlJl+yJk8IVSKd8YeJ8DrkoiEasTx6R5/8O05fqGlzi0Y9maXTNOLNSkkT+jQlPggBR4X9hueYpJ2hOIjpqAomDcswa
z76xCo2gk+W18hu5oe272e7MtkxeRobATDiGREsrERgsQUMihIo0wnLoKsQZOBtAYAzrOaImrBOI6+KwmkIqfzI0ppLKnIXE+xnrxNBMJKFyw8DW02/iOexgT9rLdE72Y1eQzXckyZ0FVzqb6NjPxIhVrtLX3tHwfuUhhq3LYKIfcjoTBT7xqG1fqKHh0oQIDAFoeM71T0KQLbB/n49ORwasQZbkbTI8inxqF2EB55v6lo/nahCvZ6drrpTqrQ1XTpkzlD9aF6dgaL8jo6DFPJnh9HWtbjDQHt4cNYSAZI3icc4vtEoQnTWij90XLmhynhVGZVKINyaV0LrPCYLWob1Ix2XEpNfwqByyp7ja7GCkF50p2T+yFfnr8Ex14zGK5VqERjEcUE4YUjpgWycGGmrc6nyYUrMMj3oTmsy2w6AG6AjnmE3yPNDR6aj13vv/OTnE48X4XoZiybeDKIvQM6xCcV4IoM5XcHOZGs1QdDIoH7OqzxYRyoWt8CsXgtX5CtScunQ51dT8c/+7J6WLM0L8nuLX1eXD9aRvfhZhZVyYVHOV8fuKBILROQhbQGhiyCFdfIznXATWzDVFz59pf7ohPMFxgQ07ZJpcYSfU5Io3DYgwv3tJRutP97S+Q/G4XOnmJfF9dgxHCYPJdjUFppfwksiuLud3nGk8MjuYeX2SqXTHuo+lVhnnjX7t7IFWV9DFPay7dYHvqrfD+BNPHZV4/8yyLOMpSygC/LGGVn7cKnyDcglCb9OTlde2WMBYGOwXrwhlOxukcDvtTeHEzis5UPv64hlFCC1ffF1mg91v+P0+4di5T5CVa8DPwATNwGw0ZMov3j1MjNLnirV1B5qDcNk++6c7/QH3SZ5Ahje5pwv8SH4lqJG1Y+1Psr8k9mmmX75K+Lvmv+hmLkHVVK8hY65/S2ieky0djtFKe5yMguWjEVRkIEW/fNIpq7I0Ef2cUaria2v+VbB+gXRyca3h1W1nJtwTf2HSREFE3F0fyKbC9DWH6TdyZ5FsXieWaKaEfF/dHHJC1PBxBnlCcKAnNJZ2fTnuRScE7FCfie1Dk//y/KDPlYHMJ5Voro6CXRtIL10J8wtmK6MA52SWS43GxkIbEY7n+nM9uB3CZOnQEMWdfpLD4Qr0EhUftiZq3ld/9HyQZccAHwbEFdSjQLBVgmno1XSm2wGS6NSH4zq+z0mSa2Vcx/APi2X8WF9vfmAVKrNzisGRanbKrSZVwASOi90/8ZDE/+yqYvEB3NFpHhgFor80048937Eu0sj7TN9lozXvPqQIgOaTJ1BkUiYkWAAZt2DoaQVyNcRJvJ0aaA5BMAeXzbzytGdSxx3nlFpMKsrxvZhgNz01vHnbTenQ0FifQHTpHt+JqZyKQv9TldrCT0EPYH/EFs7vI1Q9K9nxsKegOYdeXUgVDhcst3888klGNnO2rSwJ1LZ5iDbrIfWqFZPCdXLBzw0XQnGkrsDwlfMz+IEwg8HKtxL4DpBXtBEn9lEMz1svHOy8mEu/xD10gRrB1kmsGo0yBHgKD6WDRW4P66+FuP16NJqOmxzXwOiznKmATM2JbMCMDFcWyHRFTvO1+nKcni2H8+nUNLNK1t/IS8aKpQmovU2cQCK2lElzc+6zJe6QJrsSP3Ymrkh2zL5/v+GAUlPiAhHexWjefuw91DRxtJmnB9RvcwSjWv8CLILceAt4k277xoEo56qdIM/Umpgpqevw+b0dvM3/IB9U9WgjNn5S07R4zpeXhpsRSLLdMKsSC0SzyaBJuch3RxlmjT6dqVoLx36vRWwKATCedaTwMA92pwvf8/GTtGisLBq/OlkNMVFYI3N6saITXgwiKihiNmfceJqoDpLhpJ9x+5NAF3ba5Cd3mDU7MeRCLJX1GIPzApe7w17gdcPnVhugUFrigN6IhjaxzyOXynKUOt6lIQDl86JpfkiDJXIkxLIHT4EbQU+K45Z9fItcJMbKwNPeydfHpSWNEjXluPb35J4jHLHKtJl5bVyZio70nMsXs1/NEIFnpqeXqBqveDY2w27pDePrdxf4XhAkZqsG89uAG68cSXSZpAxhD9yXiuN1+eFS34Jn9DDV/ViymLHkMAyU6mfNAKjYFLOIZV2mJYw9N2tWODjeRU9A6eN8u1xzKCFA3dDaSZS7QQpEhFKPt5hAwdosCKw2KPhjngkwHX6HLP2+zGF6UoC42NzG0Dl1R4lXE4OAK9qsJVhos17E9BssTXKZiP7DgeblCionbTmSecM/u4CULv1MeHvDDP+fYTJnkGXBBm1p0LBPx9oumWtauHnh/nuSSeWsxziY84Y3kA6W7Hg60I3eZq2Tcsz1p6F25ipn9h6V4I3/AAuZimlPwHQQsXta30fOH2I9marehQmKe7WGnU1PH/t+7MVGuqOwHsyiGLXdcDly4mt2eCRM3q/LTPsy/yntMovH5kAzdckn86mi1OyXu3enImGk1K95ecUsaozPe8l9VGF9Iqvh90nyA6JiypztSO3E5B68I+uvQpPhRufOZaBnDP39ZwBrfPn8LYpiP33F907fhmQjjOm+1QX8qMvHurcMMmAXP1OMJnhH4n4VVr8ytiYRVWMA1EzQkoYA6qWD1XI8od4tA143p7PpsxRzay6Y9kX06MaF3mDuBmy2d2B1dilCBzeqsFKAX7iJeNRjyGCctD2N0V1XGhADeNl0KF/UlojRqeXKrmr5p7OD1lYWp/IBMEQKrCOJ1ZNy894ugVJKZjQuFSGWojHHCWnJukc413RocfWbEHdMF1wLy918jjhrHzbDVJo1+KQFBuQx/skDzB7VaKwiG0blk8Ttoz/Ozsk5BuJiNn1BJZ+KqE3EnxhWOvCkJWiBGIQmtzg//ctduIPr4/kpfivuEBTszGCfkQ1FsPLAbJ9vuCYcOZ1UmZb8Ae7VpLE7mvXj6jVCoKmvIIIeq9MIse5AWwJws3pXgSYf/mBLHZVohKGXc8vxp3g3YOhXcJLoEdshGWU9IG5o2V05cjSVtl4KiwkbTarTPGF0IQ9HKsBXiwk5fSBIau3tpvgTvTq4HGl9YCGZG0ajN7y2+U3lCsZAv9CR9y1K5Imm+2zDfFlIRfN8o1SFH1U1Y3JZvkHA4SDBNLAHexnpUc4torUJZbpl5bxG4bPKGd9J7/CpXF7wmNqVVURtZDasWmecOMWdXzj95C5eKqY9XgJx9lCIX909p8bcDPEfK6mTxSNE7yi2oQ8jLAJG8gDgxhZMvrp0MXPANediIaV6IGWth6AcFzInsZzv6DfRkpeW1RluGsClN4FzhPGO0QJc2Dfu7pzfvy/4daOF5F/zo3pqSTU3YB92LzrMfODLlU07d7FmZYIyiEK8OulcJ9xofNYhWHlkdFPqPHkDf8KbpkA0oGk21niJXu1r/bdPAKYwpX/nTHQNQKKPjopYSFcQMB0eAibQTFBaHy0d0v/+Gcobw5r4M6HQhRKpnhDrEXdmtS/mJKMPmrSjMHRljjKogbRaKBTiwobXkOvhwSqicEt7uDiwI5j52EkGKOlZyDbACCXDZA6i/upLl1kJS2B9sK+
Ox0INxz1GjMcDp3dtE4ELPxfmxTB+ip7TYkx0BF55wXW3qchAXbB/IBfc78gW5aNDp1Qu6U+suvgTgVLwV8xEG+2OJ5ps1h/ptTtUWIhjpEwuBLG1tpDzqsNApH4qg4tSs0usSpBZFmetCBGUBe+RVJiwC/DrItNOSx+j/XB5lhy6Xmv2TSogqILBZyx+ltBdO47s+hr6MLPIovM7tqs9g2H1Z6RS3CySniK6NNjtQawlhxWxOIPTDyRNZ57eA3ON7ifERN9olXGZMDcWMQndVEKdBM5DNxPNh1fdTB6bG3tQahYUuK2dRlFyzTDyp+0M1LlKxasmTkN2UHscasWCV+NzKMzIImqcqnddfqlHQ2EAVzlTPQKPfMtKHroqs9s7pNBPhND66HGud2JBUuUCOoCbNkAiybcl54tw5YS6ajVQbpNPK+DrQNKG+eUYrUkkCENupztxea8Qib5xN7dtWab2ypUfNZCOotX5uA6Xnhg6aCYIzRbr+8IZdubAv7uGEE69dfDO8xxTYGLItHHfb1+w4yRnWgPoevOQsSMtw6yQUBkUrxowwVcrNixIFdueJeciLRvll8fusc1nq0z5fTQZNzguqok/v1/Yl/2jVH64IG81/GuZKk6ZeZbhqVXCVXwYpGiKMaMYLBMnFo+VTzKRVDwatvbVMp2Qo4aGrV43/kyWOAKSA5t/qlTpmzqMPBH3fKz/MlDW+pL58Lt2G0NolznUoceiNa539cTWZwoPXtk+NeLJqLZTu7mhI8yINY3A6hkC683nqn9Ircrc725d+AmskCtpFXXfYCCL1g2EJRm4fgf800jN6Vv1VE/eWu8zljoyCfB/Kk6fgGPUSZRMlSG8EjYnk+dt/gzveSA5CLWG5Y3lJYHpqmvKuE1K7hzhurkHkvUGMRMnA1YzAhCsygbBNkkBkDGm97dmTPzNWKZbc2d/ktIIIXHWt9OsVTfSuHAgIHqbHt4iSdkaB5NE1CV1G3TDEkPHrs6MalKIfW9B71ELYt+VuaCodQD8qE92ESjS593jWstxE2/boyDJKaiGCR6U2UcIBQZvRGeugiJws4q3+daKFnta8e+h19ecxVlOqtSu35xCbj/A4S0aoyfaSip7uRh8E0CfLk3IaImO8kPgpeMOCvUGu4XxHywo5Ehw5DJOioczU8a1TOgzJC+cOLFGLF2krI4nXwduKnSDWk8ZbKGzjw2ld2OGvhDHyDKUT7Ij3Z9rIwI8JjDBWUI4t777UoGJqkMV3TT7wMAph1lFJ6HApjP4xVIpQgKCs/FyXC3dOVPDtHpag0MxqsUOs82WMdhjN9oviYZ2PHvEbu57g989DiEtRhSZU5IQN+G1+a7gXaq/7TCgbX4OuEL1Ffeqx4KU/oVaNXeNLrXBKv6jA2nfTMuB0PMoo3vrtHb2ICaOESctjlFspxxlFEgVgT3jelLK5NKr7G07fwR91JdOluwFUzTamRN3oa2HnC8hq7/j3IcBo04nxVmugYH+nn+po+qN2t584F0WRs+V5vdu/P0GlCxlRqUNYaSFDAQSBS9Ccn8Ldq5WagDUrPtuyUXnkP1zO/JnhnU19aq/iyH6SCXdqjea8hcBpGWa53Cw4+A7qDmohC0lD3Xtmmq8IbOH/1hNYZeJWN5xD/C+1ugsmc2xtfeqd4qTW/JOj/fHW0stp/qiul4c6cTUOtXClMiu2FPT/ZB05Pm5Y/pHh4/OGco638/vPF2eIf3okfv31E0LKX7wlSgSupY5rpm7cvnK1+GNE7KFRLkbQu+qemG4X/0QLY6J6ZyUgjjY3E6EzMDdr2YSpLzWAeLwTyvH4/41M6pnxiGILyiS3Kd97YE3qJ6aEsjeULOa+d3htfI4yimr8YWHsowQ3d25hQENrNWAO3MlNoQKKX0i+BA9lc9lyZc/nibzwq30Ptoth/xvAAp59UfnaQKNh+HdT5ItKvDxOJUauMsInPi+7lb13fB+on0++s5yTVpAjIozv4ydC6ppr6mXHJYGmT/YT1mDGIzVPMonvMB6xQiliz4rMatHC/vkmhP50yDn8mjiuSmFWwn5wZpaRzKq8FKeXuIRXC4gCTYkiyIbxguf21Skf+3iY3Sh8gNWKiAFsYAQpYn7Azuw6bqH6WMd8weXaADv7mFhS6RO1BR1tAdd6kMgZ2tBII1tlJbXO55tpFbY4vcVEbmGych6bxg4irPBQoWY2z3S8vjT8w46rYQdAYCLCBUqKQBl8ZPGZOGnn8nusJING4/Uq8808V2lKmreYfG+Pk/XbaUOphHW9mxsh0JROC+zbLD/fS9+MXeb+tdMh7w+jH9UI8qsinHiuc3Vyhnp09+otqYuf/4ddRkyIRuoUPjzzZc2XH0BTgZDTlmDSAe6DD9erb/fAg32kCBQWlAbitqadR58JJuT2pfLdxYVvEc2D7Q+BGPe8yat+uoM2TaJcp6Gi1g7i4J3bC/vRRulUGw1IBimEy8s0dVNH2boh5sppehCx+eKIi51sVx+/p48oSszcXpMczbKIxLgIHB+mJanPgjMeFHKQVO0H43zbA44DB4mAUkI7fAtsVin3iiwvuaPvzDu2j+EFj4Yl+YMM/yKuFL2mSHyUCfeLwSZ/ixkAFZ4etELB/Dujj0b111pPaeZ0sHN4NTf3Djan3Gp37XdlZ0bBLRdQ9jbJXhP+fmvgl3Sx7mEJwKWCPujhcAhkeBCwG7FRCsVSX2GdLt4FS+EuiIG+Jk+d+mjXfMJfRsYO6lamLagHFpCZGPdWj3aPJ62KUtIICwa4vRQ0ytDVo9iNjPx2qzJ/pFkU33fFIWp/AcIPvLmpxNBdsQEBTK9/AtCUrN2j0bPWeLfDVD4I/KpJ1J4YEvak0/WF14Gujs0IRbSzbG6qQgTGDx3MK+w5Xptg648sxYjEsc72lyRRvcRZiDqAwGJmxXNhbMqnoCOtyJ1o2EJieYC2b3jQ+/Bl27SgoTdu2s4sHJHIRBTe9qGVsIo3+B4hWU4Gk1VGz5A/vVufnN5BLl3a4JWChWZw5285j/ksBT9+41z9TBwHOqwFliwrpzdoQUPQ5f69MgHXWcSYsEyAdMGSebJUSinQY65wDphOEi7OortshUHDrmJbXXYti6tvoNHu0OrDmH0hzmhGRRDtKSzjXzQ9aLdfPq0fAgjo9rk7WssxxtBmvPaeoPKkxJOZWb11Bnc8QwiMvkTdxDR++HIzYzcYG1WZGap1k35SWvjShTJyytDN4VnW3/33i5LkR3fR00NP3x+YR7afR7sjkPsIevxTYpL2hsI0lXAWOfpaxmx5OKP7wOz1oYYXZ50acjRYAGt5L+yRpqkD18etklPD+VXpyiLb/10fmNd31JaUnkaDAn5W6MGtZsj9Em9FaDnoNYfdEJbH1tulu1KPXD2w4R2w88GtaVhTynvGRZvMBoCzHwiBhctQ+VJNk7aeEnbufTUOxn13HNau0tr0Yx2v3oLdsMgI5lMbuv2hE8L1vcGz4ebShJfC5gL8m+qJ03AmWxYeJYJLaghzKF8iIdUCBOwx+VA/St3O7tvBpv2N6nfBs2XUsQH6V
HhAhm5v0GKeDd9ZbVn+WPoiSi3DdLQ2pNdT5S0ejyJewW/bXSqz/Z1GsJ4SW14Vk11BcgCnWwaDF2wgQxL8ve5NuQgaq/AGMxYX4vV8X4RytElGzeGvuPfN16faavXy/GDa6IEvxySDWHOYJrb9RuaQHC6XM1EPWcReP/g+qckhX3UI24XHo0K6H7eYnIr/4uOXd6ZELi2urHNK0ncjzmCFe55nDbqKM92qu08oPoqizW8QSXFkEe9jS4MujbAe2Irg+PRLNkCLGSieFrKWus5eLVvaFLvsNSU5yr+td2pH1qH4uB0Htyv49++Snv+4fDk0BZ1J8kQ8zkNLOdW8QI1BzDBCJFdYOKARMg3wnfAUgXPxu+CF3ZFSeghcxXQ2VfVLwXNxkxDpSIMpTFTxNnIq3pT1a2SwtlzcwV3ujNFvN5XcnWh4UxqnxdkALc3rF/I220IKvCnhXH73u+lI68eO7D+uLW0AvyDwGK5skM3+S6srBSD8F8NSpbayIGuQY3k+O9DRmXSRoRCEIQGteMo5PMD8MkIAlD8Jp9pfFD5ykN8tWPpxmzooaOH2NLz4op3n+mudNvu4J1uzznyGQElW5+X2+ccGbkfUgYwnsWBAmwSj4Z+BhePqtkwhE/dh9EutBWWSSwXywihh2Ni8x619svRBafN7tm2D7r+iCt10MLdwOAYpHx8alsww/mLrA7bOkfqQ7tlyNof1B1n31k9Zk2EloQB5QX8mspI+CYRx7VACZccCFoilLKkLZNx7Bw00KACu72PX4pXKprBozxq1NdS7DpQWAeVDnDb1PijKxy2hxMYRWsIo1d5baVGb2moQ4EQi3GwmGVObbtCzlHnecDm29My8XXrXJuGh5gX6DnzeTllV7FQzQQpePnaXBb0IV2LFj8TMGpjW/fzrjdhGLYbgaiSVwV17fTfaaFZ/eW0+KtKTxvzksqrulO2jGsRn44j5KfLkTXATt0kqZ7ikZb+FEQNiMRIf2wuIERWHS091+mEF9CXHo9xaMdHyNWJmDAfjCOaIltczW0eBUgruKWsApWjozIzG80gP8aBb9Lxu0J92HmGcBHvHPLJcYjAw1QegXvuAs/lIcdZ5Fobt5ykdAmcOb2uDnS7j+GLPTXzk9gnHsJ/N0YBKaYhlk1Ddm2wIFQ7cBO7Nx+B9SOxROYtGAe8TTtdPkADNKir3xAd7bBVSaFS3lZW2+BP63kd3+aZHxWc60w3EjBb6jtWjDmfX7/xszd0Tmsi9yVemvcUZ2wor78NPcVkvSLVXYgIA5t2o2xtOI3bYor4RdaHS9AQUZlz1h4tjdst0bAjWgC9m9mOPVhRy+WzKMdRRC6sK1jRu/5xlu91ANDjuoxFYMYRB1JnIMONuX/ZVjY0ZROUtiM0AYwIddRHFfuStpTeFJ+yozMpz1L+Tlan7VqUQoPFMs9v65YvMh1qFhaBFHE3KpIh+UAwGFRwdyRNvB+Y7mYh7PL2GNOpbZdAEmmfzVtI83Dd0wWUuH0m0UUjOUBq5bn0FnOhdPsllZ15y/G5BmWpHr/JaPfzwrXaA2yel6Si8Gg6hg5cS64qBWZGwxizETULT3RvU2G04sPqQqweWlCVKnFu/W6oSyhF2KLU4mRq37vkMoreRyFNMEplE+esgpPU7BcWl3ZoBoXydbpN1QTtett+KWUMin3RZp71PBiLIdoZLiTkA1e8d6S/Bm4mVVQ7PQ8BvOcRORdoKtzmEonT63JheSDpaSUozUeCCbXhImimhsMacYChrry3RSJfzqK/xIkJ10m9YhCENcgX9E5jqVWkEsy2RZsz37WGGgI/PhXxLI0jHKMpjiUsE3AuaZdGm4mbBoeVOf1JDEAorXfbl13Lb3zB+//OpfO/iqxqTEKB3Q+YThMXDG7j/WvGGRdD5qoXxZkXagNIOCDpI3c5cdhj5AFbCkZyIFdyx0EAgLlJxxyB1VnS5GOtfZzA/RTBsPn/FR7iBYKBa10/hLqUK3rZL4mN/RY5+r3FW6gf5UQqQ4SOh3sZSGjGLv7oy18BAokSkCDM8AD9TL0NrKcE2FjjuDWwePVA/Oh50SQfJ8Wn5afIuoVn7kGzLbl/wBn4Vq8URed5T0NImN6uHUKcCXqjvSMdfjYS4J1+0oMODfOAuHP7AI/EqFf7uHQyVaM+KJ28wtVL5aZYy8wBYNFIir4NOXkLgtJnNyHVO5QCnCxKf3vU/3S9bqKSI1xz6CiBBythcYGftW7cW0YGG2cAohTt/abo1Up18bN5x5PrKNsOmEbvKzasqdeS45NkULEhE4basfyyQSehlnvdBiypaj54nlBo3pot16QsZF7DVLIYiKuTlVpfc006d6jVojSHMwc84wnmw5N/RrHSj2yRhRgnUPUsPtkTRz2EussTgivB0fK0Iod4ghTtpGQeMuhvFMkugo0GCjfa1cjtbbg1/VrI8u2CxxLlBkXDm3+9Bc/vN7V/htqzhspKriwy8GCV01NrOhqWp+q5spdU/POP8i66pz5lsrdUlzpuQjVq74lF8rt0AYgLtF7otwiuc/vODqUSN8wfJ9Io40RT+wFjThFwLEdv068B4gsXEtgmOmU1BQFRWRlQFclVIWv7OK+KuSU7i7cuW6aFp1m2GjHHt/ZuFiDw9F7wVDHoVyPxbTEcI6Be871r41uTG2TAb600ngWc0PDsgFwpJ5aV6aqghgAi2kCLCOKKXaiDBLRA6rXtf1wLkTJ0RoVOlgnuWJqPkJs3XteioWeZR962kyO6pb6Ps3gKE4WE6qIZQ91g0+ckFPaskMyhzMVnH41iEcOLnGqd6M9ZFxTrPRktjR+CRnO8NRYRh5IDtu9nk0lyPX9rdmlUIIFL5NB6k97jy9SN1fGanlZDZ8t2zhThOL/eQWQ9G77qDvTa3vn1WYDJlJ+RZRSsJEkuVDYKjvroZ+TOhntYdedDRDZ+LWs7VxXSuqs4nx23NrJfUwcU8EmLr5Ivsk9pP4/COOu2FTvMdiS4GQ9hFJ2Zo0PUIzUOXdpQz3qBXnqvgftHRvFSYUR5ZDosXcTh2dp99SJHBSt0T2KcOHIk1I5ZBulkGyQusBWLq9hTHd7pxZCmlBkvlW+9VJ1BSkfSshM+U2A9rj+FFiQa5syJL364tAAwZbZMKXNsmpyxQLor2SW3R8Dn1zeepeRjX44RuVFIXY9KzKP7fD3tVQMWIcxnKcoAlxe1eCoNbFYVoFAvnvkVn03Y8JuO78f87SAdfzqdZhy1DOiae0XO/COQhBhwmyZPbQlnpKY5uID6Plt6tOL2upIBvF4OpoVDKkEbsNceeRuV+4II7p/8FHOWG9FXMgAq0aHQ2moVBP1xk9Pdy4xgNFW5U0jCFv043shuo++zIc6pDwPUiWvZaPskbdbuX3digBn5hAlb42jOHJkFGrRNFsZEhi+q/93g6D5Iqjvj/cxke5wmx7cDhLluRqRd5Pfc/dW5KUOOmuI/5g1QnHPWPl8gvRdnCbzXRvw7m5qOqn2fX3MQ89Yh5oJacESH1cIqAMZNKSjA09e8RAFZP79MSPKpw7QQl6Ed1p2LJ
6kEkebGV7FrVCZ69RJLEgNDiPABxFCWeN96qUman4PJvyjOTrROlUuE+aRe5uww8sp+sRqsbVh33gWWbbIhm1w9pZYVO7epwYFJaChNefVIxE9cFTHLLCrcfvSGq6ffaknfRr1KzkYbyJcrF9qBv7veMPjdh+ecJAZHKGBgbCanZ3mcs+7bpETH8uvtapeBEep8KbY2jL2S2dBCBr1CWiVbyr8AssFsH1qxOxhg8ET4T7QZT6tFVx1ec7Yqs7dJXYgWFX3fjyCqLwBQzzgSDHLu7i8ocYlIdoJJuJz52f47i695af0Q3wh09y7TLstW4e3jAN6q4LegzX01pNXtuQXxNKDCMALCFu11luURw8DfKu6GiwW64Nra3XIOAxd/ITQLw/L6GqY1uI5rqihpZiIqF/z0teWqGn0y4HuFhxABtfZ8HOILv8FAiYnM+ixlknPMgBFUYm2iLm1G8V5ZVlJHP0+pROu9MH52l2gerf4ods4TkbAnaTkn8ohr8NP2A3a8ia9hgMfPIqzAI1mE6K1+f2zEFF754izWPlNFAXfurXkAENpdjYrmq2jL+jBDxiuJ7xZLfvxGx1Y6fZKIQrFmYaG477MyBen1Uqmhr5NZtu6IdqqTReVRbTtPoUvalFrtKLJ7iIe5gU4LUt5q9vUhPwrAiBAOOyqNmt+S9kxYQpBxQkqyLuGxc1YaM0iHj0HbzPJBX1UVBb1Kc0KNNdXF5h0+XErFNqiiTypKo/IZDSyodm/ytAbBvmpYN2YXota9zIJpNm1iJDqqESfuQayg2NiGZrqKbG+p54xMD5Em0ihinqJwM8bGsKFsn9zcrIQ7w3PIi/wzl93d6+A+X0p5tlPrEPG29lPlQBpgl4FlwYZ8+FhIwHJ+8/q0VhrDhdn7ZIN2mOqoys2NDBZSaaPE/ZEW7+DIkWHJCaswX1G22qLFltCqxJj6M0N1k7zclpYGnSQHT6e8AMwQmqVp0es6Ll8XUIh+nwcH8OEoYXC6L1tyiDeBt0HQizkAWYCX1RJpZpKji7HDGYfQdENClNlFi1wX/StpeUw6zfyeI4qaNHsOkAvldwNW32ajpMmQCTQeYNundAvXDqTYlHBe+EDAFOBQDVxfDMSbmwW4NjoUX6ao7zopFpOtnorHUeez1JsMUOtNQ5FSINvy4pM5/e6xqdX2M0E5b+uQgGddsfj1tFdyhv2fZYDfFk6/OWhpxzVNwsrqOR1FBGQRB8urONBczMJBcQkhfMTftWSUFGHG7RBklNrpvUqTVomC5Ck4fQjPR3vvq+af2iyRSMNFgBEgHXZF/9jEd+ty20RzTx5OGGWKDSaocmPt6F3Wu66HBpm0UrB9rD4J8CLtPcmiBtj54HLDW3eYSjS5L/xeAlHfcXgTBqmKRs5SsHY/R0sSYWye6XwoERt0hk9Rl2ZjTvLkvH/ptTFVJ4Rzl+DzbSU3J8sQTKE09oBnw6HdM54jcDUibNjPHdOu8/fAaJIdJZoBsbiXHq7mAi5/q/90VmkSVCmS/bZVwJ4PlHo/3I0exKtFpPTd/PPzjbkeU9WWstFXh+9EBj8oMk1KjytUzytgN7I4fCp9r4jzNO/8DW7DHR6qxgraxzVbQwrUTHpipHF1QqS75BilZ5jIBCOH5sZ2ODwxowCKJs4xGDnLCCtO1V6miSboMamiYSKGZFG8nfUuHbDPMmjZhDCxg9prB/niU2Yvf3KrpB+FqzlHCL0r7esZzTjPSGa0DurIQjkmVQSX8mttx3Tf41GaDNpHzTAZPmrYW9NhxB60xKnBrjQPFPnqz0sdFINW7qSBAkYW4nI+7/25tmOxCbJVpBG0esu9GBeS306LDFjSSlMxQwTG4aJDuyGJaPfhUqFjVHu75S0qThOZUvUv/r8QBP17xBmYYfom7gf1OCBZiMY4vuqX2dvYEPzjRd18/avsGelumNhDhGYmtvEsWGArN+AKZOCPgJjibUHtvUSuuWpgR/od1DFegE8xF+PKM0IDbuZCLeBdcAx2EEKwkvvwXZ1cXZFnwn3RV74Du4+T4AB1h2lVseWm/+v/yw8D/2DSwOIdUf/Sz38ETFEZcfeF7fDAqJzqDWDBZJH90li0YtpxIKu+/Li1SFHDo8wyeeV3rFZn38YYEZ+nYSZHH88O/WqnpsLs/HVLbY3uenMrXXjMBgZ0Oy79XJUK3pbRTg4r7TClt0w2iqmY5QPOOU/MA4DzzQeg5n9HTlRricBHZmoOjcif/KDxjuHkOyniYk59A3OJCjLqOzk8Wi4TfhVgwPMcaqQTRYhr7PW/y7182JbfI8yfZkyD5T/tTvd2RW/ZFsHuujX+ncQKkUq2mVEKsk8hTudJ1oeSAPCEkTe19SZwSMTRi7+1xyUScOGcXCr6re24iP9Huj0g1KIgAADNftm2bdu2bdu2bdvm72bbtm1bs4hZyDGDJ0bv4JmKu4Grc649yb1WRYTQVDXa74VTuCQy/TxGxEKE/64kZSkDSnD9Fzmuw2Qr6U07NZxeR7A3loIh/CDYA55A7GIktXX81uxFY9qdP8lxviorDgN0nFtCjHunlo90cH28Yw1oc86UAAsaozU44vsvmUwbl1ONYRK8y6qJx7mUOp+/vnGEhyr6IcQC6kIKaO3MfrVRHkqZ6Lj6NhWpdZNnaoniljpDkKDHv8gkwdoQx7y5YZ2aGM7VxBoBHb1bms7nmJ1XHip3ozyYz7wU7tt7wjD4UPEINfcRaanxAJFNcvsAWuSr17zgIKk10M1nEs1djiPN60B4I+A047DPLJkbq/7SwaZCgDmvg3B5PZx+1uOFfBxC0ODNIv9t1/7LJAPAXrTtEY3xZzAQz9cIKAXvvehfz4JTjjz4S5HrefyCCKEJfZ/Gq6vwspBXEfZYEmy5jFbnX5tMhl18tQS3CQjWUxZC2ZUXRWxy/G3hj2IoQ3cAUx5cszW6gl1Oh7nnvHc8gPGTwPYbE6Z+7pp7tjgAETFkggpkXoNAXqa+ZSXMb8nfBZL2wXoqDzODBBAz3X2Hf3qkzfnhElf+wOXMpNa8GvoHDinsgET1srj4zbXw+f2ypasbXOGjQ6wonLf1PJ/cc5P6Q6lXxOLPqN20OUJuZ4MuwVLfETCLTSEFiIn/8paB3YMomXy0C9Q6On4PYQXfpwfMuFnM1Otql9lJHarrNw2Ay1edR/IR7smat5ZtkjfchLn/6a7OIJ34VayB2e1S79veYJ9nVO3xSf8GEqNujXW3hBzf5MvKXZcjFhpNoj4Tk0VGX+EgxHt3SWk7x5x2AGKLQMa9qiaY6102RdYV+pNqkVGBgG9IqhEc5dMQJJz04dl/Qa/rZd5lnxRTNodoTWfju6dqWWIaMB3/S3MByyq50dbj1la5j4pEruvcbaq0wJBW6ZgoDoug4zqxt0KZbNLTavpUIoPqJ34K3g3BkvKdntjmqPnORddoRTOglfg4NbXhxovtCBMJ2USphcz+dIGOlxbZqGVMQwXJqg9I+ajwquSdh2MpPrScLkm3kYNgKaQieHx6BpOjMVvdFWPy4L4ZLVErYin2xJBG5YVJy6PQzl1I
fyGRn5ZQa/R90NtrmmFhxyObfgCaP7lgt5L9r2BBuSxS3Gj0aF37hukqpram8xUTeU/P2G6tAw7kIu/PACsQYLcoHRjhTJ2R7bYgG3L/q9WBIiija4JnmnRLiKNVP/TpVBYLK+bKNmi3gAa0Jq1qULT5/JAYSFnunIlBKAki7ohneWySgSGBOKouRvnCFFK8gK2cHj1nf6inUXPiKsvhnaUsacfIesm36iLbn5twt9LTuqmi74ZJT9+SV1G6vlxv2G/lCvZxmpzM8K0QF38C0xoY7wii7w5mPxapxO1KZbVFbf+u9mtkxRinf+GuMbFFwC2iUFLiqNPT3G3n/Ctw1ZiE8VCOwEAci4VZb6eqnQflc9+PcRN4HlBjvNQtd6Ah8/7yeAazyHDZ4V3jxNPWIWdtuh5XLMDsNwUPMxilMHShE+27+q24LeKDMRwz1+rI3IBwg84ZBwC1GQyQqXRyEalDoJnmtsH40IGF7CnosIvRJEMMBTa1Q99/koXMY1aWTeSAIzZCyf9ShXuYTdGPIqerQqt5xzS4q1JXYeIw1+Cj8KfvW/sdHpwaXazXnVCiH2RPUUjEM4Epv7diZCisu8ga/4jNxgYI5a3Xad/1b0TCvLn0esVNkmeJPEv/sUebjdv16iApF8jjEb3O8ydk/uFdLg5ptP8YltlZwjlG9vLNDerOgzXA9i4WLd4OkXnqgvyrz1bto+aNuQ9+rLl4Ifl1q+x5F65wTeCNff/Xz61HNfYfMMKB0ZZ5eWro51r5b5tlfW7hsDr3OCoTGVG279slkymUJ6BNDhCp33zR66lNlasKYW4zjfzyr0EmAyI2c7YhVsea4tJOncZYIuIfK/R3xftQDvU3lr41hJiOMzxg2qgO6XvNbT40qKuN12caX6chJNMmn9RmtG72z2XFovfolSoS4gcvH4Yg9CvDTIFRCk95rCIwDWQmlL2WK/lO5619oUBd3rKc8fDBhkrV5TaRBtEK8/nHF0LQJT6l+sGIzEeq211ppGufoTeD/CyyG4jFY3woi4PyUoChy63ccthwX7WuU20/ldJCrMBoa0KIp7c5lA0L04ZLNbZYLVfFSwe9B8cNqeYdwx5ffgnT4tSmngw+G/Jc5XdyWpnU2OKMEotFwROs0q+v8mJnY7ay9mFQMFpbQFKKLIW04CJIUntFb+YRSRwcZqKJ2pDw9PAsPdXmP8YxSPyHDvqSFT5KeHgxYCQDvRKH0H+LLkt13r8KaFrH0rBFdRkR4mpB+BNefJVWvP2+P2hd4m0pqimgnJSQgcjrlqAsSsPo1keIPWvhCW/e23dnknxFPSTIv5+5MRvh6XLSbCGlF/0ypXD/vP4sPlG6CVW+Sbqc0bhJCqCHQhW2Sq4boIN5hI5jVsFORwVAnWWLjlFaOxHOjHoewyqDYrtaDT7HuDurQymUa6l0NJXo6m7iP5GbgGUzqZOX0KDh1TWZtuP+dcVebWOiXDmBTOj5grwOxeppCiAg2t1kDmVRsLshkT7B/5MPL+dItnx14y0yFun+CQHOcy5Qw42305alumySpsFBy5eKn8wn4zh3XWBFUJw6NDSZNlEqxC3HpfCJ1blS91VSHUceQ6zIh/aD/bXboeozJmTk5/SZznNp86bifW9KK68Zypd9ERv3qGbQ4Edoz7zZJeXi0lYV2GmMc/zwa/5bNVOzs6Ws4tCI46grAPaqVfda2gA3sN94BiZV4JF9N96OJO0XnTYUGVVITjo0rNucKdSZ1cQOI0uCCWD/pC3hU6cGJ3b+MVHANtXGtCrUGN65odz83FkRNVHVmz5tfX/2EJjynzyridV9FiGVLzz9KMqWWP2jQanT4X6+g2qDPbdvtbwY1Zou1DMVAi206yOA7MWNIYM3TIADpNPxZAtSOcWIuoiiVLHXvGsBKBddoDvBmz+/X3jbW4jY00QCHRu3hvVmDX7ZB5BrwKyJJ4Ek5LW9opelysW6wikeJ1xYMvLYwDL3KiDxKRn56nQcweuMbddzoHHJDYOTwHnfPggIK9NC4a44b0cbK995/OvgBoWQh8l6KVDROwKG3QPPZVB5P98WNl1fI4Zb1OuyQDnTxzjlMGorWZ+1zTTjOHwv7tjJAgHFYAuhAOEfsdamD1RRHQlvzIS6zXkdcV049hHml2vmH2iywzvuLXaHx0NZp0JbPG0Cm3Qd1/igkN1jxZzrmbewJMwBENPxp8qcnGe+4UrWhWd1iSN+DVkyEXoo/FYp+IS86rayw0uVaZYsyLd8jWYUSkvrYClTvCC/nAuUy7RZuEr0f+JJEliNHjPAzbU3Uejf7wQ9uru79j8L/W7NPmWgzGDuej38mUMaqcs7Qw/LLzVQY5ZXmAU5dcufBLbzWJoiWSBVHih7ta4tZzoY8DiFgXShCKjlNjUfLQcSytZ7QvlDVZRjpPd6ILDkV5snlWGzUrHdOCcsds5ZTiHNTtOPBTY4sOjC7XVlV9K6ZFPK1nFHsMSQ1Op+j94DIJD3ZypVLk6pf5P2BGX9MJThWeqD+iUq26r1bNHbbRZzMTBK63C73hAzeJLfJCYgdf28+trQ0sfiyvaCk2XLmiLWeu1u7X4WKxB9F5KQfG4yIRb10e8TpV764IMTwA8a1DLeXtZVd3wnf6VizNl5SbNhk3ZwuZxZdvR21fohAYpq3/xc3WRs33scCI65uSrz9S6RNMFQMp6rc5YxEJGRK8qS5uwNpROpUAuIAepunXsU8fRoqXhf3A3ZBmo/4LB/l/9swvjUoVGcKPUZw5MDUKNsJJAM2U03lJyN1H6UNbcTXglDkfNLqyK2tdOkfa+KHwPVqMscendwOZROpsQkEHgzmMN+rOp2e0hfwQsTiHU5fJGusjgapLltr+vBpT6h/6m7YLyts2u1JUkDIOAUJoZKpegA91xmxVWDwz9v7gnxEAeCU9UF1tUL3YF7AYFbHVZ5uGZiiEcKPuHfEcid7FHHxV28hjeGD1gXszOVC7LPhfA+kp/NOGb2GGLkYaO60r/35evIT4qDrjAMfenoyF6D5VtuzhCHdyGgnyJU5SsaRZmnOUYcjRWngyGc1WC7YssQ0jzjo9JoNGHXxftfFuJ92Scm9YxDVPYaLcSeFkYPOp1g+Tt6jxeqS+7Un4foQlWYE0mMmgrS1xn4WrZtoopkiWG0dQeDze9YPADavS+iXgt/y0UCm30CxoTCEliX0d5GmxWR67NvF3FDjcDoLZCPkl5axLCXTnWFC9/3DHO/bmhrWqFdiqEef0WoM7CLsyO9jtjgSS9FDSNU30u9lA2Vfjq5FnYE+GjBJjYTV6TUuqE9jtl5NwVPafjw7ZB1D8ztVy04dMe+esNd6JpexuDM6NRDJrje2v6ToFW/ksFjNBRcIXzMAb1WCZuMM6tfJhorEA0+zq+1BbGHZJKh1n7x1r4d9484RL0395YwBiILx6LvdffVhGrPVbKj1uouogJ5u0KqUlDURWZtYyfdUW3xF2035XZwCbAaFllkVVXTMOwWYv6GnxKhGu59JWcVHJVWn4G37qIHNV9HF5HBwetUS7T0hG5IcoseOskMJbw
OG3QNPKwSMlQpo2Wz+gtw5wDJvkpKMXDcuwyLNk0Gy+BOXGTqeAUzRBNGyAB0leEvclJIkLwXxyszJQuDEboScp4hWz3NvYK/1jR/GiYIkqVB3V9fDc0ZBcUTFcJHHP8E7saHtXdAzsBPbHeMtbQr6jp5fkGISw8iMxJUFS+h6Fc+iJw5h4ZCNDMr5RG3bv2VWj0q5o0usyu0lMGQFBvSV9dC5APZUf5/S9imGzI97tZA/AV5n8jme3EikM4sLBez7/iSc6jJoy9HU8+o6T7iaQLii3Yg5xhMJwnlcSGyr2xWzAJWhg8N93pnozkaS3GK5q0R5p3REIzSUEqOmijtmPtaJ8nwco4v06dX/FuGqT1Y2/FwlaRrJUpblSCBhf5OW45s9ZYotoOyJWdtCozYkO6uYNfqkFpYdELb6nmkm8ZzGnOvYFiLH4hUsKFT18jtN5kdyD8PSh0QvIepC6S9ZiThPG1K5DLMWLQINL9/1dVPugHEnWf2Mi1JRcMZCcAlk+D9iGchnGnx6tFIJPn2p0kY5uSavcT4kMeuBKhR2jbJ+BMQQONv4FpxxM+ZTol81eIh9nbAWbW7iu6WOlQR5HEORfKO9rQIBAmx/vXdAhx0J+erjphHeNotXncKfZCtWw4kesx8eFk2N3xgWtzugfKMfwzWd6H4c4zdZvGHFY0T5xptWbSjtWIdNAqy1DCPo/RaJqqleKz6wUu4aaRhtqGxGrPgYsyPCkQxcxGFKNagl32y/Jubmoi9UdRDTKMhsGPLzPUZzhAIGNO2ZEgg4tOLGFZ4Q9TsIPR5UgvSxNvuLuNBFYjhMKy4QkHw9x+1sZFC8gxBZSE6IFqxLLhd5skCTqISF7Fey9GT9oSiGePRMcDFjld+Zbie2NA760OUXDy+abyvnoWIvo8qmGbWp5CSEJZBHm9JLhcZKfY0LYkldpGr02uG4LSQAbwrn51EKJVjAJkDgvaKvEQPPn3k9OkUjtlUUiKdGCiACDS57pRMxgF+ZC5BdxvRp1zx3wUL0qomVoF6S4V9SgIf53jHQTVkqn9FCipeRuzRwSei95082SfEDU/vyxZwNF4dlNBgih7SHJZfxesoveo/eCtFXJ/Oj2aVUfhc/UN5GnwVWoLFNtOWpW5JOh6e8PJ+o6tb5wKURnyPy6gBYrV6HS4Q9qWR9wM88ttMPZWdhh896ykOaufFJpP1iPM6kKhfueXtUciUcZzK2KE47P1QKXpOZkxKvsgmlqOqQ9C2pmv/SYzITO1D1NP2tpq9Iz3EcB+YKU0AZzGFJyiZkpBF0dQeQu7rHjrgwongNR3OsvSpqQfgwogxBZlvtH6IIv16RXSAUMfn7TP8+gq24eW9JRKcXblvHobAk0fL36Ugc8aY/qR3pkOWq2OOBdXa2hLhwaKq6NSqB3M9NgiLQAjQ0tSz9RQjF0u0d3r/U1/ozsWwqzKOgjiLsZ0v9N1ZkHJzGb5eKPRHIrllNLS/pSewvDcad9ZCSB8O70NEwmVbDDCxWxtDOsATBrbFMmbGlMhgoxqmu7VoQy9Oi0QnByg0i61QPSadQ45LIclDRQ59T1/G1cJWq8ZWVkTTNf3xU8ZRKSRU4fuHUEzCN93vixeGITE60ZuDFSoBG9CRERDEsAQbblJbsqS9M1PP7Mk+J7X8dPueVGG7UKvpaeGLXkcxSwC5aM3Pl+SxlDVY4TBmTALrdBHObal5RUfrBb3k/bMzY4OhHQBa3q/rcKQPy7ueCmGCA9CdljFg5hEkTD5kaqRkNP3LiwAAZ2JymEJLLL6LixBHvMFqZk54Hm6QnBMBSToJbt9A+taF66BQ0ODSuArmvRopNYxu2FBXG3Vfs5ARTcUN1g6TMGThCWFN1U75KKV1a2eaGJU0TXgzJ7JU6T8BzoxIDWV/7z/uQREXk6RVJanQau5vpVPY1BZZHmW1QIS8a7kl7agzujxVTuSLf/AocoIsMtPxU7vq9dXbdq+YRlWvOpapnHmqpDJ75r+jLklc1VCSnaiEnJaiBzqtwhCEAGDzTK0jlAn3ODxRo/Seo3BB2Vg/JiMeC8/TXIdxQ6VN24jAdzwcDL54o/jCJdVORkb7RFUzz4EMZGfrnP05lvIizW1rv2LMtvGrM5pSd0iKNH5I2CmmjtN1fTJ1kWZmabd1ltwf+B1l7q2wZ9m2O/MiehF3CrpoWQ+kdE3FuSUI0onxnwCZFLvuOJNmgW+l4ZLbYVGaUhByVuvoNru0CbyYcqC6nqVekhtd6D9ig61vUjQ5wsfvb+UsDSLihYEZEMkurZpUtlvm/sL3ELfTPUT6PQg3mNhZ7wbPchWVkZqlZbNBrC+3HmULqIkTwXie0GupFeO+1iPogl15R4jqw66eglrLuj8oFRKe87zSXy6SatB+63+IjYK9yraX2iaxA7hqmQgJrGBve0140609YsTV/PA/Kxb0+/IHwYV6IGszcm/MYBbxz3rSN92STrw9LTU6S3aEildEacHg7iX9I5JEl3rrwAnWgjFej9fncQL02/ayumUy538jtBKdMPTakcYsW2M1fqIa+E6z4hJdoF0LoeM5SXihcKMophZB7+BgFb29jLPpLOtctonODVqOJrKcVkEH0nIjgBMZJrFuNElM+F7D+/pXu5Ti0DAXOSvBtGzKfmVCE/peCo/pX0f6Vx1IpOwh941Qx88X0i7wEjusDuHsrbz8onkQOOKaxbWGhh5R3HG9FP9iT8cis7uuLeq//TTW3B6dR+nKngyUwO1nhs/XDmEKR5JYlTo3qYrm6d1qeUzMgMddNxo/UAG5J+5YizqaofGCuiQHYvDR61N0euvNJ8bT36LjbgVxAYNh3pPCiZ3w4OrKAjanJZ3YiZNPymxcesFWJ3ji4IJji6BObh2GNCtBGZeqhn9/2OsxpG49bIAN4VlBFSqCcFLOkKSR5RrlDeFfFQ3df3QTPDq4FYVTD4Y8a2a1t1GwwvkYhiUM12CK5a59GJOWcXU++GK3SQZN5m5ZLoxRPtpJq5G3EvDHPxjX+Uba0V/Qrdo84vy6fisg8BbCf4C1Xbs5rvpQi4pN9k/2Qhz2gYszy2aYdr3cUWqeh8e114C241zHbVH6NVsk5Vp2IO8i37CyD5AH31Bt8NL96kIf8HN2uO46LZMCOUERgAEx7aZ9/TiGxY2B8ygwY8WHEGqtd9Twpflvp4zSZpyj9zT2S9saUTXvMkAwUFA52juIrBaLpHiHJ7rml/k6pL1B6wiTQeal/qHj7/BLTnVw1dSVOYqc7jUknB9OBNVZayd7ItdSfVFaazXhAW5bq/EyZpOuCKgmp0fwo3sQmkbopJ9Fqd3VeQObYONYsd+ve7iagq5Z2CAvgr5hDwwbZuRtOGyEgVynh5CwUNZsl3XRmJwKvsJ10f0H8jtHCf6OQRk8EV7k1gkvT/Kl2SLyu4jZpmFtTRO/w+KTTnN1wRV4M5X29wtorYQRW/D3jSVA/raYqa0P7Omn7OQfwcnThjncWUCLpBSv2Hhgu38tNKlHLO7+0wrzFZ9I7M49J+5jRMl7UZSyR3tQA/hhiVWFKs7EaM
uhExCSS7TtG3z/fagzY+jQApGjQcN9/KTb8WmURTbI6qvkYtOfcXlXvA8YOcbxydWxPlY4y+uKlc9VY2lZAyW3OK/DR2S6iL+7lVr1dxdldI5NAfWkuQast08F1g1ZkGRiuhPqZg8GljmJrRaSybM3R2BzEViXTg+I4ltejFg9OWXsp5gl2WMSbxcOddqdjb1vveZrlqd9VHRfFDCSTmKvHVvLzBXNOSWPlX5mpCzlYN2i8cSat1X3IwKGGEjT+3zgEv9zU48gg2SpMIaeL/fM3qEqLWxIM+MKxh2+7q02a1x+hSdKzDCcoEUC/nXkXijQeYEMxO68RWka5F02IpwEA7eV+Ov58mfmitNxSkSMT6Q2cqD/J4bfM/zdisPPup4b0O4/mMckxkLWy4eq6s4DMbLcy634Jf7tSL3tcGWf6IlQR9A1aiwUENswfpEkEVWqmJ7ZOmy6PyRufhqgNheTMsBwa8mF8kNBOrQWXcNMyw10HIF7ezQLe7tqFEYtW/9YCT1xBa0STv1oH1Pyr5smWN7PPinV744ualmrJSo/9y+bNTfqNsVWwWolaVn93XVNvIfJgVCvVJmGv5iAC7gNGJtllMh7xJP5lypOskTe4fliqmy/k1B9FMZh1q938lFC+rgjLhhFN3V7jWqZkRbJoT0Ub7boB0WOV9yEjHb+ISrjTdwIu7h1UE5MJ6B8Dn1Z7DiYHyuK5D6GUZg7PYk0XDpjmys0/WYzBmZifPG/lHGV5k1ErquV+DvW7aVr/ikdTROT/Zz796bzfhtNKdfsrb9MvBLc24YoXwPn2mo/FhIu0DKIY/W8f2IqKym36FDrNiv4O85C8xBIH1oKqD/6+BmbSLCY/myt5H65Qi5QqKjE0w0zq5aB5MMeQJWIB2BkyXkLk//xgporo1jeufa/vhVl3w6XW5jQE4o1FTt2EQZhG+bm1p8TYA7RUKfsbmRJfVvilKeKAU9VRTGndQbA5I+WtDu3IxUPmqXptXxSsjBzsKowF703GttPFjodNCzXYi8CARuoc8+QelVx1qxLIPrIWSDQuJPfQWV/zoY+EMbRR28XG0btJU3SyAdUFA55fEqxBegSwBtCQ+UGa8IoD3lfTAIxHo+5N8Pasaz+Rtw1ry0ciWQYSGO70pYyjlo3RMOVxJIM8XfqaB+zC0JapFLD/3XS7ZOhd0fNe+QinMCvEicJv7KanS/JvCMenjRPW0QPezskK3G9s4BUvjk9sEVLl/r8bk2Ihl11caG176TLs7dDWzno6VafsMkPt7ED/XKdfonFYRlX1gPfAmkkw/pMX8DWNyL9dINASpl7xQavgOptQaJhJFm4yWbputYnFdBuW8L3DWzDURkLetnWz/QfSlZ2lNK+EV2MnSTtMD6VV0cISh7wk/bCpUHD8HZUOXaM4jIYn8WxJrtg/zgyFqOQGzG5ki/l7Ct5bnOgd9w9ISa1JftGkmJA4VzBLXgSjEX1PcFxJ5I1ym7qC8RFQyDA61Y68tbhO/z9+5VR7OawI9WYw06Ma9NV8WWbWSV5d6sOLkyq8FEAwswzdWMXRZJDraoNEDd/tooo8Ts0TWwaIXc9oG5AcMt4cJ1yzr+9tljTW8XvK7+COtwdGVP0svid9Uevv4M0PAtdQi3KxSMJjUpCBYulQABZUiEB/ju4yuN47c28rqlELCciGGAg6aMNwXOZEs7/WXNMM8JzVTqgq3iLjmltAMNf48VyX+B9JLELbtlqEZGe8806rvznPbyPlZXfgdPtRJIl24r6kw5WdYmSSo9j6bh/knad6ydWBn8QR7hrwR/hLWBKlbZLxLPozbQSPDpb1UIZWD/m/V1ocZ+5RKW+nNcN5SSk7fzF77EpJdOyZXtfPC0/3c+ssFZSo6Wz6DN3oOgfzD5s1lQ9ZTc+0Rtsaj3Q/RvvpLHVOIb1zrNPhe38BLeani3yX8kVUT6bl97NW7yjSsbx6RuxZcIzH/ojOdKZ5b67J5BBd0unmQXDOqJ7ia8dn6HFB4t6cmGeWBWwLhTnaEXQXWY1Lsae6ly003qMbmZTi975KzmabIxCVkgdIXuoV4xNUbT2hs08kKlOsNlM48FF2NHBYO7Jghp5csPJglf2plIu1wJLRVzRCq51i4Rwma+XVDpficotSEqUe3u3KpLRmxo9N3TG3R7h8GkgZOyKsgc398cKmg3GT1p/zsxAAUl1kC4HIA2ZCjQ21jV9SCwWSVcxAOrm/EnBdf4hhTJldgmkwOlVw4cVFqoEojrelphprrguOSTYl+etS3FTaSJ4YeTTRI2fz3ygYUuTqISoteHC+lu/EotUKGWa5wiUtYycCWeUZ+dUUq9pbpuga3hMptsUvmhZyaz+W8jjuycJ0e6gP5As8+YuF1FuftKH+VmwZmJhxXkHpD4P3vGlBlGlYCHTm6Co7K5+4LcCfCuxQSqAgvl8rT3UC0yPwi+rNnguXsJXcYBNOnYqjj7E6KdYpYbTal5I0ckSebS8x5WoS3/mGjr07YYwN71xB6E/bCrY/EuuqCVNnY9qwNAaOu4uxcc+yDNv1+mWxmxUaKENu6RigQn0nL75vSWx80U+Gt9k9F66rDxZf8DzYg+8EPydAVS0V+isH0tFcm8b5gq4EZ3Dlfjq26v7DZ3kj+IxfmnMbojxTRj7KRLM1VhVk1jH6x34FtJzWa9KjqocTimV3gFcupbFoOfI4PhdEC254dElookY9QPyQJoV/u7HKYxq7ViWo77EnWDGWjcvEzh4q6UJh1q8G7mvXIBY1QiwwA/9p5VCat0jbRyVxRi4s2sqLQMuKNb2GTunKkONJ0GNpWu9aWd/KpGDz4POjGkT6jkwiJ7re7r1Pq5V3QsV2/Wm5+ne17FK68cj3dIh5zMCA1mILzcHZNpTGqAs++aU9OsXOx+m9iIxUDPvFKOaxzpDiR7UKxr2gB6c3LFwbJHyUZZomMX86DH60F6uhxDyF56304Li1PchggmUIKZxONUJ7kN56r9le5K4ScGc4sh8Cu7pZOzQlhm5N0qFLHT0SCFSY+pXpFe2yW3VXhakVWUMb7kfiGMUveN2XQ/YZC+WZ3LbjqSDpzh5VR7UuqEDV8ifJycO7/wH57fwmYt2uLUQ6nj3nAXhw/0LwCNb6s+s4FMaiJj85JZjl4WAV5wb/SA0/OAeG43BhEISqh7Us1mClMfKefInIFvoZDbnRepEDNXAUiMsEzwtsH/ubPqvMSTkto98MCbqKgVyD0LUDlq7xpJFX9ZqandYDfo/y3e5IjIY3QpK2N/E/Ge8apBGF4l4h6xbUZqpJP4FNhkn4fFV8iXnE/xS0/8cXtE7Q/vd++5TGyqqmJ3JtYv77b9C8B3eI4WUU44CVnlSIZgmHn6rhzX7U9gK1qSrliPiIuxXV7htT9b2ao+1rs7bLcKO5uYL8j+1rcEZQriungMiefCtN+FAtqXcu2D4OHUlgoMJyiCBy1MPvmLCoNrTz94+ClUUAavw8nJyvbYBuJMWH/he/Z/NW/d8HvkUtK45CYnaNq3LfxSH7nLRQKnhdhSRvxle7jo5ysAHxOC1j
DiJbT5ncHqNc7BWeByDiL8vEqiCW8iV/M0JAcWaARjg9UHjLPrPyI0DmwMdGd4nnR4ZIL9JYLaf6F/s6Gvk8JpJ4A5bCQTVEO7DKq+jl7scu1CnwP0mKn8ZSqni47pH2ZIGSJ8+m/BjQjENkpiZz4kk8oEzmzsGth5nRJIZ9M7T/StoIDSqf2UiH06Sog74HvOQigUyUk0BZQSZqcckTtHdq2BruYyBzP2cDGbLwPw9fw5LYKH/8ppGH8EFuBPR/hvf559OAGLxn9fqClwaEmNJkSa+lYXMd4CCoGb43r+2c25MwBA40yiLfZf6Jza6xF6NVKbXRGOOFeUpL9M5SWEhddnnC7TxwlsFOgU4TPlvDLzkkpMq4DVJ+2vJ5tQetm841CSyRUfDtXUFFWikQEnqEfuIUFt3NPK7VqubaWKHij9SnpKuwFrKUHDKr3DD/yRQLLaKmYINtzwGPvWJ5t8B3D/S9q8vB1suBIO1K/0lyM28vffGU8mPwdgANsi4ZqvwVyL+7qLeq43qbVk1+CjcSIKAVfgk6Rgbr9PxdjkX9vYyuWIpL3prqrATB2gk492X00rXEzD0Th2OqbRcH4pFP8M7qsLEvK5pbpgfDNoF8eXeUg/tvWc9AK3WfL/GSbZs56J4gjspESSeQ9mYegcKuDSHfYVdZDfdf/5X5xFXAelDituy/e0z0BluxxS7CWyeEX+PkXngu8uDilq0YqGmhVH4a/BjTWtTWbckpmPTRZCy3uSuUVxpu6Upk82/c/Yw60EhiEGTo1pvyJas3fpHRp8dlDBK8aoge+PhJbZXSyqgP7zOm6BhhtveH3sMUdyQFEeG7brw5BYd88hmeKOFTf6VBlZ/M5LSu0Us7vYgBhiML/nL8kYqgtlxg7qRadkPkGeUgWM5CkyEnDgyH9PZKuTeFDBiTXe4PT9EbEWPAUAS0gNZakQ04xp6kMIOHzkdKc1LSMv2nUMOYp/I1BiCZSa8Ejk3ME4oxVrTdS44W399eBBcvJXEWkUAJkabl6ejWwVooLAMR/pBjXSUrioC39EU/Zkpy/dbAXMfMz5B/3ahhmAWlKNOmYQw62MjYWwWMozRFy2zKzlDutbxk8OZfHQQQYLKLL0ZMvxdFWSI5nTaMC+T37+hy42/PoferUFZ7ZCVIdQ4OVSTiTrj1GnciMdKTAvNhgHFq5Drd8qG9K9PeyCu8U4OpPG9vVi5FDixv0qD1Lc8P/a8F6gxOT3KS17rkRSc2lPjIcaTuDGitwzv/xCDwDh0GL3EebxkVsrpmb9MqqDI6FpjacYIX9qeOF8fwNQqhM7WZHj2ufTQu3SPMUZLHiCx9YGZw9YhpiOcoe4OWw6u4YmiNGoWBH9fbiaPwbXKhugv3WbEe1CN/yjJeWbn9qancET0Meg1edIESPEz5Hyudj4G3A8whuNpFNzjyfwIDDtxWePduttKxC3QdOzLMum4xL5cudEQR506HEqkpDX2WRMRwbX3I4gmDcpObEZ64u4uiqqpGZVG+GscSFLvu6cE2E2II90pVEQxtylyHh3kXZA0z1h4V0QiwlJ7r+veZO7ZGvhQyi2qwKQfjow37ycbKtxlKm6Wk3SUi6yl19z0jp0oe/bGkO8cdB+uSmUXcwiLaNSpmDCfjKh0QTyuFweCiZl92m3OE4/q4tFEY6bLxGUq7qimQK5Nt/QyQRdfknWtEP4Uo7lm/3uddPLIb5E1redVeRC0SK70YWJQlKDT8qPidO5KTkcPvQ0QZLYxyr4pGol3NjGnfCiH2ZkyUqU0xj6MoYkXy2QS8mFm8xx8WCHH2rKc/RIOw9g/KD3Wc66GPgnH5AYs8rOlSbbOqOfAZkUxia5kv4TLx/Q238WiSyjtzYg0q445zQ9PZQ53iCR6sWU6c66VgYYKKKIQhcmY/Jdmizr0l7UoNUP65RSFppD2n4/i5CcZZ8XAzpqEcNcuBEqH2xwQv/RfGXMEjsjUnFaYwNI+vRtEGeCtvdKyxXhielEBoz7hJmZT0PWtfUBUVwXIYg3b5Qn5Qh2L6pjm4YSXW3ALVjPM5vUcSelWlPt9oTayfe4ULjoaiPVVypUk26pXVp6EWkuSubMrK2N4tsE7fhJib8aG7AFROvQcS/W/iJxpNVyVxFIEuqvL9hb4R/deTKRYzAAiFSgop7nFqqkefi8CZH2EQiYjybqJ/R5C1zltR6yzV6O18wPo+CcujeHd8c+cePPR+TCWStgnAgsTQdY1wlz2AYTuFIIFWz6o6CuNPHepfkFX2gGV2mi7ImxZonJTEhiGFdZKE8zW28hPFQ1mZxvpckhEgKGZWUxK2hb1XakkSyzu5Puc7TjuSn39Z7xSb5KA0Phtp0t6ntnoXG4lI7c6e+tfj2YFSTGpWTwxvQ5me5JGefKina6MdSCv/RNvVJMos3oDBfiwX8JMH3JlfFyf+OoWUnN7LY+QvmdhpWei1i8zpBH/8pojv605ayIQk0+LGgXxSU6vclH8ZotpcLxNxFVwNMkjmhExGZLgi8Zd0XG9D8dny4Nph7n+p8NfPrLSdfnI0UOMUF62/HJXJXbb1cm51nMR6D1OA3rvjA+jmDNOYYX7v03g7LER8dxHYsZCZDqDk6nWcHXxFOl7TyKELlXPybsv7tIfGNyozAKHsSck8QwFXfohsxGyNL3YniGOUi6bUNx3DJsZxbtNQzmPNcYGDQVcRmZ42chj8TumaUtPKy4vrO8RWt1v+wtJon39uJZhIVT3trtjEJXJPi4VuDPDO6ojCCGy3Oq11ylwyk5PJbt80hya5b/juQWL/eOTyapZ2/82CGs2M9Sw7CsEke3IH8QFaGxfPgshPZxSMLhKIYLYqf8w654G6ZG1wKv+Oh9SH/5wwB9cy339TzDC9d148OzqDDwwSFmkDUSgIxj+Uy/2KcswMbjWKOl/e+IGrDrY55W1BibtJ8B8XQqe5JaEX5CsrugJGCnrttwx1vQg5khD/4o9LzvZ8sypGXYeGafZd388rryEScdLm3PQCvH9vOF/pXVuDEHX5Hg1zERzU/oi4lVjIzRs2D8zMf2JlC88DF3GFnJ591MOLx6p6cdqPD+4W1p33/Aiznz54r2JfJ0wIPYOg7TKqf0hQucqkYLaxZqsdhKFFH4bhOMwiDVQ9U1zudHf+7Dfny9qB+8bwTrizOHXhby8T1locQSR9tj+dUu1Hsj26U6XpXAhOLMkAm2Wo7xO52oeH4U04qB2571Y8Wf3zbSviedp0MM5uHuRuU2gCtNpJKzGXdGrl8jU9il6NPEZ6FA4YmWi7TaDkZEEkrrsAIfL906ANFH3nr0wnfqNmUuyCs0B/htBDk4ljvC1G4yuZaAS/svXQ+qZBZJWKC8yrPeWoHRpzG+OYk7ncvNITzhh45dh7qHWSnK5f1skLVLViWX7Vwsxs29OZQxApyfjoTTaqvb/0OFfTvZbQD7ulgkZ8FprJ1vG5TCZ/pg73UsNbIPoTIyspDzNxKpvBIu9XUG4UxQO0B6iPUINnaxUS4srT2wUFAnrKzoZUsKpcYDCRrYYAp9uzNO80IYtlz1l
/WacJCfRnCUhbmzEYxGX4Y+0khpc91LjY7DiMnbIcz+XXd8IMYCo+zEUX+yNth/EMnTMZ9kOlAWg3MV6FgCxbxj8Z/Gk6qWl8YCNPtgskBicc8eJGK+K4ZOD8wcWTywNX8VNWaqRrxdFQRTPVEaXRLEolUVCQAmObZolq2jD9lb2muNi5K78HVgnzUS1vO1HRO1cVNpLKASqK/hFWBPv2YtE3MoIZ9u9+8rsTORgH2+mCxprPQN71pXyReqe0YM7dRTuT50QhLTdfFYamwZjMiRhTfhaZBOcDDf9BYoOPTkw6RhhtI9543qWEtVrw32A5n2vwcSxLGnxaX1PQjqbrvRYmcIGEFsNmr57a9uQYLQUP4veNbmb3mrfow9SKxLdaK2ItE37QDGxzhgYKjGaHp5AT4LoUT5cYy1v/IS/KpNk9Zsdy2pegMFzVKUG533PElm6ZuccuMwzO9RiWlCE7xWLdSLY1PQYSw6SZxzTg7/3UxwdtLbezzWMGkHf5segxKlVQiKWGdNoX2HcZ0NcN8RNn14uZ+TIYRv2UjEi+n6UMTMknjePw7vtu7ljIh6tO+CGxZpQJ2BmwywMPjT918xjS+riKV+xNhRdgk3G6p/LSiG8fjBm0LQH7ydW+iKUhKDhJxTgmFT3PJZ1jnEZgiDmdAyQkSRS/uru5pWT+poTF/+1IwuGoBpKjBAgqvO4ubTm6I0H5Lsq88h6thWfzbO/+Dt40Ojn/aALt0+2y864K6GbSfDXnauhzNj+42e73Zgl0AHOvgjPgaX1LxrobsqHrLZqgG+/xRFulv2q010dIA4gRJSZHHh456SG6fCr5pfSdFAJKRsuPwHQX/dMMTpOyB3zQjMrrH2wvjDG0Pfu4xUkZN8h7w+gp3+VpUDHlfGFyXNiO6OLhq7d2bIYjaQxm3ddQ8h32O1a7vTlFNjdjwbJSAMBVGRzbCIzR3hhPCjJbHeAuyQIbF5rA7LjKuWX2GoV5IbZ9Hf3bbqUST9mrKy81EkciVcHCW8zqVLswuviz1ngEk/7U0nW3+T1WXNulHAA2XLJk/gGasNyGefKFnHt2QyNwP2pfaq8VTalsOKu/JcmL13oFJ2R5EPbvpisKCT/cbuuLC8VG3y5rtuNwTks2g7cT/cIvlmfnfBD3oEL86KviRLQ2oC+zDoE92jSubQShX+CfVvvyp80w/7o1WJmCZzYy3muzyvwSkscfsNgwKjfrxWhVuTB5sdi2/mK/UZ2xDoULgl/hsgRipttRiaI4YXnJuXOk/qcudXyjhQaC3shqpTeqgZs8jMWQPKalhnyKoWuKg00MrLWaOPMuadjkRHsjHtv++gHn9bgKE/BKxdRoGIcZ3/QXDeBRsYrD1le9tg2W8V82uBfpJ5vTUIgwbineTl1C1k/6fwDYfjdhJaQDjmo2nGbT5ouIzVlNSDnLvAZEEdf+Wn4L1xp74CRyo1aUSHFJbm4xWKCrNSE/gwQwjS5MbQPLkns01AfTrjuF4XBJEI9Sgj88G974PzLr1bXd76C8DHIfupErS6I/wwKkVqZktkkf1tPtOqxduw/+hRe8SemEogFfi/ae24WSbjizCOBTXnhmxJ7Q8uuXVHQSZkvtG0AEgAfVVkmgqQD7QsamQ8dbOBpi3HGTDoDXsncYru/4R72v6E97lDc7v+qxftXcr6QyVVRRrYL09QbCkoTOkb2YnpvuaRh/ho2Vj4MmUZRfaaWvUnhM54kMovEZHzQxFiJhQOwSv3eSFTouZLbWPPCSUKdMA9krCLQmvg2m7q14gxeFd5C8WDdpJL7T4cqvGauyWCuRrK2MUqfh17btZbxG+1N9Q6h/rFGVf12slm/OK0u9ym7diNcK8ozdez+lTwzfD8zrwUcq5ai2b9bQ2AtzzuIdJMvaVGSr5QIvXqMpTtH/4NFoRu1oqFQ5zgJXRIp7IK6PhrmPU6hrbmaoIEvko/jxBnJxlOwDL/Zl7NfbvHzraFFFgknALk/NYo4BhDZpboel6uCu+sFL3iszfqfwRZG9K1HY5RMQT38vslz97ubSM7O+AXEF78PTtszkGytetrlub1JT9SvHF73/UKNoThFKAkWz3z0mAjEDR/MZWrinxqsJJBuKYx8hUvL0XdfdMuS31jld9w/+hR5RrZUr9DQAi0AK8YOmiurjEKEdZkwmyBIJFo1hO7j3/py2x45KSwg4sqXAZG3Ql85EA2YxiN73buUp9tbvBP4xNZtbV8J8Ryt1QBiASO87Z+pFhs5Ly6rpLbJdQF7L1UV5R0k7+GClLpHtfFKA3FGFkStWoQaXdySW0xI3Nz7KOi5EwnC8ISstGeO/ScaA4+rZ1rz2gxhecDB0tmcMnNgxUWcXpo/K2llA6AdYmL2+/TSgSFjhRaTxIPIqM8hpp+8v8eFEBBeld9B6fktKVdWukn4lu0AnchLGy7ZF8kgsK7JnoHGE56X9Bky/kKRE2G/l1rrvMFHvQdC5pajX95gGdAj8J8ZMJpsktWFLEEiKXGIRnKPv8jFly0imD56+DQHJpZtMgvu9dansFjcZwDV8FqnGguEWpKzkMApQdJ/jgkyv+2/XXEs41u6VnrlEQ9PDfpvV5rPOuhB3HLLd3bHkmw/+JrO+fCxI9290pU41WdNuac8BJhB5QyEuk5vDtqcBo7/pngXkCPNtD7sqzSBNpYVM1jr+cChOVvlVwyoWECsXc9+thRxwbmSmtEpRsjVfSJmUPsfA4t5QjjGT+W02jubzEedZmSG1redqRdfKb2QT4z3OPYC97Ac2uauviGudnTQT17WuaNsRtAAPu1QxJ1tcuX6aZuDjBtsQ5QE8ZyPUJIYP0nydy4Q5Duk+0zZBZ1DiQv/OTLkJnaCLGjrw2O9REDXv7e1hhP6gqufbmeSRXwIqtEUK1ALOc2KcgWn/WQhJbwUNReUZ+jrz5vRdM/ElqAp0xv0if1sFuVDdAAaDoPp+Qobq5lcn4uZ59J/2hclKZoDN6N09cuAql/VQOGFIWk59blCNT1VJ/6DxGMdN/7kdTHKxStlUQLtHDsgroKXRnFwT4LWf8we+wrq0WqnIG55yndF/LXn75OOp+HM5++UHuHfOnEqLNfv6rVqPwe3/IxYP3UNuSqvfbzgUJApuXwI8X/eu0mP9iOk+ZuT5SIYqUDWWaoo2kFlG2vldQdCWXAnuzrphZr8vsmUB9B75JSM4L+uwPdeng8rrNttLDff9TrIkkYiRSzCPA1XPaP0NxBq2LRX+3H/RwGG54KZzpVlu44az6KlfB+YwkkWc6d5VccUDlh5PFY+qRrRoNAR8at/LBmhzfCIqTzQhCllwgbYVNJWDhY/0wcZ5HyNHCxYbo60zpVKLJypoE+ndW24eh4AStGJjw2jKxkuzvxEzqSo/xEhiW9EfAAe1XqXuReojsYCdh8Hoe4f59cfO53rvz0CBoDhBc4p2VG5uC4iMORNV15RWHesgAKS65PT2GUfOi+HFiT/qi8oNqs6ehv05xfAIk0lpQZuOamT53L4it7kpyPfxeU1+Witp18JZQ2QBnY08puo4Vlzw/RMzTT
7b3pA5iwQaZ+fhAhQCEqd7PTF1q/oXX4LSoOBVsAUMz5MMj80rJ7x4093p6i+FfaoTEvEysyXLwNiNtJKiibF3+okXljwS19s5U9Rn7Ul7OT3zQXj4GkULsuddqu/ObEhktcMxIZz6Pln7n6FV4tMDeYghDPuvZIMa7F64JH+BXdHEMdZyAVCUhmk8WMi/n8S9sHJDgoiTwZOICNyH8O8v3GVlAlZj3p9DA+zZwErLotnlrovJMGL8KfUtlRtWLxIDY1VbnlhIATg9L4jqjYMQBlJvN0Q853lgFum6fLEquJMKyzZEY/TxD2JDsw3GzRadoxZFaxra5HPvFEcE7peMLW84qYWNDA1VVZ7OUMbuRTQqRen0cK9oMhZTkGUO+fMoXo5QXSa7t0hb+mL8bZxRiskr8QE65WEmcmnjNJNd4OOfXshxL9eBKMjVfn6PE+mlJGjp1pmhBfAmg0PPmuW+wWGrLh9b+m5X2cViy00lVAJxWJUpehdYM0dGwXqjQIDwzUgSdqT6Sa3h1D2OIbk/x7l800R4YhttLH1twlh982vx3UrSVsewGv9TC53dUd1aVcGA1QWfjOrWkjNzLKZogjGEk4JOjZB69n9hb4IzNcpOOV7iWNIim8Lz6koC4uGt3+WYsOHs4oUGfSRUkK13LoUnf0SQBTSfFPwZUrTRflyIAdrKPLylFOTS45Zv9jVpVzzKZ/1zA+1zwdwKmWNnkMUuaJuMy/Jy6G4VJKVY2wYina4xFCy0yMKUuBnjCH2NNh6xlG9f0eoz8EqdnYOveAfHTYGNuRTXy1HI+zJFU5QefdHC+1iCmsQVS+AryyYVc8XYswAqiDipBdRXYyA8a+V2Cs6JMXi1NG1+MQKoQ0oGLtxjQpPgvXHB6l0kObc4O+Rqcfy5dLQK5zchwjHVS1V209WDVynx0I3YwDbtWWWbeqcuGhjbz5zY4BgITWLGyaZsCWEfL87BhhvsD2D+5IhpiY4YgUM8d398nhQsP8YCPXC+CQpTJXhL17RQOg3k/PaY1JQ0FD+ep5rYNbasDrR5lqnOURjO9NqwvqRFI3prfD+eH+0FAD9yN17wlDj9de7LsfaUlSStSQ0jAkSV0ANF7kTKLt0+mie0GJPp98yHCNAsDIpsxE26UYVdtctPtPuIuDUoQMYcPE76XlG9kHQxKD8KUktz97FHXXavgjBLubg99ubLWos7+lfgc7sbNMCnR34yDlyZQKmVCzj6X4VhFnlEw+wITGAHJpDMVZd0C3mbHLFI3R2tZHwDAjpJZwT5urTfdIYiqsj2QnJ+oTBh7q23fnWIg+zhP/w7b3oLBm/WCF7+jFY3V9IekRPkSFJOri0L0pIPYS7y5a12q2OTeTasN/F2fA9oDkjoDtIdvqtHy0QVs0571jZQCquwsaBFJ2UlLYyAwqARr3CtIP712Lbe2XdpbeyPafuzZdcvSPn4d0ycyua5pjnK5X77B83j6sJUN52Np9DBqirVRWADFgFNLcvnhrqBUUnzJfcokFByp554L0C0cSaas3FLqb5pi02UYQSVYMsw6s7JAB4UCgeLu9J9R1+ygauFvoM+VHWhkig2o2oTO8xsPfgkkKRgr43AYBWCC2LsInOA/NYmHrwzBkQIzXhc2RkufHRqeekshMXJgnvFXR3gzaR2G/lWB1RmUp41I8K3OWeRC+Cr0+xhLOYFXZVp3wAwgWXaQEgrOUI4vC3/abLth/NRzhm+o2/gWl5sg9vAWBeJOR4V7OWXtRxE6VHRZd/Ct7ZYLeFKAlQqqFnRaXQZti8+IpQ2ofd8qHK7Ol5K3EqXKraiZTv6X99UyudJefMd1UijpcHqRCBHOUizO+aliBtxLR09Me17eXfopHvFdZdtdAR4GFWtHr4YbjahzSv0dqX09P4FADoPmPHHiZbKbggLVy+XggcJF0poYv2zOBaWB67vI9RMSFq2P5lo2Zu7Y388HeashHrqmtYdrHsxa1WgLeuPXf2SuZF/1RDlXYT5ErKQ6yeQaSpDnGIamkD4LirRCDjDySvGInhXHfVVjS8amEFdqOZuy+ROZBynX5cueMjKPRPWGmFfbtcIq0j6OYh6IsmIezmLnfaBoqg6Eh5giYEUbnHkCatZ0Bvp/64Nmvbg3+7h4lm9Mo8w/ZEM9qpL3jghlC49vTBBB/2K93oByF/3mQo3hTXKRmR15Xrsc9oDxnhhm5PtclK9WXMlTj52l1PyJjHtTumOoh0sv6mbswBmZw/aXTEKqN4POFmya+xxN++c3r2UHX0laP9OugZZVbB2A4IZxK36GzcLsXaZoJnkHvV6atH0/7wYT+SNO24Ms7l1gmsqsdC5Jc18y3kyqVAq8JaW0kyzVC0CaGVbx8vSNCcRuPx0vyGldL1/GhJmi0ZroXhTeTGYbgUgLwrqjus66UjZ++pzx75xVrElkM4Tf5I9y6nxN/4UKtXR8nBoQIBRbO42G+Ol03EdrLaumx+N6CrIq/pSGgRWzox+VnmcpTWTZyVYia3F4nmZ5u8ILNgpbsDmrAEZ5BB6lQGcPJ5arrXWn9xMGqw4ECZ8K7yV1X9drFiswP1AMCrwAPyRvnkXsLI1IbpaBsvX8KUDIhhpNr66991gvW8y+vaPt9rAa9IzyXKtaP7C/zOHG48HAhqDGUEH1JXbfmp/JBBu7hAOSGDdx9uOS6JfxSQMVOr3WWDdv5gemWTOf4yqUVD9zjnHOwkt1kAT4zjvOinUad0UAcpM4mYl06Ndyq8q/i2geNyiQ0rM93Tf7HAalyw6UcbCbmJb6dSxLqTdcmhZpW46XPbiIIJg/2VPhJqwypHwqHascS7Q2lqIhOxroVvpybY7F8QhAPNn4DYC2+l+kWZR6wZLaU3Y+BntzoR4fT6aSXMwoM9ffIMOr2/tepNR7LxZGaugtBJDdGwlExuPFzU5sTlqGhjmok/Urvymj1UkrOdgmH1hUJxkAh0HBNZXLbYpOcFRXEIV/7Q0TzYxYJXJmmtd1oxWT7WfsE5hpNuDoWtUg62n539zOWxqazwsLvST3i40cfpVxoP2+6cPuIKakfivcMS6CNOsnChXtFdwqIj91ZFMHYQaROw9INNUPuKr3ucqBX2QG6SJm5DK5KQw3ZbHimkb6BPc2JCMTc5VSXFotKgZBb598V6dm88GReurFZtf/Rd+b9scAdx5eB1Vje9pmrc7pWYO4HvaTK4DI6ytRqgXlehFCnm8mtl4ct1OJ4IGLTqnmDsq0VquE051/t5b9ovvgwvbq4MxtmFxSoELGRq+T0cGEkEIwQOoSDbmObakdLL6VdUgxIg7hq8XTB8LUwYC7DExMEEZNGrUk/I7WlPdAy6JLXYgKWRjXQXWM7GuqWt3R3hycGVn94X3l3V8YTgwfRHz2aQ5vg2uS2x5vpbrG5OsQL6+0UweVJaalO3tc0fewNWwkf3M+Sg7qRPflkQzjUL1SpB59Ur/H7qc71GYFc3zzds2APB6NvTp4KjcjqvUOk55XiZL0G0f3hV1b+5isOvN1DLFFy43uqbfXxxcZcYkHpne0mee567ayiETJ
7YB1MPukr6FK2a0/qMH6d9WFhmyx2AYjVCOWZ7wdiQUbCOp/aIbtQLyQ/JEk9Ha1ufDUJDO73FvDNq58nTZahzJSVC5RPaIc7j7qburIZ1JTeI4GZa3sIsgQmRYegxPi4QkL9rmuECgAV7Lo4IqB6iV97Ub+g/v3iSaloph/Z5IfB18ZMW2Csto2yc/Fv32XQ4+/zyKhzR1j5ypwx2lxgGM+IIJMWeozAnSs17NBEhex97Izz1IPwOG3L/AQzkpWvBSZmUGR0j/iF6wbe8s9wik6yTLGcXXBaj/WwS5Gv5K7GlgbanqbK+/MpqrnquipqiR8o1l/DcG7+EE9qY8x8f4t2TqgY65JaKHMcr/LsHBlW0K/hFD1sDUPmd2KoKOYxohXW4xE/xT3jc6KhOgg71pqrpLN7ZPZweSl2WJMVP6Bfy//f/3/HzXsbPrtbPrARCsIeka77s+XbUTQJgFDvApIkCgZhJ8kL/LvC0knRtTz8LbKvCfbWNuNfQcczRVNKh7g/WAmOGXQVrc+m02+x2Fe13VDFxHvlY7cKUezBvdSZR2j5PtdGbb2RZ0ZEVTHpzLnWEt3LqRuRwrS0U2I2hlZPdpYkcSY6M+juz4xRETbH+k+TUq92SzcXU38Vf5Igsqv52rghyYQ0eNrx8lZlCOnjMM5hnnM1hLrqK/omAGjs7B3F5I4FsUJCNV9/2sz25UmSPqWhgEgvmjAgZtGKEW+IXGACLaqe5Zn2khutY8HU1ak2/0NSNOZttit+b96fQOrEKU60upGz6dKV1Bo8tI8vVans8cmEY9zgsnhQD2Q2RvWGbhi3VsE7j70dNj85l4YYPC4ZZ90S9A8oKdWcUhGZh8G79kf536lhpTR0R2xmspVgQysa08DSPRS06sz/xUWkyxCVCNwi4Fx9AgRIKjOerK31Q2QV0A1ulwhfJCcau+yTptN/kBUaM1vCwryz8tyIfMbxMv8wkrjMw4aHTp6fuyJWz7M8SiLn74KWjHH3qZhTzUjN+LG7e/NQAOeyy0Yzax/YDDRwRfK+gwGuKQw/T38BCh0O3ptGbrBJHR3cJxGeAzlsNMO7mD6m2PtCkcPfj/jKlWTi+wd4f+Rbg8GoSAAAECzbdv1s23btm3btm3btm3btnlD3CAPWaa3z+sq7zV8V/sd+d88xL/TlOROpkrVsWvU69s5iC6WKAOH3s/343zwZs1bbViDT9jnGgH14Gie1/Fku9SxbL/zWaP7+Hjz9qCmT1CRIAVYqwQiM0SQzrHnTsmTr84aR3p64LtpaW6Mg3JtEpck/hcrr4LAdDC5059KleKMB5gTwSnEr1ukSmq51pVfGVip4mQTXqY0bn7ZRewhuySuub8GSNVfL7LB5TKxJ6cCzq7VAZnvD4Kcv59YZy1fWTtd7nHEmFlfznMjB00Z0R2TFCwz2wCTXyRrHZ1bmLZgJIpbxJ92axbsACs6twni1Ek4/SqgoEO/OJ7v1RY465Ev3umrurTadqdza32D8a5+0zn1MLAubFQAynqiy7juuIdt1235JFaib9MdpiSNXOJc5TdgYdOM5I6QpS9LBbDLwUCY6KKuiqfWB+hPudWMBhpHmBmvNF+tbUMyB4xqeooeIy2y71PxOSbt6q7RCbinF8T82g2xnL6ZVjyayozYxnMiXgOmxrvyWKx1juQj6hJnMneFsIkXNt6hnocwI9YBx9Dxz3TQtukdK7p3WGzhKKFNkIPEh+Nw03+50U2c94KKVr/INcshKjQHdbyBfl3HDef+l7mLjaf02ITUag+3KNGrOxTucLo076qHAqPw2Yncjl7QDr2Poz64SOZ+b6UMofEMpqugrVG1g0nX1M0q9NapZQowwZ90+LeoFp14UFcfaNxSvHIaXHH2o6d7W1hZWVyZ8zfYwVCkyEI7ogShGNwSSUAw9qarx4Ja1dobpcMXS45b8aV6iIAPBS2PUUPtPKAvGRxp2QmKbpcNvO0994HADdAUtPBORzXjsR5qFOp3jHHnPqJBFdlrAWFMoXjW8gz3Hl7l619s+PAvglNbEs0x+ArGhr89w2z0nOcbwC2oxTxy+U6Pk95SJN5h2Yrr/Ge9JWOLzNmKbRt+kcaskDUwdit6kw4pAdZtlTUoXC4ydufZxPm9nnpHZJp7vdsQIfSbusrVM39QwaZjsS5I7AU6VQjKMkePLqvwy+38MsF8dlKteQCJFrSmVuyJ9OIW8LtKnqJsH5BPl4N9Ufq2AT/vJMpNEf7uRGPIRUPUpTbKK8xKJUBevQpIGfDGOsXKRixeXwYtHYwYaWVcYXX6bgHiXuN1WKadEt26bM02JcoqwUKnpdk/BXCuSZISHTM9jC6TMLJSX7L+8jZZ88PrF1idyweGbNwqD0LKroFlrO7fmvK58UECZYRyeALrYWchq5JCB9mC0BI/yENIy4i4XX+74Fl5iVfJSLSJq9jddB6nBR/GgzpmAzaNshZgrks5shsJefTCIKMPsxpnFPhT0jErD+XuuA12NdgjRdYRqc/wQakY80qEiWyZhWmlbSQpdfwkjCKKYITN8lyUb6edLk5Q2q+YePOJwvnqR0W2EoviINL6OuIhfiTY6YqV7OyeWLEssiK37kq5mf8qE0LvU4S2jbdq1EkNJjss2SN6lWsOZhp9zCTfWYlaFoNjFWapXqJl9KvkT7QXHaqOryAYhGxClTbYOqjTVcxz4Vnq0lmYQfRGV47MS/PTpXj7jgJQWPp5Y+IhqU24uuLPJD6Y1aAiBvAK9ChZSY75O9QA0XxBFxk0Eew/vtDBvzfWlY6e53Blj11i5ExYQLF4qyO7MLnpSLl8Vnkg3pudwQQfLG/daY6M42R9BaS4OcZme6O5tZqUTyn69ux7fAN8Xuq5jwufqzYq5yRh5qZzy70G/dFonEX8Y0hggUcgL8YebxrELimmzesvl5tS+vzjJ2bvUNZ3r6cU3Pwdjx0ZmfcyKkjvaPNy1QjiTpWc/pLDrFDEWWOWlYSsNeYfy9UO3YgBfbvNwewqwFbWr4bsVlZSMiAg0DIX8GXjr6Y3FxHnRs5amu0WhGmzEFk/Um4yoztCJbAhernJIMR9UCtZSJcbqCDDuHHxgcwb867eMjawCRu8WAjjKvw1UhGZ92752/yQyrZPrNJ4/3nHqNlaseqLQSVitDsb3NkKt8E0BOhI2YU0yIVWD/PB3DU5TdOaSAvef2AJB9hYFS12x6ptjVw4PnpOPHesSRrZOJxVcv18E3BnhFZEzWiRDBPtvILbgkGYx0v5pe/6xXwTY3er9nlXwXtoWHTR5IAX8JRuOiDf6iw7ttxdVcwfKW8TEsUQ8ZfGhwf/A9ixMlI/ps/B13pmsFbqp9YzM4jqsi+byRrR0Y2BRYDD5XjKRTVhSvBT6jZQapzcXBPaSCfRM1vLSV2npaLJ5b2CiHmoc+nV1LKfUz6KH1cQ26K3w+yuIPewJQNZMDqc65UR20QHLxTGaFeU6hNrzZbwDPpARexD2Oi/E1hpN16Fs5oPryE2O0kW+SaBj7r/NRBwprcRY4ghAWdb8pVsCmtnyGedgFbLPO83Ocb
5MsJrH3qQE7rwKwakcMg4bb2FPYWzHhWdaDH7Blzrq8wsIb9wbOQc8SJ8yYOOZPj3fhzEi9FyCFiSEphbbwvNjyomdyi/5MgPD8j7CW0/fL4XYA0XBRBDXlaOFqMMfyjP5htGDHEJDsyDcTE7bnBIzZzkRi/4YPK5p7hXd491mcLmfCg2rIDk0MZJN76dfNfnciLMXhg51yc2kTbCMdtip9FWipfvOruBuxQ1YHQ6TVYhJb0hAJDrvJ0Ds0xKkv+blf9NmNg6iGhPcNRbd/1L5TZO0X+L4YTYAWIhvdSbDDpU1uIx7i8pYh0FujU0Bmn82O2+1hS281MTvYAyt93yg/je0ZrkMCmJHFRx6c7tj5FEScq0BFEhR8htn3l56yDJ+wKQLr08s2LTnnYolT9i1m3a5Aj8BH/xEjXCqexalj3h1w7VdL6Ep7mAr33sbGFD/HDrWcVBERylYifOex4qVMieTYsQcSv10+kNH8hObGQRAgzejymB2E0aaQJYusp1T8g8auXN7zIL51ZMz9v5rtRK5jH8tu8Ot3MDFT7qyjtrEiYJgjrL5th9qEpTcjkl83ou6Mh7mmqfQHpmF1iaZBVLNO441DvjzP9FISh06Y7daB8RwqEKy6QrEZ5xRIYHbxReWcitj8WU9+/KCnzYRDcW8rBKvVhCWcFJSG1IUgORvJccid6YNjI8q5Vfp/IcJTA6Kv0ySUdF6iTbYIHVWzijlCSJW5YiSebkKJ+Z3uKgrB4JLys26x+kvhOYCZBbWeaZ+b+RJ0WrYERm2uZAt+J018kyg2tz1Ex85KEXu5lKYHizN0ZzspUNkn/YIavl+wq2dyFBEvCMI6iHldOeaSmMF8+JAxZjGtTQLV+aejrS+un+6rb56RHNnpUc/sKKrWUlp2a8/PStnT+DEI92fs4J07sHDoHW6SOQ+7cQakf4RhCDX/0L+VZLBAF5nGWbVm0gyOFGwiFTXO3wZa9CSYKc++gEiWvTObhTDmLotGwg3GuAlj3phPiQrFb/Ik9KcixiBdLpEVRuO/SMP1Uvb5a34Mlad9FfjmzaK5cANTrOMzqjzuDI4z9lqaY26+HWcaKpuB3x1ksljXDMV9LwPZEJwfyL/iyRatDFsYzuDGVgjpEQTX2NKt8AsutmC3GBjmdbcXZ/zJ4C3bTszjbA+8+ixj4OjEkcp1KanziO4wLiWq2F7spsX7BhNWh6T46X+PQTTqWpC70sy8qvBrdytiUYKPZ2C0XjYWN1jXGnBqnzIqbNOwRowt+pwYtuIiHryGrFacLEbae0QXtZp42nKXD3a4rmt3DFFWff5yXVznTMIxmMQtH+DbWXXg9wUCgMxttVaRzbbBrAyXdDo/QTgJXvP6Cavp3XVGPRQ05I8sz1sbalVPnYRZ9HaICDgQfeiQR0UNjodwuwLqYGgYSR6Ha/nzGmWptyqt0khOtrmyZGF2XaEpnR4HZGtTJhC5Q9c5cRuZbWLR1CGqNVjJwdEXBdYlBq8zSjNfvjrDEp5zQ9EijLioGpD5czAh6AS8O90tkQZiLB0TAuzlHKufIutwa8qOUnDuSXnQD/4QbAdEee6gLHskC1hwyLcaMp++6o4loOM8eGqmA7kXwiGbyxZ/UAHCDjoTolgnGFp8c8zTnTXKS7CKiBE33hKKYIBe3qYJKXmfaGZs0dq/HBlxVSZ6PG0DKf74L8zLTWwcYgMMRVIb5Ja5OCbbw+R3yTQplJqLytMMgkTwU+wz09LLFUt36UlCEPLnpjIWViOhou/dFltmq9EGfEGOMknlnuMaMtd7nvutg3XFwgBRWdf/eakKwJG+2eBp7LKjr2p3BbYVWVLZ0DAM1cZ7FeZPpZ/5klV4A7t53dGJyyMqTlGm2+CB0Z8Z+fpfF5Ba/Kpe7F6QiAOgqrqg8dKY6dNUHCrIdCOP0r1WrAdIBmNxkrNSUd1iDqSF2FcZDq8UpA66P7TaTEscnerme5kQOS5jW7SQRi9rh5yeBBapYOnx3Lk2mKF8XBB3QEC9CpvftoKBwGpHxNt0dpSMMyvslaeSeiowqHpHuSFiUkSEkNP/5YfJYO/QXPIOJg1N+nvysNijEeqOWALZ8o0EIY5j5gUmAedc4sKoP7Avv6i2jTZhVoCgs230z3YmrL2o8p+VGbPUy/C30l4epw2aQTNDh/U8JN7Cx0cZCCrrBecI4PJ0LAjys9pPGnVBxZL4/6LHLJRt3TkGkU7uh5q1uLYxUTCovoawRSJmw8M1nb63O0N9AlsTSnBq00Smf5Cb3k4GJ4pBAKrmR2wTqqFtljbfiZ+ha6E0/jSqbH6EMSa4JonPL4UF1XX9+TcBtJgz/ZXBxWMbl2xXcxl+PexjZm6+IEFNZ8UMBmSVUHcWvnCot3+bwBv8CaLyJ0Te00JdDnI3TIMlLS0UlwJYfUzZ0qDEodVEMs6Yr1mZziPMQG65swqKgPBXmpsBEr+hWDqFzjT8+Uh8EwG9HfcR50Hd7j/Trq0JmULbEWMZr2PCS9uozffzlE+lwymN2jWoA5DDCYoEQFYeRHclnjCCAEsTqEgHFi/uScC0sSSDmJYodo4IQO9ojINSskpGUubiIb155n/Vh1s9nKqKQw3tlK5OP4qVb41t4OWeAIq8nEqkhB/Ih/qKVHB5bRUcWwnWS1GLptcCxSNs4D66kkWDAgm8qOk7VnBY1ku6UXYWGB6diawxKTNzx0LoZBwpNA3A++KI3HBI+60vTv4qEPHM7359DsIShxNhn5l4nd1D8co9Z7X2JZiBP0HiF9RdIsTtABnVHNKUoL9KqrsJdZKeoNqMXuLyVj/WpALkZ6OIX5oaTxr4zUZJ1fFzKxVDAjc012pRlfh0R4ygpeYBvUJwR5djfY7sdR4vDOJqJ5bUu/Xw/B1sZQRAZ/bXg3g/tXacY1cElopoORMaopAR6PDP7mPmJC5m/GcFyk4UXMrQRO25xpZz4emjlbzNXLqlGUpBBwPma9MjFl8+nzHNjBBNqlVPmxDiivn5Bf9fgCd8laNtUPMbWN52DGY7xFbI7/KHxLOfSNmja9xJUtzoOhlrLwO5omCmxEsjI3qAa0bqFRpgyabtynU0lvi+eRgdJIbc2O8PbONdN+/q6OjlCw0NVq461ArAcLJqcuqKszRmNwiKdX9Y1mzWlYKq/Ah5XQeWksYngfIvxHCl72koNyru4yN8+ZIx5vtf5XeJn+FWFxvNeq9p6vnNi8H084LWrNFu0zUVOKIPiPMNBeRYtr9/HxYK3AfG9RDTlH2FuMHG3K/4rbNhLX00MpF+h7n9JcoGT1AaUPSWq5hU6Pv0bz+O/q9rqcgp/1ey4kfAitAgxKbwQ1UH9K4UxO7NeqoJPb2bYsiObBBf/q2EbqfsxAqFLdaotqsT+jmKUolo5UQhvvxwpEVX9BBGnJ8k7nEm+/vutZ98A8dc7U2clnnsiJS0r02dXQ+C8ts0pCCnAedzau2a2bwrGvUp2fDLVKkBcdHVclhP5uYlkn8uOGHgD/yQsbLSETWZOBwpeTh533aTLZOZYJSnFAVN1muMMwVqYwnrtQfEPScf
+jh1S4g0CjER955YZzqLfZ8F/xcRoLTzz9Xtgfey5RU/fwOyAAxMUlLxKvB5goG8dChyw9h6mOmbSAh/uBl4yoC3id9nGYU2xgnZBOckXDBh/rHgVF5fSlao8+CIvIqIqrpBltdleU+5nScurLPA0n5BicEbUFI+07FhyqWXmq2Zj5VdvLt8assEzBecc9e+C92qYs52djkVgZEsidk6MxPwC9kZFd2YGZu+XFz8zxu+w5IJKoy4ZcuUwkKUoVv3MLC1zNqJJ+cnHHXwbCp87dcRU72HgQ3uyzsjY1u5N783uA09LDtE/HvLGbjD6HHNR1neYrzPxJm7DfevkUZpJAmTZSvyKsI9l3lNaK2paeRfrvJKhQGc0pPjB8k1516EhDPFNCRt+GW0fhfrmoouebbZGP7WDPGzGsgkQYQ+YwZguwypX7T+TIhFrmz4t22D9sn1oncYpjJyxXWMm/Th/oTnzwqKX1RtFHKYDC85WRhSMGlxnBMzUZPw3654Pb5uIQJcu2iI3vtjH/l/YnDmPZr5uVQTVLSbrRD/ioE7w6x2mjgS95HA8dewtfBFYpkM+u5Ja7uic5ZFU8Si8nsWZVZlXnyJFN6TN6dBtQYpL0pi3oqPdgVOI7ype+zyYsRZs+5warz6GiSg58ZxBTl6lyd4gBh5B+bigR7j/JAPBuApwpVkv6DMLbHW8g4IggSX8MjIkkR0GcWuXLo9/k0C9o7L76VO0tje9NuJLXbdiF9g/5xmiIZWH5UVEhtfza6Y4MU6QlLyq/ymJUF4Q6SVkxtorvnkyk1FVi3ebEWpHNuKGpsgU1AJWSEaNUGa2h7QAuEu5nFxZSvrE8E7zWLJnOaPITHfdVr/NnfDA8T4+wr4iOsxTj1gxzS3IB3iwWVroH/1tMx/F+hAHQPYv4N79taenpuhM0xIB7hGmoYxBYaC1XRVAxey3USjU1n5Qy76MCcgtXQAL3Em3RNcLUAvHFn1d5rXmNY+KRFUrdz2nqwNoRfJ0MrelFxo06T0Uy3d55hc2c36heCiJLC8WOOZMHfim0Bepj7dXiM4SzX7XX1JJ5Bta3O9VqVwH540zfnHwfhWKaSaezKCgSDc+IS3PPouq9mbmuSJPMtCTHzHmQDQb47pWqlmLh9pearkFz29UItQmcA2uVrFpZBPxpgxmRJoEBOqrVbyHBZPA/5n+LNQjllQCfikizvpTi/mgBJrZr9FzR6VRGnSZGozGrGJwnJevyugI8jijmpI8uEyIafcayzMt81OPLOKBxuWImiu4WWr2Bwjx16q6K6MyiSR+HRRFzDFHhbbnRizHj6kcPpagKoh11eKxpK17OLKMSr0N9Ic6TGBN3SfYgxtDkg6Gw9fL2uCKFPGXYCsfHki4n/q07jMu5QYFwu8wkZd325+R5Vi/LJF1+6tdd9sqrZDtUpRxqnTSAY39kXmREXV1AAFOyZXac2xrI99VdrU+BIcwTdboNl0imaH7UAcEBpVlhoCbEacxoBrjAEWzhTPlQvV421kuExYcn4LFHP5WziNQPKLqkDRMu/656scIrTylJn5/lez1VsKHEbGSbpHlMquP9SD1+HM4O/yWqrBaFeosONS5vYb1yIWuUMYKxvFl4iAJ2geRgT2eGR6o3rMh7fO4MRM196uEsCZOvYe36ejTWF3hVmM4rkQB+ZNj2BggNJIQuSk75Cp5tnxKBw3Rhy50mD5L8PiJCk1MUPbQC52MxU2IxxV6lRt2yThkMhIktBlqmJearN2KCg7NdwuaUSoQF8UQ6rFX+uSBgkinYdMS6GLTbuagCjbBW/Dx4wFPo7P3NW/eAAmB502Hz2SnFSPslrw4zIbBDoYFeoKz+krhmQov5WKOWuCDYUfk3eEh06Ibr5IrI0bPU1b6JfbGVdz8DNxs+rkvrgwdBC2jomv/N5kP9JkOEw66uyC5spfQ/XJs4fVYtnXnC72QgDXAja7jZuNkhrtas2424lvM/d0Jea7sJQ9QcQSU3snakpPWi9Agi0rcGs6eU67Ei383lz6YFVNiGYfOXmzcB33PfTT+kaAWiL9/XvX6qf4PWvQ4WqaFD80fFOVw+uexQFEt5ZdoQqY+Dy3KxNf0gfZrcVIUBbbyN0JlAZiM69Sk5TMAXL6PvS7xaGcNZwGic4U/+ZHyIi/OqF1Mytf8Kq4qgFJNyKQtBAUYyWkG7Ym2YQpzw4Zr6jG9O5u/qdIVsxgeKh/GGxJ1phMFfO3uxeAZC2swtrh41cJSjPdRaZ7l4hrrPN/LzLoIBSbK6IZjRo/478XXDDa4qO0UuthZ6xio1fZ9y6Dys8rXYGFc1WZ3gQeEgiUZgbzqasG8cM554ronaRp3LUI3399OBlAxOC8shj8G4XZ29VCFnp7hmqBoVvSk04G0udDlKbQIGb0jdEcdyhP1Sdvi6hr2CH1ET8cBT+w7yRC5mFp1/3DnVTZrFkfr+fMt9CaRN7YuSo/t3sH2sk2s6eTfVordh7W+WjA07SNruoh07LTOWK97DMBp7Jh9oG2zcp6qzz/TEbmO1iWpT0STXR9B9pS7o3DHXYNKkfHbArHHqWQt8hatuFDK9LrZuz1YrpOROO0zw1352LpGuL1EhU1kIDXXOIUQ9a2nsAFyGJL0tpsmVtTPTNnbZw2Xek1TLEDEdiIlF96FbhPlRnwK9BUh8SbzOpjxFmwfZHnz3vFAG0Y1BNqrxW8L1QqPQnLd/iB3glHr8BDwcUKGealRfoXsCrBn7aDGYtyz7g8chncjOXoOffJkHNz1AXvv3k0sX8MkSNxM1JcxNpbVxe/lbfAuTqQ2MRP/zW/Ol4SvA+SboEptJa2aTb4iHTHVpPBo5b1P41Eqar1JqILLTONF9FSmXuP6afQYGVa2PO0LndOrcYghXoGEgbV7rDyIZ/0TlLZVI9MngzfSpVJ8UHQuX5CEA3ZK38hgCjCJ5ZiQ7AzeytFEpDFysa5sj2x4BQeETtaeUrks+8/RlLNiYjYUOTGK8U+ODY/Abc66eyTnBYPK1Eyd9cFiCA9f3ciTdMFq7ExZyjLp9Ru+AicLVm8nrN8r4cf5nghjY6Husw39FmxV6y38yLMCsRkBqN4dK7dfPiRFe0KhuLYZzs6MLpjOb+pMpqn5Xu7zPfMrdc3XVNESPckOx80nlcA53mswVMP2dBIFQpqVI6dMI3WPrPgnbkTe/wweNyJ9KHAs9SkoovGQZ2lbpx1JkQzslW4lq2b+53PUYga1ncr1nJkCkPuPrEX41ErkjKp3Q3w3HJv0tHgdI/BbmJd9ir4d3KC+VC4vWOqei7WeomZ3IO0jC13o69b3KOKGQI/gluddqs2RxD8OIqJ800kRH1u+jxRb/OujuFDtfFEDTQSLjNfDSqgi1Vb9HtKAj0ODI7/BD6QPQHd0FCE6R+QCkIzC3RUhrgTIs7z95uHf9027R95uAzf7cYbgr7Xl7OYa30WmlENHaXzzABEPq7tSDpCZ4AoFqCCSWk9N+Ut88FMnTWmb1plRcfGoTpzOFWKcOPA+f7hBwm+c/gTclKEDUmvBift5Q8APEHs4yhaHC9
Zu5ajOoT2Sxc0NDZ9n+0PB0gNKCKEqi9Orf92WoKCROdlUQ9lGV43BFnZRL0s7ZAX8DEYFAWyQIiQQIpnwKHcwNEq0RvOcDiHycZEkH3ayzaOfFHkjdRqgfJM4fJxs8Qw3oIlxtq+Ez99DBqquiu5tAX5fxkdoICEaUPWSNVxIFpt3CXsepQ3ONKOILXDMkJRpMw8SEh0dr9p4KwlOir/WtKOIQ4Bd5zeadlByXO+5x81ILl4q0kvnzqMTADk6d2M+guiOFJE3JniOJV9BsXCJzrlvb8GFGSWB/6kfL2SqY+YIQIkBSfrEmk3aRtK4RnxFMPpSJFfEPyVxcYgBrxnJ6TYGm/DVIOffMYi1Voc+cLsm4AE8qruCSr2gNmSCWSI7MW7aYRW/brzalapZ38o9k42J8VIYXPhjBQd7wsFPOvKOb8ihNTLU/XYzJEvqEHBqz2RYLYZTP8myCx4Q89VsdXfD3gghbNbMhovpKbQLDUxEIMVqmV40D7MDYCpjUHQdZEdT5BhUOqN0rDRkDum8c4JNazHaP7P6NA75VH5EaDX9FB9OZ8YdChLR0jUYd8XPffC5SI3tt0dN5afqc68MiR5UwrzDMzjqq7kPrMfPuGGp5bRqxDV3VjdJ1TqLK8qOmfjQ9yXMjUw7wgoE0NrZFJdvGwtEx0l4aHdpNk0xLhHy5eHlNsOvH4fMLj4lVmm93/1y3GS07RYq/Kdn6oEuX5Ykna34JXYYJ/GQ+8YfZcAoMbeZE+FCVQGLknR/qyTwsJchouT4LDksZCM/eq9Zq1f3ZEOKYIqaooojcInv2vjMG3eDEX3PXJoERwDv6p0+D5uiJPIgP1l7kcbpKXTOa8bkQlbAMGKCtlfa0nwocJvF0Vn9++YH2a8dGBsUhpZCJW85MFCP92kBOL+wtV3MXYm8uJMuh/bB5jJS0Pz59AHLqXq7iodIGZxUYt3tqIS/y77M7OSNyx3uiEk6lwei6vW/KcySN5jyhAafIL3gfqsRwYrbRiIemZI0SlObcS1nGUkoKviFKK8iBG6XsGrD097wVRgUvRzZkySHtpMQ/vLARUboueRtdnyAjWi945k+Rf15Q1tkjWwv5BktjEs/PESWK5cbdn11Zf1NoGvXlp11ps0ad/t8r+w3h0ifOlUoi6ZHKfEXx0f4JDYH/slFn9N9VwWFx9ZJeLQPgH5kY65ON1f1qiuL6B3CXEnFvJORwJRhjgCGoz73b7NsLniojKImjIAAGrY4r47Nfs1TcQTA53fO2cgz86buAXHYPXDCMttOjWZt4qnWN5QLIVt1h2yXbG8qWKeMg/Y/yIcUNSbemvRSeryiBziWHwRmhK4WaiZFE6xKFhxzIo6rr31HA78lcsZe7gOl4EQeZPezLBhRb0M+t+szGFdu/z6B2gggbp+BLw/kDsMfarMnq2SJiYSDWsWQKHS7/SXMWayHVNys7BrX3SadspFKjwXSasYajQd9uxcHtlRJNGN/tSMYjrFv76fBYMYee4Emz8EUirtpwqnqqGCW6GYUKK3q3CWryqmUsufx5jEE/qMmvsZPMmOFwBvknpfXTEnKSw37arcU/V4PTsZWJv+DOEvOqoJGy+OSj+3Dl1VL2qKSIZ7zJ2DBlmrB5sU4QrLkMHH/YQIz7OY+oQKIhIDuoV7Mm9kWeJ69wiwTBXf8dLeps+Bl7kU9eKBZ6spSTeC+ckVVTVW92uhF3Zu2YZoU7jOwB7qvdGAdAzGHQoFYU3vHyMAjlwns1ExOovO5bZarVTyzZVYELooZiXgNXlFLs7eR8eswPz3P556rMTS0XFYHOHG1Ot6VSX+F7se5RVrKq17DlozUrzTg5WLQoB/vmkvaE3DCpBGC+50UtwJOOANVJ1VTus0XYgVSXTmdoGzPvw1B7dXciqb1gxvQ6ogJZqu80IMhfZ9pRqtcI5ZveEMCyCKki8l2bv9Z+RbHKGA+ufOzKte9/FJbRxCi4XZUfGTr0i99bB7UiNKV7gLjbjphyBKhDwmCbo5z0VtHvTdxyQROXtRrEuOkto0UvqC/5QHdbsXNLoWP6aAwbePWqyWLx4AqSUZZMNSZOQ43u/piBLcIBc1ZhzqQRPPna11X1tAUsq1XIyVzlPQBrqW4mARCQxRTleA3msAmtijewPlqnd3DG5TMTK+yg8UFVIEpqmBFKSoFMHCFlT5wNqI8JTt5U6F2Q8jPEQHMP+DZfI8icl4xwWuzTZYod1cpWsfiuFcJfj0Rx2AJ0eytElQMlAYc9kq9yKlJg6UBR3INHyF5tKA2rV1ql0vj5nXy6zCnt1fexEph/KQHHdNuQe2yssDsP4bj/inrPqPEUPpA6tTBKpYuh9nLreDOnDKTkn/jEPwA7QSuNdIFt1tNNrvWriLge74RM6j6ltATq3DU23VrjJEDmq4xPVhEzU3vQT/UOpMo+7WALKZ6sdw1Czh8JX7KbM/dgd23S/8Y8o90zb+sZ5T3DgKXzMDyM3P+J+nA+NUMFRLasz1RjR7VaYH6w6VcaFKQTq4rf50COLlT/+2V11k6bpFssTI5cFxAG/CFdRK7OzVID/qIvO1UNjYbxcHlwhB5xd+CMJp7lsygNEFqvWR89REIF6nsAoidRHwTkjmVQH/ixpxOzV4UUQPOegZfyhKGMI22B8suqIvODCAXzFbyNQ4KpQxZ9WD35AhG1BNWmvyks9Ey8lkIxWGcCo1G1Cg7+R2r3uzMLmxmTDkPy2pKbKfyGWVjtuW3HLs1fEj1FStEfIiaeQ+IZLnu7dcSBPrCsjUZ9nWkGC9GIuTDHgJz8x/R+pBnIvFXUoAP9AIYXeQavK6+xt6jwE2fTFx6gC9Djxu9uJ1WRBn6TudzebKQp52isGnJEAFa28rDpA4W/erHBAZF0/RzCXXEpbMx9G0hnwybaKJb6c5F4VCXNu3x0B+spPgJZZmvjYGPRt0boI9F/jFvieSZlp3EDp8qXSoSPYeMrABQIVewCqp6N3EyCarxjrpwnf9/jtcU6oNVln1xoxbqgwZrT8gA8bSIaEz6kUbCYYJICRPOadOYacbCEWJYzAmbV8suu0wOWq6Om0e0T8CfJwHFeSdBeu1LSmt9GyRZPXVZmlfF1u5aHGg420selqLFLE42+GaeeVSxI2AFrSsPxq4FRJCttRTK5BG4vVfPcT7i9ViZlGPSFSqGs7h8TCkUXScXzfZ1TxJ9LWTHnKtbyXGKn5I7qtwMovZGvTHU0fE8NauaEXKafOEH1I5OTIZBo0w3++8CxOfYp3aR7kNc+14SNIhtinlBR9AmBBCMrMVsnR77OaTHhXmCqYOP9TTIqRXQlbluTlNkSZRhGmGqrzuVtTiJ6LTqTUBRt0gKM3Mh3XVZrHlEh0rniCLMIXYzgjQ5DC6F+8xTM7ucnKJzNqhdznhDgBcCSFVaGtJdzKrQ+pRxX8+C41FJfEBECsLsOxIIEVdoGLxuOvFvkQ+ExzDpnLaDXivS9zqEMqXHbSOACMxPcp55Fddhca+rOZuglVd4gdyV23jpAcMULvSd0MyIrU/v11lECAogcTK1jAqObw0H8
4jYxf8hfZl79ftS3iWoeO9gZOogzecN3g7eUjgXBwW+SGnaZK8FpCVSF0h0nLvJnA5kC4pTOXGDKVKg3bRh+m/SttwH1lll6lX4dRFkjvjgS6NQ1TtII+KbBrEO30eBiT3csAoSKsOdTB0Zgtc9JWygcNj7f43869aIxwVU4HIqqzV8AO8kkn+yf8L5LVfGIM6VZ1jT6euvc1V3T3XSSIYRyUXCsXkvmRAckhrCo9+WgXIoVxnuDfgu62vToF3kOayHPnXecvRkhE7YqAsbEWW6CbXCecDdsEK2j8Dap663cgYHT09hyVmJu9emQP5BuLnYQsSZeIdtOSYqGArJ+z+IXIUw6f9KJ46CLOI7BDOH/+YVHzrvQBgDg8PR64SOrWk/knb9pMpLYnoBhF+WCD8DKBH+Pr0fAKHPdemL1qWa0rNLFcNEQdS2wsgt8Fa+YfZ+/y6rD29cDZ0sNArNSvNmDuc0YdYz9zvqDOFO0IYwblxai9oNHW6f+4HGkM8fYvlagABgTdwCgTpFjQ+GDjHFde0qeNbaP2FSMZl39fvnhDiVd9PEBNoImy6RJ3eQXUSu48jzMsyb9M6kfKX+qpPg+yoDV2pVV23X7gISQGAO+KljyKDUN+0/u68/CrgqFB3Tiz9ySHRkeljukY6gr6F+TWop3r/lzeb7aodPMfM29tuPNmbWdQqwBrZsIXlSURne+YWVQNdejHavzXImYaDSZNaimwwnA90nxRGx1Gi25WYnh8Zb3/Pf9Gfu4hfia2ox9TJA3BUUNCOxfbeAfom9yP7l+dhanefpBmC9/yx3k2jooTyDVGpcNxFRIV0Bqop97KFggPh5n+Si91h3ZYB90uOXnvKr9hv525IJViMF9oU7eVRmEywL6fMjZU7Mw5Nqk9IjOKJuKCap9DQcFDMY/OhnYi+3vP+TCwfkFvUJIZe2NAaDOmHK6RA5zB/KSOO4teiYL9uOAYxpPan15vC6v26CSdYD0H37MFhLokSFQnccE/w3pRlY+lbM/XKMXg5pkbLNN6rmEcaay31Nu/pkgb/zmfFG/+bYg1q8yskj0a/c+iMWjyR+o1sGoUD3kIdNEnmUNyUloMYebpmHRMH1HtRS03/q1aFlyIW97Xi4xiraY/TACPvoGx3xas1yJVR/EhtUJwMtDXZlC4CEtu9Cw1eYq9ssmqEIvbjhWSBsd/7r3A4rIDLPlw2w84HMErspt9uZiyI6kMkww/K2e0L2KmCipJkI2b76lpatK/Dml5w9EmwphzI8xcARu1oEFsoD5TQT7sxRyj6V7BJHd1TKYWCtG+c5UJEamnnYyrC0XKBZdMettTooohwKx1ADOYXzOAxk4TKgcrL67AUpVn2tA9CMYTawgcyoPPq/JWRA0kAEfh0dCL5lpFuLMJyDcTHPETXyQM8nfZAxWjezcG7avlDFb7MKfbtZfD8yDwybhNGCKVuNKs/FhLYiKfM42FBM7geUSKU2GilhiYJKWeN4FQULHRI3uOwFhkXy/hNY54lqcDbmLsNBuB9EgfekdNvHvUKwYDgOoTLYTBJHg3AnMzZogb37mHX0jAm/ZIlgn5IRJSDABkBsfO5Yf5hr2guaoaYJHdFVTCI5m9CD3zUq+oaf4tOwq2R/Td7u/MekjzS41iw3Pr8jROQkRPersZETBsyjqDdxXnfxSHOkVmSZZh7dJ2cOyzx+10Hb5NBxVGHdYrLIe+jOclDl2mE2ML39AW9QQOrrSpgHLJHee1ctqBkq96HnP4Ilq1ikOdiwsmru+qH0YpujlBBkSIi5w6cAfuCecp2s+4vXI/CnRuK59IJXxC/1ox2AwdVm7RZOidpsq/tqSlSs/Z/UxxdciRUoScdmTjAz6Fk2Zu0OD0lwrTIhvmgjpe2h2E/WJ3oiNLXCExXc5DdfdY1Ys7Y1qD7F+HWc1RX0WYQviDlIBVTa4SpqLEdGfgNwX5vAohOVHm8k6hU62FDm1S+U/7KiVDEkbjT4hhKIMQhOa58His0wDlZPFU0aBVg1bafPsSjl7uJQQIWgXIglXed7L82+z+EwYD2VsvpfZAgiLyJGLfxrc7qTXlktAjxAgzthQmCqQEqzeQSP997NDCmR3viWfiYZ49IpsSU3XAYQoBrMWSH5uujgSB7coKG9Rk9kypvKvPsjsoBFfKDWgZ5iCQYBIZOqxJnw3K/e3OvrpSFUStMrYoRQR3pkzHwWFtvPe9W7fgmhQXFvtO1eTokDLaeECjShg2poNsOPkBqw8pdF5u4gWKTasQ7kukWKVqoXWkMLiatMplKkQdaTvcNrkYJBYE4FV7dSrjKw0IhVNxJhCzXDzjzZzav0ZqDD+STygyX4wO5qf7QEEqZBDzosUCxx6rXkDimOHyxWtddbYfKmc34eG54UwKV5fpxQSMMe9HiiGbujyKacTI94CCXonPh35h8Irr9v8qZ0KRUXaOaLjVLMFavD7FzXXvIYLtsgM8ni0DxtZ0RqE/OWADDhH71CoKlLZbXD99KLM9us6PAjr/czSoDLalpPtyFvM+TMoDcc5XAlxkrkKaApf20CC9vu5XYVzrDU/X6TCNKUDNHju1FJGerI/FFjkuhNuSDyC2n8qIwuWSGiW+66hEoMvUVwbj+Blyr6D2rDkYNWF7iGE+USxRaUpV6/UrRDte+Rs1TomiqW+C3uTq5Z1gp3T3b9X0gJMLDiLV3urRjeqD3LFEspRIo+qhry1slnBpe0e5OpeIbbkRh6pDWyqrdT2N0vqzfOcyO5U4HTUnMGpYrYabFlXWcIzvbaHhcdljfms6vKXujxMXrGOsaHAeUh1eO8Ptkrrz6v/sGXX2vyFHUtk/OzGyvs1yWlcLgyUhEEd77qZCLxtnTYaLxguGIA4tf96UzsWMr45BOrKCNEKyJz4ZkCmO7Q0Ctw9qzaKZhMAIXNzRWOcNiSsDmphlai1j2tncoyH5VCM3yaM+ORn599y7rp0onzlOYUkMDj8qgzX0yn86aBdueygXUEKA3qHBhM5bmBmx7hY8ZJX29WGzkHIaAHhJLWj/Q0Yx1Cse81LxDGHprSZMElVVgBBUU+0S5qLq6Fe3dUw/9xQONsJxyr59+KOY/T0Zm24BCtYukpIJCgZs5A+TqZMCcofcgD6B2ktKC0tE+mnuVVHvwFb1bSfluXTQi6LUEvYhelYSSsZAj9F01ZGy0CEGlyRFZOaPm0w/fTe7oafb0zrocxCcq0qWmP2mmqM5z2bmYGrfH1ir6ncW33N3bLVLy3Ly4eAG0foPdBsY5n8wrFqXliOJH2weUPJSRlgohSseGFU02FoY4G4svltKbDzTL4kThLRw8ri/EAph5oRsVRydPBLXXyrVlb3njj8PuEsKnJSqfvsc46jYtNm5YMjK6gfYwrsJe5cpn4P7x5y4qvu8nWbG4njDZKHVHlB9qrn/LMmYh/GRqHkX3WVQhm7jbAQFwVU2j3BngbFlPf+TqlTilaQNp2ZakeKHjkxNNvWVJqe3uCU1qVaPoZ0jQ6rcAAa5Y81mU/1BONc8N6LWtQgiBSJ6F0m8sLWW1jh/dqlYru
xTEyW9A6aRFj2Da1Uz78UWpcK3bNeI8zIIr3zuIr1/z1+6IsqAkVjk8NeSQFoPVxaKHYea94BK49+kXU1LR5hCXlPUcPwg6gbOLDa7M4ChiUmzI7FYcOCRdZIJgZu/6hh+Vd2+4QP8Gs1xgBtNpkOu/dqwgIGE8T1NQ89ubocSuBvFuwtDtEJtxfJi++6+lBBLpkRilrkshdzZh1oPnv+5o5vRWd9QbQBqo2KUQlE2O6ff2IOaXOkdE0fpWFAJBeEZiRakMoxToLDoPKQLzYUo73Pb7HsocQzBejSlOP1p36+wxBl1i8O+baJKrh/YLSzLE4UIx7PgZN/kUUym2fDEq3mqE4NA+n84+HZA99FvNLwBwZGukTBXfpyF6HYBmg6Fwk0rJ098TiG9y7Jz2GKpXh0skrPqhh4T15mvittPpv0zldTb4M/Zca5WelHrNkHww7fu1JoNPOGogvBn7pX8s6Whhw2ab6zDPLqEb+hqHF8SEAozsoJz6ECuAJuqrXaxc5dOvF69kSj+RNhmSTinytIVjeD0EBm1CizY9tXGkqpYSMD9cYIclOCcIj4BbpJdztzKhq4fxYAkxxm4/f+bW+YkmDfJ0Xkv6l7HcPVa2iQnibx0Ro3MYVJ6UHqkY2lFj4cieXRBkMUpXwJY6oX3Armx87cBpJGR26t05kNfj6KwXPBpeudpcIpFnGKn7YW4NdkRoPYhCCt/Afzs+01hM2KVv6GrbVnFEy+leCGO4msuXjMXaGjf+AAw0P3M2RBEa2oiAoTNmY8robYOyhnEMQEr9tE3sAjW3dhqvtb1JHW+fm4yfD1k/6XgV6BhXNgu5VI9VVquIvS0vFWsCrVLoGxBuxbEuA1EbUPYEZH2J/peZ21XKytlFwYYQHFlBdvkwDDen6Fa2REtke0w16nF+AelH1HguONH9Y80I7O8YwzD1lwbgf06yss01ExA+pT0r53lM77jTRBENvL8+KdJaT8s/nsBZ0fmQBdMqJeJtTO60SIM0IFYT+i4Ku9KQ1muIg1l7IpAAUxyxQ3yGgc6UTT2QzmxZfczoNXUAdb+qp24TgI54pHdRxBohnYDSAmV8etQB0e9ba8m5Bi1GYfCvNnZ9D9fqh/mD6q5m+MMe/7ih8KT5uLtpQfSuNbkA2y8fqUr97RrnBVpED7gNNoBVIjkpx6BtWxg0zpqkAET4pZqoEuBf5Ll83RVpidNJfS25F9NhKZuNrr1h9wum+CQY/Ug2phFWGM1e0xfyvOuZpytXKu/E5iK1T9g9NWGRtXcHb3cAsoyO0QHHeL93VY1f6KO46xAPHEDmHCkp0tEV08/jeHLU+lH9BTHtqHPO7R83yuXNVR7pdHj/OoDhm9kjYKlFqIi5phxqNKuUu6rV63SjXJ4zWE3uwBAXPV181sYgJPEl3YRtUFZXK7B5SMvMQfgQSIxF7QDAluTsZSuAqKpJm0dSoAI6M9/Qwj9tu5Q1WC9CtdSYS6qO2iOLIKtlVqvPfBYkJ4u0NvfpAj6INYbCipa8DkCAzHEijKiTOUkpbhgMh1lnMCvjlBbCntKX87XJ84mC7XxakwwO7xllUAxKf+NuLZHIbkKg1fH8YqUQBzqeVEQeM6fU2sckSZQkCXIhj8MNrKrD7voaf6R+HXB6kalrwoKUBmeHr6nJVmEZwHPRAgIseh3axVs/KeDYlSF6WaxpglX78y8NIJL09L6NFT9S5bZE9qFSpFYiGMbW9FCW/ujMYwDlNobbRXe7gXeDZaMkm2L7JuAnNc0/wbwj81EPCWldiSO0BASbZHn3y8ZP4Em2BI93H5gkj6cN9xQrZCpAyQf+p38Yh5jlDC3iXyWI2HVS0tae+FvOjaMEmNqa9b4Ndol7E4cfPrYkZxLaNUbizfSHZ22iy/PIXVUpWxLliW+afwei5QiN659cuQAxqzpwb/K86CYIGLTfBSuCszNjH0Jszizk0qNLO0AR3WBcBcvgk3dm/Dpn6IeLQ4pF/JIng9rduuK7rlyXC0WlR4eSoUdUwi+3AxZwFHqoPU9oUiC7zFu3WXXwNwr42WaXsJMSOxQSpQe8q4FlrlGV9l3SSH+UD/RdpZurpzYRWNVyGl6378wBhoEHZAXBu+5n5ozeDURgeR1UpDINfMfgZIfhYRc/wBogtSYcVvX87+3tbRxVeD8VRi/vd4w+SuRvTooPytrWF1ORNwDj3a8HenhRD9LPiej5X4dotJJOmREOrOKSKs/Gdba3uA8lkXqKz4K+Tq718iuSVX+WKrT6nDgK0okqa3wBADokCTsF0hNtnBG0yxbJqEOFXm2FJrz88mya7pVfsZdXeNYGKwoTjZl29otRul5R/zmDeAAwvhcRFjYGbaZ94yZwo1KKdWkEZH/SSRo4L5ZOuO0BaLCYjxEpK9iSEmM3pMyoHPLTVzWPq9e7kIfTLgOObftqidftw05//Iv7V+sEKSVX41pDFbAxTknerO1P+4hw9Q/vSHqeDmfIPnrmvWWStshBWBxD6VAd5PiH7DuXWd+QOWjeFWi5cv9OZOJd4/GdCAzMpDx4++npzKOoM/0oxU64qzg6XjCRL1T+F76Tb6ksXSJQVLGa1w74Oni8+K1TVjxnu9qCw/g76b3d/847C8g8SEvNGummbxCx6b7YAQv0r8OkoZoU9CoBwtACa4HN4nDzyMW7/8Snd2fy3Ol91fVF3lGKdCA4mrvUkMCHQNgOuLOdZlV1s1eepjSdq/UCO+tBUk8B0MB97kdF+BDSdOm1+3+jj+w6ncuxGqzsbqI0ZFpjyuofCBvQNqOO4ZJUT9GlQ6+gK750tfDf+FSXnPo5ugGL1Up6v7WtXcpP57Av93cB/MuEN9SsWyPvIECdLDbZ9Uh1jstNeGfkUTjh1hdBRQRC24I2UkJqLYhqZXy91ngZVUGwAUM6O3qOSB0Qzl5Dd8JFbTIWOU8YYUx1nKuFE3oWihzwcxCW8vfPMEY4mnYy4kDh/IosxN7wLrUvoni0dlh4gF2lRH1kOQiquZqv6f1lGo9w/jQ6+SGSpTM1JDg82gjKiW/CyZ2wGVRkIxP4nc7CkWaHCtYg7Pxkndsx9y66DYtV1j4WD7+XDr71spmPMWI6W/7AQ9HNREZfifGGU4B4aPn1ECkLeSRkxGToU1Ljx1B6Q7dQFYLzbItrb/tgiGtETnCi1aIGw1O3l7JDtf8Ry/g0LGJUfVA1eubQpPB88Ash3fE7Y1yDcJPrwnSZL8INxWGUvsq4N8NPc9S3ozZcE5u8dMbT+ErxTphjt5qOV3kMw9ca05CPkx07KqZJjZ7pS0xvYtCXnAHZeqmd39bIpvXZR3Wx2Akao85iiK2IVrGtwPIrhLe0WQ7bmiES6avDfqo0pP/x4VLxTTtCKKbs2exaMiqDbURPTu00cJIQDbttdXySeNY31XGm26iEAAhOdczMK/QCOetdeP4i9sWaK+9lEp+tky8ct+Y1FbL4gEsnJwZEzp4BnufBoNv3yDpQi0Efohycxxe5vZogYmQtQmLmXZP+EoXtxC04v+f0ozj1C+NjpJJ8VBLT7Cpr2U2bVYIs
t9vRm458P0lkBwLUKMgFeX2h1l5Q8eWKhbeCzC2MX+Y7W8+TB7SioU+YQAgt/C7id+0Q7ri/YNauz0fBhYMDGq3XvUtfepCI4SyF9SWtt8vSxAySpxk0ye+/xDxqUx3XZxB+ZqugnyeWCOCwywqNp0iJI5JtQoPDBiGHr6vo9mLQYAhYtiFv/cLytJ9rULrp/09miyQ6SMIVDZgFjHfdSHk7rUd/wLD1qvZFOIyDo1u0+S2Mhhp8vFY7BYXXDjrAR2Zrt389uitZF+8MxMeT8Qm5OwBP9sw9+So1845FWqqkaSHr4odQHQYd0iCYye5JGQxaAlJlp+WOlH9jnK1EeAQfHZHhOBKMg3iu6wyHTVs6IhbSpF3hof5Mdkx+g+zy4NPm2bcXHYCJIZcUmN3vtoGl5/HYAeixuLB++HCJtKcZKyoz+oNlT4L1GbcwlW7W4JmXz6+mPuG5aFwyAGDUUPNnIVXRmR8gS8huKI8ML2Tj5764erIPN2JLe8taLOoKOX3lkLArXktiJ/XLoY/yFwcee/+eKgOSSs4GCG8YeAAYp81C6z/KQyELNiT3OhrzX02OoNkQ7rBM25gY2Xkqtp6vRL0frCRMDY6mqR1J2JVOxDN7rkUssx0+zOLpnGf6S+PiQtULqWL+TiuFE/xxB3xzrdwxEVqlamLkVFIh25S0Fkrc/Ih1EECma4KHStnyDip3dVZpBt0nlSZ+cqQzqA6PUMvb+L9XO2IaW/QDRljA4b9UqDZOFC47Qyt84NwqVbr7DWr6PrOIdXHXPeXpFtMCwIxrxxy2+aWC7XC5InItA+kriN1+hAG4kwlv0IzdEqETabYWGJXuLqICqchfinAvTCjxPPb1sQTqZLiTaQ82KKkvTv5I1zEtSROwdivm6RzVTdsgUqAXYJMq4wk/1f/w9JkaJ7ylYubOSYtwTn+UO78mHofFLjq12fu5kNtgFcPahKCMDDJ1hO3haJDMmZclvNQ3ZXXr7ON4IBl6Q1A4kvF9Fzy+awJL+5YFQv3RC56n1yMaKlfn9GjCBWCq+xkzvpobhC5IlvoS1VJw3C1SiGJx4ob8q+p42AVssIqFovwQvcjt9/cFIOynO/10jfWtsrgNqUwkMtEIBI4eLu3M6xwHlgtHfvJzgsZwquhhln6nMycv9TpRWTPzCr6g2a3DmVGwQyeTRSDsNhVV04NDyR3Gp+71Eo0qeYzSD9d4+YZji26I/XDmeJ5Kg+8wcRgS7QuBANqAtFMsS878qVno17atjfVvgsQbNKVxhUSUk08qDQgyzSBMTHyed+GqRWglwFjcl9iiRCDLp5CFFAWIy5Dqy9SqoUE4FJRYCTjz7vOzgSqUwB+8oh5v63X0hDLKUbVD4QfqkhklUbv9RRtpDmeMkYzmoLSa5GL1BleMNRCiV2LSJSEyguKzDdF3HbcsHf598wIf06FcxuODZ1pO4zmHddCp6NUi+R8na3x7Yo2OKxw7KuWnkvp/SbhU32i+TMiNcp2MOizoMb6FHKLx9++CRR/55t0cMd/ViM4AQ7Egt9u2wvPhKN+vE3KB9DM4h/RUcBH+jIFhAKde0KhNopyCx1vp/d0+TL7LW+v9wvx4Pym6aPO2nvb5cWwL6l8VoNjh9yyFz22PnYZKyhyaKONtqzei4acBdHWcGw9WKLErhfeCD2H+n2YBiIAQAAMLZt27Zt27Zt27Zt42Pbtm1bHaKD3MAa+Whtc42J8J4MiEOF04KrsYUFiPJouUvKlAcdsMrxmM8KwrcDCcqloAdEZnnlAELWjLmh6MUPZ/8UZquFg2XI5iV4iuTDP8jo1R37UoFzOlNSnIEaWZokCVNSinUcP35Xh5uLl9qgxm8S81Nqv8YlDPsjs4erB1M/92uEXesbZyIGKWEcMbhsl0mhbYW/qZJVkiIs8ngVkMuZSRxOSwAzxMs12dMNY7Yk9NLOLHwzDm1cv7V4fTToCuXyQs7rin28EByG9EM+2m515E/aCtoTXQ/4sJTMKl68AIRQ8faf2nuKwMtETd75OCBkY44GUdVNJ0ZmNQLrFqaxU5H6LswS6vRNVcFtngYWhNSg9hxpqDtXX/ygrpxJ1x3dqfby9TpvEOKHmgs0scoLIjDfALhlstvla6vZy/fTEsCp1pEH7qC/blmX5Cq1sox5GwnFpTEoEX4fhDan0aCg8Gy1HW5fy7IaYq6OFLljLA3UHxnQkPnaroaa5edXhsRs7xAWQ6NbUnr8kItoSj370Pnfp2vbRkjOBJOngqOmUDXRlkrwWlZR8qozReN1ckV8YJMQ5OIY6MNcSTsNmhczvnDFCwt3ZtSgQfYiucYPI/wKv0wlwd+dIPzJcb4PryDp7OI+HICIbwKq7TJYQVVMDBZ1x4G9clV566SaZfsdvMysaHW3TKMG6DXJ9rQuSNLX0yl9Y+ol+lYKk1qmMvnDUygPmLW+pAzXxVNcn/bJjohtYhrbagIKqW6lUWZUgg2zZbnu19v8ff731VBGTm8HcIUjMiY0KowhHmxwVCXF4F5zFOAx/gyMT4ggw6xIVc4l11OBvWFzd78LX3E79/wWTPXc8iBx2whBqVSy3aPpGIsKg7mzWYjQBFVHrAxR3uOn4AbOAPosEyE/glg+WRdYAogNJayWA62Hm/lD4xuL7RCPpAKp5lmiy15o3R8AZEMcE3+WqpWmmMwCHIptO2uqisKjsIvF8qPV2HJT4gUz26Tzc1PQ/ZYTAXkDmAf7Fp8yilb1ctRDQ3r473LPqQsu2M2hUi0u83HgWXZ2g55wCQOs0xLgdoCtPJbI3oJGUJRMAung3X5X7Yf0/RPC7tu2oF296cCukX/VR7R/aMFFVAehUxjgRoeCfDRptLpgTqkFjK+Wja1gdjGHNfvhqDdy9QKyrMiiPFu7sMfKS0wMpcKLNxl0w/JJcCJsQ+6gZ5cg6GOmk4mFBNTgonnsyCsHXTOfQV2DNhPk+UWLmqT4RU5rJytIY+ovLGlhbuajFFwbJY3hroa3kZkMkkC93UqfbXo5+j26PbJ2QZiSVwC42u0Kj1VG7VFfcVEMoCy3TRDS+Fob/0JXEtaae7Rss/dwR8vIMhyTVWMlaTkT7fH3XMzmIM8rUZWRQMWIh8IP0Va+7FSMOSV3yPsck7ZBVee39DTGBh17UBPTM1X3rWOZYK8XJuFsJD196xCR2oSa7XnatOpVTX/ENLpj7mePHsBab/fDfBX7qaZC5XTa7PTvmlgWBpcUAT/5sykRVrQZc44DiWtS93pHEYwmEgNk54KEcrrtN9CQc1m2Hd3Bh0g5FQ0UuBxcP7I5UuWOQbz4+mXimMkAFc4BjTf3vbL8GRDfKbs6al7G4Mb9PSj0GWKldEcSxH2DntoXWNUbThjMsr/zE6fx2bYD3stBWRl+EQqFgN/vOiIrpMXgUsXPCjU/0Phfhfq7ZwtdIZiWs0MPvUNPFSXRi+RstEbjRE/bzT4JssIeLhWPuUYPZs3lqPQFN1eTTlVOnVwnEMnfbxcSsNk+OrRjQFlm/whdC5Z4bAxMYjzFpirvgmY6Fq/TsM4gT2l5jnw3A3yaY
CtGOQKn3N15h1J+cLZ7tMXHa5t8xbR4KoXLZK9PhBShCU9/8Gl7l6w94aUxS4afX8HL5mYQ90AsWoL2IR3x39RYbVFw9Sd357kysOk/rvge08zwc+ogpyr4S1u8D62h2v7e7Yc4lv4Cojued1pbosx5hQKr4rhu6MpeCvY15wQZJ4XWoygDTtEbvYTQ7TuI39ZOow84noGuxoKtEH3u+jzqzp37ciTjBu+TSnnRROhNFCyC2LBcdDo+DqrwnCTeH8wL11g9f0H0ZAcsMnsVHvw2NJElNkaDvG9dCB6NzAEIo5IxxNimBdNWpKXBGey3EMn6sh6/CQ1bhq6eIAKBddkcNg0axoxPqAagoAuEadEh0anSBbLSC85pfniUxtypVENJG09cCxWpYEptVPK3iOJJK+5HyBVFD0fXBTeMDODv2ZROkCuGcwALeR/HpS5mlE30O6gKlPKw72bMBDV9tmDryDXSQKj5Z8y3ebI0gQfP9NgG7eF3jE/UBEpogeRekIVHcgiMjToYVoFKx2aTDVXQNuO90rozJPgCoueup4QdpIIjpTGBtV4kDkqjstCdyhghXwnqD5SznsRMSB8n+HA1CQLg0u7J/lrzcPqlxD+UAYDDkR+aTo8gSL22+DHUvKvYRz5McjxEJwa5w1Hp09a3pk87l7d94BNPIrPtBi3LuEgpWP9pg+P7BCVdRl9s4l+r7Zfuo7sUx7+QqxAsGHpsDzQwQSH+M0fvmr/HzITqx6JMO5KHRMp+heJyXEeS+wkbc8/5XPe08oOw10vfJIdumE0g6X8mvPPsZpaxF+KPgiFmztr7SDilfLH7hTe7Y305Hq07iBApSG0RK/XgCgRt+BGONfZ5L3GEWkr3L2sL3BNDg17TFOikB+KnM5zEN6SqglNjdwUxnleBM5Y0StRSmHZ6TbHtxVqEU3bp34C+8+7Dnba0BK4MnC7ko+gX0tDXEBSvuiEEcDnWCuMtxOvtBp9WeHwYmtSq1sKmXr5gg0dM6GejYejgyF8MHNqazQpZvWZDFms64A+2m7I7xSuvZgmKqg/7zZVN19KsiaFfJw9HwD4RTfusoeqzpml9cVqIEKurvoxt9fE/mN6WPhpYNf81h99spMgp+2qV5Z8p/9yFQ7ulYZa9j0vk29opJ4JNcGzRtzXxeEqO6srymMV2BvvocN6Nqathnvmyw8NNpk22+X0+5c5plpyj/odN+dVtMAc86fU3Ld7t0F0+b0n6Nm5ZXX0KYQI6e7Mx4AEyGyUppiAOitJISCY19YhJdBAv9gu3c38gHmrIouRuaHNlpbhd/K1SvMKKgvIVBWUGtm9V9APK8nQbNnhkbf6QVwQQPQm4LYacjmb4JFkLPxxddDKqV9DBjHcBtLxrYfoR6p2fgMVONpdfDyyY1S0fNslFaMn7UNvPIVstNYBCNNTrQ0u2oJt1lixwwioeBWbVkapgeL0r87R6NNK90DEp3NdyY1vUMnqPLwzFnK7Y6dw+ltZESgtqZr+gfDBiCxqhEJvgQfb1KUJcWh7pqvj2YJ1DgNVJR9rWJ5wJi2OBuiK4HRT2Qiw3rEDY8A640yjNsoOxqHNdeJiHfnlIwGpahLhVoTHPXlyGUW9uOkyo9ZTj7hQag+M4ketw8MvrTbz6cE9rakKG0DbY1flEN/sCG8mm0J5MABWricVMhq4kQZEbjmiRLa8aHnhimLFbwYS+1Q9yXivtOwKkivu9cvu5nVaRSzTP5DTSp9RfoFazVlpVQQYLsPIKhGv8WVgLxUDFEfvrSkL5dl9FEXeSB+rkssDTvqhgKyAguIdCcvu0RPxECG9dAuLk+erNuTSGAVnSvacKTus2S1MMRwTeZUt/JaVRY0t2HcBnfC6hqCL4oYgWT2hZyqtRufIgo4YxYLtKx874TXK+TeXsKqkJKFbXAYfmPtxwHj3QodZRauNRMeizgCNd8UYX/P6Fd1FG0B68vdVYgfx+AMN0wR9s1E2OBypUToYQ0AAV4TfBKYAI34BsezZta2GdZA+hob+asjU6GoRFD45CCtPIQnmkOXJ/9pkrMleJmGawUVY2ncT3Z8KJpO5n9pYSbuvrM1WKnEdDnHNg/sBrh/3a1eP+b6vix0GKHwh7GKP/XUlGcvp7Qfa7zx8EYKIwduys7xfGZDMko0pvxlMIyPbEASIOiKrmsR3Fd+1B415O43gVead10KUUOmyR+y9ISCFa8Y0ChK1V4IboraLbaqCrr31TPd1oaJaEDUTPuPZWKFQb14ndld74u7gohEytHo3IVpIwWinGWPdLzE2fKvbbcG8y1h6dOGaedD1WJUvytf5qoggbwN30LHQJgrU4iAUCI1byw7yYv4bGUdzrlv8MJuBqMH2RMxNMTvzCL+8DCJzawTZav9y2DcubHo4mqb49zwPtSBxbNNouaWj2gH3TCsq6lhJLUTGkqZCF2OeowjZAt2r0B6b/ZmjBDuR+Uc+E/SWxIAXZ9cXj+xJesaWCGRvrz5rgiH2jdFotTbDpNrTpnb54YmHHk0trqmKyDrkEwEQcdxmZN5IHZ+ccaPAwYipA/To3Xa7fpIBz3J081OSUpzP775PNGKUyHm4eMJqvXLIVCm3t2vcQ8MbKZaNJjYjyLWnRAWSdBNwjd8PjYaglZA0DtlubefKCGqfiXyS5PBCeEsKaPP1haW3PI9btQ5ssZfPnLGai09Flp+Imgtv53kRLEFpnWu/X5/Tk47QJCE9Yg4x6xfwnoLAC6pxUkrQJIFF9V2aSHQXYOGTJWUzvEb0P9OqZLohRKUyAtSBKNg2tX+/SgNGQec3w+y8vieduaHr0fhfMTPVR8f7UBKx20pYPgwq1oE52JtAcRfO1B+idJDyJwwVdBnMCc8XreOe0JRXhyhclmbI3a+w3wWD7sL7SkiohZmqo4XdJKzGxbKcWSoI/hQYAKtDiNKkZQa/u6aUtET0QoYYI5I/nExB6UNSg7qva1k42BEUxXgaCV5SB2FPDUAuns6NvDbYk0snsDS3O2VCPtw1jvIuMGvZTs8uzlVkpFd6GLpX7oXOn4LJOy48yRV73wI7N4pjKCauISNPa3k0b/zYX0QHQIxlM5WURdawWiW1pMTrrvauECyOguFeJxPFDkY2+IsSbdAMRaaVVxn1Iye/WibhRMYS1O3lBNhzGHtqGrEzAgKhMkctjj0D8qlgTr0n92AcU5Ux/oUC5CCjYErGwYp5LEqvlqUjsBKCWEiQy/C2ZYAQBwUaT/HR7avJwU5yY589kHzy61msZuiP0zQYXvPV+616MuFCz+O+cRU8FygafJm9vKk77BYzC6WELI87wVOZU73CXvJboQyQ3e7vuqbAyxPwi0cBn/dQn0L7mwCeCnq3jBsCXde1tu+2CC7EcBhfw1HZo8yb59Eti4S+W2mqgeAXl2p1LUU6QDRoswHFGtuWEOpnPW4Sbs0yGzKNmCyKZRmeZNZcIEVJxD0a7XKHp/NHMX4kL+hOEvUmRAOXRT/ZYiYldcPYHMRWsAkveZE0JCTLoyvIkDeOC+fjO5iRfBXIJaQHFr+fpnv+AbphxokZ/
hmNLnR1QENJzwEb5JzEu4AMP3qbnSWhW6I7WHq9PGeAy+PjJt0w6XMyBG/oz/ryhSf5QFnYTqEdVU8fzpNdRHGv4hCaKQbro0HS1jmTrmceIiy9WwTiHSe8IjFT5NGQhGUl4Hxx6T0KuGccrhiyrlBGr+5QatQxS/L9kvceGDz+LRK5cXQdXB/aMA7bbUdFTNE2Hn/NerWClr0I5jkA02rw3KtTRaEcDQPzLbIaVeWVtwvSts6fjOY7aNGEq+AFZiGamZJ5b6m9L2e5AxCtXmCRRjzSk6CRq6nCSiUqlh6KDRaqxTa2H/CUot/ewwRd9ixmo4c/c8EWI6C+35rLBKUg1/huV4qF+RNYSKl3sHQTYBVHIFRgN9xRHpLLF4H2KPWdl5nhyzczQeH8NEr7uiSdCu3JnGW+zaoUxfAetwT9egUpCgpaAqGP1bfbUGXOxWaAI4ZPZJiT8Ku1zsOm8IqlcaMMbaV64JYEz4gblEikcs4HH459ZpgzrXyuYJJNGwpcoeYsh557+XJ7+RItYazxNcRWEf5++VhxPBaQ+Na6CBvOPNRuUQEvMwjkn0gqE+lY+4q5AKhqq3nKopDAo99dw7cuoVYKNhtByQTcSAqz6OhxwU9EZ9XxJeEocg9VIK7zLwboBSfhjfBxCLv6LOgDrwCDJuXZhxeJJ955EBNOKVgicgQgBi7brpgdYaM2g2EfWnZWm9wuoX15oab5jWYaseskmKgFPcqdQliWRshd6B1XQyR+jQLLi0Y7QRgXAbZL9U4Ea0orNbp03nIbkIgvSzZ1BY1vaekbyAoEiEnaByFB5dSBHRmEX3uD8Q2sAzJReNzx1SO+XbyWw3i7kRf6zWzCY0iUBzX7NPHPIu1zeUMuu2Y/Hh8AgZTKidk8tVE8N3VGnYBooHzc/BvCdA+Ybg18mkVcD61x/J9/i9hnUxDsZNMrOfCYZHkHzmatbnKW7i+7HMHUBxculXEKX+/uG9PhS0IOS9NpNbRw/qmma+RMrlpvfR/qbnL2dNIkfVTTqsXSGmWYsu0XZ5MnzvqvVJ+b6ap80ChpId80oDXrLDqe1M8V1VKcMiEJigST89nf1LUROzeSx3XCdPNrGxjAl8pdC2eyzB7JyDIef0DBtfjwRXdRHDjLff5Qm5JDfUO1l8MhHYaWb3Zpox1B6fYwK+tBDv/vXiETD40rm6VKSLmKz086j28a6n0VzsTpzUmEFzW9j1M5k++/+G7gEaw8Rov/4OPOonuvQYrcR+HKHi2QR3wmCVnOMwughP1hs/oSIJ25oCyFoxW7rnwTfwuycNlQNO8xj72jzfRbCwQK8yG3/1utvEZgNwQHKsd0a7grEGmPsA8/K3ysqjX1WzPI9pdRF2UJOjXDWq5UOKEEzqgigG56K0wnMh05KlDqasreAgt9Z/kiKm35VApcbiKoqwgVRIPHFGm/oQNPs8TFX+hzYD0ggO14p8wTXN4rkdHgZMXzA/zRbG6g9abyJkdnl0W7pkP+yHe88fSq7NwcKdit1Jrl6a7IrxyypzpuG2JAfvRR/mPIh6itS+tOY5tsk7RSNgtbj+LZnnchc2Qr58siTBWPGwDFuVjBw5/HV+oedkS5oJ++nXHz72GGLhICZQrtMineWFHxfcOD2eaPsbCfbFNmY8tfMrpa+GZDCD3mMaLzpUUOSkrzNvMAbYvQPzZwnAdEMqKmAvM6FBFbeXypsNCZekgBjRnMI8Yw0oNE6SEMIQVO2kgPAiQ8G3DfIdj/46l2apEbpgNG+10LsNKJxCIpux5op1V5nEVsqTQB0Y5StikjacsfbV2fWqAQpZfsiJtlN7UQBAOZn4uM8gbwRflNZSSJB1yuoPZ7pPT3fOjA7tBsgNkHkIebFsA/FhqFd3tq9Ki8ePV/t3E6c7ehQbIt70umPKMKfZC5WVvzodJ18Z17G39LcCXqMYZ59wpvicceC3Jkq3SduohjDISX7DnQCMwbupoYFCw6S0ECAlPDwo4TNCqLSychNunzCLd7KZCHV/RZDwztL0EXYtIbupqbPWrzVxLqiPpI6Ndu2kRFFT816BiXRS6fVdUSgSHqt9952cNP9D+MhYGSDg8K9ZKiNpfxnjpLg3rx0o5+ZymlS3cN7EYQU4dpLGMV2VXPDn3QoDsrRaLzb5MyeNriI+K++R8PH4Fe+0FGz9cg2qaWEHnazhz8zBPJXhZq+aD0nTahG5SXycNcRFeOaiKxUDc+VFpm/r1Ar0Ww61Zz27XSWCaS217zhvj6e767RlO2bSLx+x53qVfM4QgQF6bfCCst1IEvM5Bfd0//gzAK/dB6NuLiLye0b05iluL/hAFDwfvPhQwI/BDdwAcF2yiF9oEUmi721IdVMIjGk2WvAKsgYp5LyUeUm3u5PWIbzQbMW/5RxtRend1M/wLnyEUu65WX3vIk+Yl+oHGoget1emDEBmCEVf8EyNkU0/ZJpaAAEpMg5Pn9LHgSTNS+958R3NALIFd94sk11oolI9LEbLF78LBa27GMdusB6XRXLZ2Gf0Q105++5jCD4el54SZ3m28V47/GkPHf/cbztM1QcmwjKZ5mHdkN5w/S7MpwIrYjROr1+tQNKh0w7mTdVVmEn/DhzJGBgEG8M1RuR5Ps5Zu/MczVqr/dsuNe8agFsqIPP2liv6EVLwz0F4k2aNkGGqDDDejP4KcaVk7wrvhuaIqvAN3LtvojXrEFgPjlADKJ8h7Bj4lFPRbR8yVLbYOLGrEztmIrVhpPpkSMIMtnvEm9/M2YGaIGiUPXpG95OElZE9ezHGLxEcO8obL2pOuZVbrSTmSk8va3vGBVpHTVIbWx//ES+wItfPxHqxcYvGWDpuleNr8NPbJBVNFWLlEZy0GB/B5QItrBlvezGsaXN7YdXwYhXkYrn5ZX6nQaXefkkDP5NI1OTJGRWgwKro3GAOYBtAv0uDfMYmsHbl3xSzGgm2QKS/GKhNJRi+c/T5o+v1DiB036S8iXniR4AmSy3W+D2tgBSpoA3UJNWupjs0JVk+9E1mh9vn85UrS/eH0LjAFsYCHtI2X3dt00uANONm9IrDGhg8L4EtCF4nvezTUiMob1iTumjXlRmwfcRjpQItUTIMfUhNHfUht6AlNPoJzj+qVBo1JflYlNb0VjQj38bAlwPz6aw3Z0q072218X4Y4Nnt9YcDN1igbPqz619p+uV5ViDh+dpLhiyD2m7LDjNQW+HcdrqV4GR1SpoCS66HM6FaQSeyqnkYrmo5cxtwIeXUwCy9uhrD4gVnHyLorA7kNsgbj1j/9aLGJE1RvQsS+IAlK/p5yE8XcZ26UxJ7x4psUoBqt7K4l8JPXV50FD4DeOwmEdpwW7uIAOVq33EYcQbK92tCF/IEkvZgsuTbtBOFOyo0898ucusi8WewtoKCajmHosmyUYqtD82O6spD1aNqYLm+VV0uDbn5X2rrbMspuQJZbumnRn2ot3vfLY2oRJ2uKy626D/0TjBnU9Ie6dYbvw5XB4zIUW3zNQn5DLQueJUjUmzlR9nysWDGyMsALw1ULvgq7hMMh/R4S1BfSHJabr
lhlCzryk+wrBpgYO2+aT9Lj+WBEEa4qZF/eKYLa6EvwuEGPwxCUBig2wwga3fDtQH9q/B7vgg983aTe3lI7t/j+T5NCak1lO7ktb+1q+B8npG4yKaKxQKIjB4COXwgzpFKnlJWEKVBM8kMVm8EGpjZe/sQu9fmN0ZMBuK+Vhpxf27XvBqOkz41QteJR832BwyP2qyhXDk3sznqjKOpQoh4b3hq3H9sZE7htyLWzYiCvSTnJlXU5+c7eFZR8//MvR+DlP9qzIgM5aHIS9AMjGMvobz7HAotgAtPeODmXdqHL6VvetM+bhrDWIgPBtl4QPf9aUZfkgnlpux3loEcsE+sc6V0YKd+FZhQlxrcw6TK4N/2wJziilYpztGa1TmETQteI0mkRB/D9ooJAStf3XK4IFZD+bIblBnydHDVbJavCZX1wvvc5uza1s0sClEds5D5ogkWXmBtAV3+9t6TekYgcnK2q/WSP8bmWQmCJqioo76/o3MzsNtXKt8vJ3O7SL9+Xa8/uM0pNLQntSkDz2MEewmjNeQVXbHPQ3jyuh4oRqK88G5LO3smVfOYVZetB8ZgA4+YzQ9wc5VTnvxZB16xKm6E9zJhAp7V+FvOo3ZsGiQ+Nq19fZZ3u8kAMPTsZFEBuXJ/9s/HcQY1qgrRj/OsmAnX8I2NxSVKs2R0Y6azEljW/i0UsGSIVFSTQ9LEnCMBbd94i494Ja65tYH01GIEfvRRcaN8wvB7LYR63ffVQL2a4E6bcX6OEAwYXQYmyWG9lHLOV9VTjW2kkGBNBooKk3vm9AXzRzTYcVFE4Cp78gniajGTjrRKt/Qi4JfI/tjsXwN0h4YKwjawECKAG9jD/W29y5sqDN3Z0l1T5+3FO+nQRgy478X2RLb3EC+tOihAFEgaRVKalnakM73d+ZMAmbyzA5hMszuMzsee1t9kvZkiDA7fwk1LZEKek65YtTm5UDGF6+tZL8kdowDzxFjwJ/kE1diDWnlHhR2LfWhJXnO1+TuXsmhrc3tM7uX09dOv1WXJWJvEJvTFTTjVydhVLUw6X2NhdRKP5/RgjeGxjbyhIrMjrz8k2NuVN25NkWgVfrcqrgtMDuw4EnXEpyoxY/vlg7ZzjC/Ef6himrPggQFnMFc+Fz+lPMJyf285v2lcCrvNJPKDgIdzLl639mhOHpa+o4/SEcplvj5VXF+gwuN9sDoXHJkvHtyJrshYuFReHKQzXCwNcoQFn4oERXJmdUajlv2d+M7r5rvLzJ4FzAH8bvZy8HrX3NK/lm2LCEVidRcMrhMXAbxigyChBLPwkoOg3ka3lojZ8YBPuZmEMTT5kf8JjwBA9PAueSZswvP7GOchgjzk4iwhF+hk6bz+3FAOzyohUHsUA+hi2p8c58gWyNdOLm4ZAdR4x64C6008u7uQChjUy7IWxKOHzRlZ2oEoHwji+BQmEQ9Y9QaoCdBglafE9xXqC03gUv9ZnLd6nvH0MDZDWTvbSS8cvoHLL/0atGuTlM+glSWnKyRN7nMC4VdhGuYRvgdcwzGdiujO91VHZydSdRZh/Y3F9bXYbUW9mkBoyU9aZv+ER47TfDyXf1hwh7upv5BfVDby3eu9PqgGahICcZz2dYXNEmjgAXXxmV2xEqLnz9MxYarMAFtlpNeCP+zXpv/75pnEF2BWBWq5med9az0hkYCSb263dtkTDxptuNq/j63J+X+5PeR5eRRLEsXLMcqGtA9kWAEELF3urqis+AKTTw/ZzfBq8Rp7upCjd8shUDTs8CiHp3eF41FQpGRaALNEadJaeWXxrW8W/kAnrvSIQI8+oPXHV28ZViwbPVDoCuqByAvXGL7h/hemfnyW+YIsRgNRaDMMO+zS1YnfsH+3Z3pGab2ax0flOn59e0rF7L6zE2Hp15xiYdcvY+rLKBQ3Y+AxDhx01ootS+jSqKHORgtuIbj9a0BXDsisKyGzBnFxl9N2qUxzTJbFUb+VJHdJfGEd+HrH4QW/ge0CH8ItdjKZFlmrlc19BnXKcVT1PrjbWxqlGs9p0+NZthysCCILz6+BoHtb3EKQsLGe//Jju+oqxnVnm72/DOL/SpJMTv5Hs+oO8iQg1aFZjB5wyOCbT6VMOOo6HSAPKB6bEtP8U2dKyNOkpAbkoj9mMD7pMisMdgfxCSbayDZNZ7F7OMYaoojPP1pVUQYn3SCPapJVkeC/Zzr1Rb10xktGOAqaI5s6jOlFYUNc6XkAg8mkVYNcDtGYf0Velkx5V0mWdxVnEt9skIKo63cjY6Ce/4wneWKuuCpfFjz8gxXBgA9SafMoILG7l659AbwBBk4ph786Q2Z9boILLbXc/tmBfHGSBm8VWStD1ybk3ABhaVlJpTSNWtDMdpfpiXILNOp8qqDsTVdhLTlv8BHvNNWg0fqr8RoLbcEY5eJ8+qXUc49apnF2g/q7K7jvUk7d6dHad6qfmRR2nBiajQnXN92PtRCLjPQ3PN91r09CSwx6oJKSXM6HcU9VFbG9Mk0ZJwHjklFC4KNytt+ejuwSgLR7JrdGDNncmCSk1Y/zKqb1b/NJ3Qc4zB9Yacya9tddPG8u/i7Mf+vAbZLXUt6AThHgIABDl3ERMdqqGunIjAMdAyGs2ddF10+sDthbYOUBl1vOwyKHPv1x50jycC8DsbZxxpdcrOZOw2qr+Y57S2N/ppDIdU7lNvdJ3H9p2hGjUIuG2TRChk+48A8VYAJOG4TPGuSg9P2iT5ytBxskEj1LJcS3PrpcX6mRivzMt3MvnC/2jdGryHGYIxrkhqpKUl7nNS4N4G0xl1nB3Tw+UGieLtQSwt6sbwnQWCABqHhpjIXcDnwxP6ukoznYT4cGRchCrXwbOg+MGi2k4UVOIXEeM5eYnnI1j7AvpL0jjopafsAm0c0RVGLn2ILbXafxS+GeetohrmZbGmjuuCU9T9USpZA+dtbj/NhTyCoE4xHFguIhB/6+9FYK8hXh2mu94n38Ezdyj+1Iaa174lkKmLsYntpS3gEeLdVJR1zzyGNl1jFg39a2UPH7Fcn7oUlCaCcklBPOSFSpsOGV3aKORwVaQJCF99WWPJqngPQ3oWidYvFwmzB3xGEpNlJLAek9X6czywbPXJbpJi1nRGvd8AH5KIFEZKBTd2rgsvKiJDDcPJ/fjMVEigDL8SIhLF6c2OkavHY+lBgYDdugh3Hg4nglMMOPenOTiFuWttVFpVRb3o25Pqf8jO6No7LFD3cP55yptL303y1v72ANPsffnCRrR3dMdtrExHaz32nZpqw9peTzG1+z+pR8/UAYbZW2uVRRoYuEZwyw2Pgxz5EN4/k8ZKEniuCYhCAOksjwOzIxhRN8+o1yebJBrCM9bnvrWqa/cy7W3SEWz9u0J9HyRTVSEvBlO/lM4qfJkUbii9jyNr0P4gSZp7dHE8y8yZSdXsdpSERwcC+rJR5DheYvMxEltUDJh9X6fyGZNvzcS3mpHxLp/KCAg5o0ujun2exOdjuhyz0YFhvBbN8hKdGd/dxF0I/qfAIZSmhcpq0zusMZlYaOEOT9lFWZg6sQeo9kCBTm42axW
GwsPJE6/9aIrdkghmneCS0cjtyVsECZ+6Wg8nr1RJ1YLilxFxswGtHzXSAp625v6iEZd/TBO9N3T07XBi/HdLq3EZ+RjxFxit0U9rsyUz4/MzGYfolVRwasGuZ21MvmUSibOGpH9o542Bfi20qqZ9qD4jRuBalK0Etr+zi5ij/GO9UuQUpSo/5ctEAvFPMxkhLhLBfwZTs9lT1qg+pXqEjSuJXQMIahDL2aB3mrA5RGlGv+6WBeBsJm6HmlPno08+Nd/G/3gHGJbI/FgYhcFysDNvuqV9EXw9mKRGSvnnIt1+34y5zt25RETxApy6rf6ldkPzJLDgGNpeQussnhGP8ak3ZxbmL9jstdpAnids+E1Dt6pLDzFTB7gr8edMcdE+YW1OI8pl85zk14ZMTO0J9CNLsnZ6jMwRj6RNrHQIa6VBJVPTP0EacR/vlxOHoq4a8XWx/BytwwyBNDBSkibLF8vdJad1jMeNiDBw5C5HgrvHqrS5b3CAWr8prdc/PkrKRO5+BIhfvMs9RWTgGWY/w3e36HkCh0cizQgIqWJShnTrwU8z0jFO2+u2yLKmLhwbtpQvgWsN9spAOChxQ8gnqIPVzULGOdwkdW3rjdCwX9L4XNZeH/sLAltMdzgL2ZTZjOFIxUHbdAxGeFo+NEyXL4c40Kbe7fHEck9DboiipM3bk31Xa2/fvsUPKTGhXW+fX4f7FmCbwsyfPOboGN7oy0or8U1aliNkkYbmI2ZnscxN+EUilDHqRFVZpY3FG79C75gxOKIeUub38TpIlL6wIglbqebMVKf0Z+PevTHe9z3g5j8+2pxEgreuW6uVG8FjEcCI8FU7Qe4+oXyrfl1PvCf4nMneIyhyKRfneFuN0XOVrag8TncUbMLCX7yvqs83AcO8eMOivA2ZF3rM8ALptciNcgp9PCRNMgekeV/cSZXCIBdgTDY3pO+3WTymK6eaAl3H9/BbZkVM8Jdj319oEF6aUIMLIb25c2jXAbxoksOrIBe932CbGBlP6s8DAZ07bZ/ObHL71pQWthzlfMDVjgsja20Hkc7wgu4yq0jPponTpote864ZIp9uGaq2UXr8+DC+KVDQ9XFlvyvl9i/kngSZXizV5Pjp+BFRpPn/LxTQkX/vf+D9Fypv7PFqhM5oohL4Ts49F9hpOYSKb2wvLGlIVmllnzn5BAlTP6qsVH+A02pkslksABCWrSacjN8B5N7VvHeLaP3J9S903p8WPSt33XszUmaPF3/Fsu7dtn0y8TURhv0wtDlD6bTw3DRTiGanlF022qnTGsoG3r6oU/5jh+FwHF9f2m6XBt8jDK+aeMRYNCCw5w01wJ3T/ch3UfVgEuc9NbwaZINcdaMIrB6qm/acYcEuYyJMAP9C7cCMXIwfum0GQkwipwvVqDL3Mtz8d4bRNk7sE9lrqxLIW2wDRVxwSsWckkgVnj39HLggXxKJxxUhMoRlBcBnLNb+SdO1gZg91PGK6uG4cqUkRGut0RNLmGjGLShtgcAL1oYHTQjepSGasbsla3ekPBzfsONrOxpD+NvE/NHW9+VivHHoVU6PpIwI7HJ5sK2stlAnpXyXstnL7N0MsweW/ndKeAq+x4XLY97t3vmJYOx0LY06TcTnISTd+ejNOgCxWOYKBAaYB4u+nbLh8O/EY3tAIrfn61MTF7HXA5q1hYb36AstI464FaSGZCRvsZcbeUh6fI/o2cPltFVWcakwrwiRjMTQoKR7LKv2VuNU99RmnWyze8WPJutSSPPhP75CGDZ7GST/OAZ5H2kd8OV3g5GUbXAO5MOXc983XHaFGDk/R9xlkWqaWCUfNRktzcRwGSjOPtybxV1nqWI2NqgXNcWFABqyXTL8gU6Q1lcbPJ8MvPMAMwpEAzEfmQbV0BSvoi6GQ0WnvgjqjbESdMLnZqP1SlgwRwQvfvN8OUJGvwfS/vUi9YgF602L2QYbP5esANWoPOLGnlt6pSl6bYypqXD1BAICS5V97kaIk2gRarb9DRitYRhhSNzx3vwab9Hc1D/56yRLr6vqQkeXxuZ2m4eZDaevudZSB/My/y5aUvY8/htB9dxntmAl/OAgeKdnItUVJLQILGrvP5W59PjO7QAYQZJrmLUgbBRJ5JzFzv9Jxgsod0K81DO+xNbOUDv5CecOU/4CBN6Ovb7282uwGPlHx5/7GEsg4ZaNITQ4tg9NZCq6uPoc/t6V7eXjMJtdj5XNwBG6pr7KUtTrF4EE1sO/9p+eLry7Q/CWJJ9wYVt0IZ+xabi1GTqiUlbMi7n/QbdKffmZ3VRKuZ21VZF0qIvEAVnMwHuLyWtiZPwh2qGou5WVrAimA74ao/IIsuWz43H75Fk2FUIz43B9ttWZeE4EHDoMHiIBV7X4yR5bE7yx7CucFRKbuF+oEG62OWStD49KcJ3EKv48eXM2JcHri6RoXtkZtIzW4U/dHFUqkMqmDLW4xA/ut7smZ2kS+xHuklzAA3fN7VaJNBRA9maeqVJUHev9kT1t3+aqE657GAsZ/fmZhZri4g2QJP3M5XBnGE0JNTvLlHW2kNKjsPMGfzYHYpQSJPRGAYfjJVbBqaTDEh5bLLprO5Oq6LCb9A7N6OYcjgFGbVRNUbQ7lfCOmVB0w0kGnUMdiSdjSvXOv+AwM7DLpRhbjcPPixEAMXfC6SWGyRROK0OoP+qusHAIyAujvpDRBYbjR/17gnzssmlIeumk43tVRj1JP0B1a4kov+g1ekKwUjvxNKxo9lb2+ru/7W6V98UAEgi+uGFquuGV4ZLyaKSnG9qdmY7qQyfV74YGzXeV1K2Hx0pRmZexMYg5c6HohKRO1VobY7RMvluTNK+xnP0fFcC2yFHSVGS+vEDRf1KUwaYVfSKp+HwNvT4n4AO6V3PLE5h3SisiSIVf4OvTt6x1g4hqK1y3mwzyUY5ZWy6r63kCWMBebhqvFCkktjdGRd2k9ZWFy4qDWWQ8eiYEhDp0ocUuHLe9xY/geSEbS4XwasvM/73KWGk2jN4WniiE70x4CM77pRlk+s+6D8GXo9+iFgu546Eiuvi2rWUDVBWmXIN0O3tsVA3Ar8+P+9sizVGTMJ7lQFQ7/zmc6cloFivcwc9uff8A9g8cxTmOgMRWfqV8F2ejt0g6wYXKFUv3/CMQjeE6uwAAOyC9gHibaBgr7jsan/VGzyFF4hEqTla4imTVNZriQUTKCEzt7PUT1juVuLlHjAezZaB1LXX0pcewrVtXuBWee4xhZ+gNvQfmLBumjuuZZM+FrQwieVMAhXnuxVWIlD0Zv+lNGpc8KFDpgSpDV16/Hc+wz5xjH3QhU6B29xNVvB4LT2SpECcxO3GkC4LDC9qnvyDu6vmzSFsHB0rem0k5+oW3yhQZy5LaqgfXWCkcz5tF5oxSKa924CLa21LeipjZx7byMR+hot/kTB7YTfy+RLwB+anUfDnP3YOhUeEZHfJfFz5yCcOXrOKQ97ikDMo8ABgYPsNoUVgyOnt/HJuOSqdKARQqd6p8/H5WtQ+lbpKkLNNlHvPnTRTi4IWltWiK7fZn0lwD9hGP8DNOSW
BlotcvCv71algKYmO3suLyrax87Xr09jcYAUFgpTwRyrVH4SimI7EfzH91BRFNF5WQDbA6qU+uOeJ9fSyG/veDK61rRiwprprRc/vrWEZ6PMdF+AVxln9cGcTy6q7Z75pur59KvvKO0tuSGM9703YaKHehiX+8ejB26ZhHcXswdYITN0mDsX1KXUXhllu/v92i5301WnD2huokFc3ztNgiwc/tVxZcURUqluZchZTU9mTr6mPDUSrP0wGnWxg0LsU102p6RK8WWY7y9/9gaqwbecopR5tdxt343lQ06iMq+vQtNceFmZBuLAzxVbVDnvOqT/rI0MTO/2Z4xQwj+4UyAchDBLPanYb/psrZl7apMTp2BwYzagu39sXwPi8Snu18u7Da22m14P1Srg1RUvXPFdEaQq9lxS81TgkijshWcQsqYuHxK5DW+ylHDGUe/oHZ7BCvggRSG/DVr9Mdox9MiGRc8w3YQgkrs+SvEq/8AdhQx3ewPlXDmfy2o1rZUPr5lIOKpdDmW7g/4+DEAGonjMG8LQ5l4N+SwwBzB2ZHk5s6+Lta8Ar0BBpWclHEv2j9g0v+DodePukJbzvVFusqWPnPciJRUs62rSaS1u+13Pala8VoRjobdZ/QW9NN27utg+z4AiwZtPKIUy3DIVta6jzDGwUpFYyqOzbUH4wWOZwkh3fZ6n68oJOerJGXdDOTn48gO1ykIMjU2XXqlFkYrZrJWJndhjGSHht1Ufd1nEtyqKAoPfJOJ8OqUmwYG1mz0paFJM7W/CmsiTmgbtSvfEIH7shzJYgy0Ka1UreXvi4TdPyT1bB8FjKNHlidhCvta9YaIZGk6vUszya8SxYi70R/IbbCOnqY2b+2WP3uqAFKnYOzUwnEfvE28Omrglhm5jgvpv84YG7U7RI7P4tJ7TnJ7AZ6G8Bsubs7/Z9rqQxFT0d/B3l77bmOfwvJ6VdQz+6Qj18o0iOeBgbCx+s6H8mdjK6gnTffdqwLfBvfpRGvKh9PrbVk/3AfnS0KQGBdB7ljUdtrum5zs4cjSNAtzNoIEx2g5jriEwH2+LcLlBNUrTFP4ELxGUvRRtZPGf1RPxARUvvFUCgtQLH/fCCWL36qHTMvFmCWSjO/gsWC4FDGwtXoQFZ/jB+qrbLaNXIdYR0hxKOw/eG0N08D3W2OhWgln+IpsU99vpiZUVec2hxbDNtihXUzvie9HFva3sSSdm+INNwqq2qB/kW7MU9wjM+rA5gUqMkx4ErenK924MwSan9GDciIj+CGTfLIMAD8IOFZgnxdhJWiUD2oiEtjgO4iQa+mDqQEm0c5gdZ2it1oumeU2yG10xwIG4fIRDWBVj6opWPpFr5Pu179trNWVdYKfhbXrUOK9xiAALqN1evBoqMAQ0gFEbCcnz+KVik9qlBF1MPeGdYbv8K3SrKKKIxlOw4bYcZy5mctUAgOyTIFbFUdr2UDQBR2S/rKs6dJC+uIu53GB23NJNMsWLHFeKU1XYgk8tFj4+qmDeAZIpw6yYmDd7tob8mYR12Rncal2gBQWpJidplfJ/JB+BZlegIYooikDiToNOaJXUgyDtcByHS8KHh8c7u0XiOu7TjocEmzBvpEKbYdPtcyFem+GXcP15YForor1T3DgCpeXNQBsqzzXltMkRyfAfuaTmuBVj56b2dE5nZDp7FtgsK4WKojhkeWflaOQw350vKt+H6sq0sxWg8J7I5lcy7kecD6vr5HIH1I8DryS3y2KjaewZgb5RNCF2Ribn3qwunn3BuA+VQ4XLdVcN8T0n8Mn9YtGOzEK6zlanfD89MMCmAuAUsUtXUuqB5C+UQqfJGu5pLrgj9dG5G1EoxZAAol1rlPNbVcUHGwdGfAWsHvgrBFgPOl4cNS+WsVkb4URLjHcPF0gGyCfVpK9JTuWkznTLBYrpyjag+629YYf8hfIUg/+Yll7+kNlodL5WBel5PIgSo3wpx444iQ11WICQWSQk3M5l0YozAScQAENV6uv5AdcpehWps5K3VX2SzCJLFzKMW5Lf9On6SzWHY8H4KybuPGi3l5Ej+IKmIvZItEiNuoZiq0w1TG4O7r0lXWFA9+Ju88LUaKryzgW/SHntoakwfRhXSigIfOuY8nrsc0iQYuRPK34a6GmyMsVzvuJi3EmUHNj1MTXUu1LZ4MtAgc7y6LZYNvb9YNEEX3Z/kJiFX8QX7vV5ucXJo1PSA7H0c/MC7nNyZDCbHKFGYJCmzJ71UTAqVWpoa0IxoVE11OcXixjfiosANmhAk9o79LR+VQv8QNtxeM+3Ah7MESN9VrqnCI/wj+wtP67e77ayER1+GIAqyPMBfDQwZKxiV0OYDZJesdjtXdTGByKTH+zKW2MAXSHfC3BhzU/gWI2786b7OKPNdXCMCqNa4d/9OsX2RViBj8fXxf29DK30b1bFCk8IW9XBbDbm/hpR3Mxw+OivBYtAJh5YPIFOGo8VWRJh2Usan2ELnQbTx/MAo2CFsat7MfUDu64xJPb4q9LMRzBt5NUWvB6s1M/1mlHBYoP16n7pjplK/7T8VSiv7IuMJ9Z4EdyXsAMdMGvqZ7k/BZv7IcfTpYavh8GrcEnT4+4GSiXlFNvf0N0FY07WoU16hhwRZ+28vQs8aQlahRQg9hD92pzONJ6/7goPc0VmXx31LK9edoZclJOvPxQREgECtyqIwhWmcueQn3gayUMwY1hkHY0XbgpAHBrZghBfnY4EbUXJDQ+hTI4/uhbQ5oWPWIGHNpwO1z2f7QkVu41lTZ93NXEN4qhNwV2c0tP9ECEJm3wQL5q/Y8wgFi0913ZmFavJj0zLKbhSZ8iLZM78Y9/lWj9losYlYFPlT4eKJWM//b2UJW6+ud3AdmPjrpipLzJK2IOd52UW+7tNY1fa3SSYILZPE0xn5da1xh/LzctwJzciEs2P0g22pi9FNUg7YbRqoWWc3H5UBXU7IWSyC6icGTpcJVlAdGxEnmeUTaFGBZHKAkjwmchIr9Y22HRuQmOEmdz9514ghvdtClgj8W7DAHFhXBWhgHI4gIsOT3u0vg6P52VK1aunkDnAuYjcZ89vJR1quBzO2Y2YhQmFZovq7ag+0G1PUJeLnGa060y/o2AMS8HqgKMdsGM8rvSxQigP01IB92+n1z4TjHakVhWpa/KEkvGLOeFUgyXkkV7IyZC5H9ht422UbvmY1nK8s+mbi3HuGUdO6ixXT+QnCSU5Y5VAKcAi0VdB7evXEvJUNTFr64n7UsTdq3zzQjv+1d26r8VVvf5+tV0xm/DZcKXsnELcPwTEacSRa4FQu0rK4ApKd24CtoCZwk0B9hEcCzZFVO+Z1nAeB3JbyyWROYXIoISCnRLyGqVSwnGPD6Y8iihTN64cT6E1LqxBZqUvffeEkn2QZxSQj9eFeFPC/HrWPNY8pGdAX9dgu22lLkViRVmCwWs+kYBDCrN/jI0lcB9nZz7nZ2Md4nZ4w/WRkkpUETmVKSZfnh0MJU+n8k3Ou0UmvY3FmonG5OFJxu2ayWmcbJ+A3FUlJvc+gS8DuGC+qi/9BGyL/yhtJaC
j3F2ah6FOrHJgRpby0jItcTJp1IRGojZ+BffDit0cxi52xkl8MN3iX0A7b8nyxEp0SUd6ivTHNO6OsZWK3bJFGDA0pUpzVpxKCvPh0ReH+OY3leupmn6tAYLWLMMT9jVWQtdjQhJ8bRiFFQwLe3Ch/rpaK48i/re7AfSUqXFcr9dnN5qW8fdabT8zqPnP8TFG12T0ArJiyjIHdhyeFIyI0qWTD/UHefXTEHwxWp0uRj7b0K2RXPLHJL+n5+3Os6jzYtwETbm8kN7jx/VGCqNZm4FNFW2r8EfTY3P9lDDskWN2kbjjos7HCkDFGwE4MKyiIaC/nYCcrHifSxOrDYpE2dtlWqoLbIof1SKZ/a5n1fCFnPoiwTozNxbNry8q7Uxy7WbhJDyxbFTczgWZhKRg4w1OVXVO+aW8mGPoEUIDjwzutnodfZpo2joU72D/H1D7V8Abj70gzBLmbC9X+R8EhaSsUOCZmxyihyd1aK14FBDf80irlE8b2wl2IsfgYR2vPs/qi5JmBOX38NNfgXVXJYQwzjvBhTuTuxJ3wgA2bNiT3j23nBKXOryR21wngLzPRYoHNDAAujCsRpnWfhCHRBWCSRY9zGjDmsL/sGpqLrtxJHf24hd/hJ+SfJUhgzPblgYCxBz+o/v/o4Nb7kcZonxbWr78YjEjepDsfK4kVDjat4pZMmcxTL1wgtecGfLe8v+BT3yyLpKTKA5bkj4umQM1TUNkkxt4AMDCUPMDOXR63NJ4PEe893hwV4cXaLmtQtqWx5MriEfrL2hYPW7x94tFmI6vHks2PuBnIuLF/F/74BOgFvWZ2wL8AeW4+HDPuqDqmbR1Y40S/35r0gJ5/bSimr430PsWvrAvSqhqAm854z1iLia2KswxbsSvsaC5vN1K62TEEO9MaK1qOSZQiodX25stUdhPBXAU+R9STYMXWkJAZikurUIUEWCJZwd16H45hB9qmkGCKsU78B9rnvyi5H6vhEz7NsQ2Q+ShiyJPkEnUshBifzg8XFbiZmCn5FGvjbrLyClLIDeoS7zvgKUgaQbTr543U2zOrJLxVvKCu1vf2ZwKovQsAVjA438nTPq4U1WgKP4u/10tKPbz4wzvhntLSYEhzbqnxwYzLfiBzcfS3Yv0c1voZ/UJ1vb1QjaIGb/8FWSUAIo8sAN9gk3IfbOtoFUWHC3fCwNjQ9DqLuObyiXfvo/8HRUd4pRN/tO6vfPdNztbxLp3S532qdTF7h/ldDamNlx4mjN3lWZ4RyO0jlnz9+cJAkrJepQy3VOV9YocqQAfaOsB1t9aMiHA53swJW1S0kePRwI/GViblVIujWlJOEOUoNfhGSvZVkslWuyumNfxEFqmfR2yHWEiHRl/5TT/4o9OKNmndNxc74WFSfgmND+r/8vdduLIy4IQRRC4SxRjC3GCJi9/ypf3kNqLEvnu+4QMhMo6yM4MxNomqBOrz+JWacRpkMof0HZeLqNPsA6aIobz9OdNrrj1uJWMSUI8z7DrrPnLjCeqPmKnR1x0t50kEizv1mg3UrS5yv94SJUGTMk8jTQxTBgeEGSeHTLnQwaZwWmYLadcnnhC+gbCHfQ24EblHPRHlpJXm2LutKYyVsdcKzI4ED6yacYttUULoL1iSHTft4jnZKU2h29On3//L0t34Vg1w4XrX1fvfhwu+cmOGIjLdC502BO5VsJF+nzKyjjTWtQry09IQQpaGaJiMsy0RLv/+3WKtqXAVXO7q6umpWXq4tOxFHZNk6ydfhFZTyKQmAH5J5tSszKimVizeB6mvhwHkr46uKr3SfJc3AVVrIxJYrrc4G6/rHGhOXZnbOE2TzK4MTo2NotKhdj7MbLGM60+5nZeLd2NYky21Q/DoHa9jT39izxyt/AfkmNlH03Xv+aTS1epjZMbUnmZ+6S7e0rlc4wIvI4jG+2fe/u2dL/mPOwk4ks26w2U204nUD4PWvgSeS6edYysrMjonzSSEZs6laQ+7yQ8lDVa5omHTQzjbNICJgY4GGtAZx7njoNMP25kDwurnz/RJDaQPs6O9fkUDnbrIb3DHRoEiOvto6/3rsyRI5Qq3uhLojwQCmhIoH+fdYhjKZDjJ/EKSSwHD/2BxbmBzTZx0IsDXVK5SpofjQE0BU1L2uiJtkpGSYvHKRZUExOVWt2/5306xgoaeUZ71OeQdncXQo6nx7aQ4VOC8/u+MEjy55bBdDRFpi9g5gNBtAOEEmdBOD9KQ3HzJnM/o90ezAMxAAAABjbtm3btm3btm07H9u2bdu27Q7RQe5NSfNo4eUGbWEU+4flgJo+7YuIzMsfP6x/OHNAFWof3urYApiI4E/VfqY/o1RqcGetZYlow2VqXNDTaQtkT5HhISP41WQKa9fKxeVw7pVc8QtNvDnqAH0Hf9UvmGs+K6l/kxVCPyOjUucrgG3m+YasR4UWa+SXpSXCUHjUoyRy/RWYgjSEn1uL5CV3H+F5UWCjK/Ex+wiLMF7W1ILbz+kZLa/NEGf1Si8enTspFI42FRJduYYQm7SkiBOiM6HLLRclrKZtAgjA1phRbHC34BswyuOpXRj9PdmI1fkoLPeP1Nwl9+DWzOBmXjh5Td5UcjqFTkdhqQ9Z/nXVDrjO9i6L2nO6py99+S7VEWbmXOwc2rLV/UzyITvdVmA6zApxt8GmemTy1n5ho3xdUqMwp/07UsaPVHBrA0E8o82+RrGIzXF/oFRg2MTbI2guz92jLIZid+gICvQ3jXI3hqOLYeFlLouLIN6NI7JXo8aRxcg2q6Wkkf4NMg7p8U8DILwpBPGPpFqNW5F/4CdPuPrPRYtQEOq6utKkLjojd7yH95iMhSh50+KJ6kNdhoOkka/rYT7VrJWMu78UMKA5yooXwx81YDCgs+KyWnodUwwk0HagVDPmN9czEa/L/gFE+uQtfXLW8UMdTKIxRsCr1Uosrmmi3aVZsdYTcPCg1KkKGwL5Y663nZoASmUwpk9P+dTkxA3PLrfInhFA6ff2qQ7IyeHY+OfptZVEha+s9lACHXnsUjEWKl1ocEyMAIRrKLk1VORRgxL0u2ejdzQ5olQkrwRQ658PCkXm7eoNhsv9YqcfNRZawL6QJFE/FQqbpDw1b549lIBIUHdIz7UbDvXwD+1AYY6Tfr4RtfjJWyoOTKowwO3C2JvoRmhbk+O5mIStpzBY8b58Ub19yWykutj9aU5wCasX5eh7mUjrpKa6o0EO5HZK7czRZYJPHw4o3OEwq26UlpGfGLt8KDWTcVBd9ae5GAOxYB2459vRcmddc1Q39L5wY2V3he9UBrdbDXd4YcHya3y8I2myETfC7IpZFV2NicB71T/UmNEUeh/sDcYmfRjnC3K+ufl+w/DN9Oga9hhHILE66qm2erPxT2A3bfE8Nmdt0QEeY3nGGzya81ZOd1wk3w83FPZS2S65Mrp9OWNEoUniNUsKqf2ra3F9DtpPEHw6xFrHVPX0v0eQXjijTQ3UX2CdioM1uqb1ssSBdDvYLfIAae7R/s06U+c3DAqBTL/SluJfWQKFnM5XfucHX63NeuS318s4rSV
scgR47JSlUQc/ZL8Nc52lnE3uEZQ3w4PabG3NUgmW1k1mUw9mxEWY2XxznD0afCa40923HieGvkezS8ynBTrP7HqoSBQ6M8SIJGtpciVr11uEmganAWvQ7GFuh6MgNoCCAHKr+6D1oRn9tql35fZp8ZuunWEldiswn4Ue1MRHpvPWzdzJa9cMWVJba3j37vMiJ402BHZoc72aNQBCPknEcfhvhIx+n2Cej75aEss5+MkgPFDlWvUO4udAu4xOQK4loQkeQ7M0Ubf0rALbyTeEiwRR886NgUXw6l3f1hzRtygswLkJ1AhA5zaHX6w/9mjsEUYsVotOdCf2NOPBep83KExu+fHhEgdTy76rcw1iw2MTJsgUBU1zYBuYtpZRdRFmdXXT74H+n2YBAKAgAANNu2bdu262fbtm3btm3btm2bN8QN8ti4qu07+d5RDPX61Au24eRRUb3TWXFOtYGu52g+a2WXLG8L6xdPMHgNdpNisT34+pOPTMFzOji/cyq2spfzZ/s3/6RZpcQrm78HfoY60yOApJVShlXzkrAvUXiCaS5DJvDWJF/FPfA5jRcj2Jto2/Xz7uK0/hiyvWwpCKd1tc4Nq8wRCbQCKjRE5Ssrjd/0VyP9xymg2wU8mFgqEePAiHHt8Ed9nUZKdjfhtSQthM+3oir+nltC52Gk8RPc6g+jtOC84txFKTUpZYLJrJAZCcS0ByvIsc+XWyeRKW50M052OoP+EtxkldcVv9s95sXdvSag110H+WHwBJMOvUzcfvVaMgU9GUPpTtUEYR1c1v3Qns0C8fhHJjJC3bLz9EkO7WMHAjXLjdmLSm+0y7JAzKiN7jpIlwN608gPa08wv2m0oJWNqZfnL08hgg4zdwBXRUb642IsyAhSBGr9TwMVlsWu0WhVSW5CPz7Zts7GQhEIdCqfIoMzfFaeLxMiHlEzeujNcxPQy8fwnBTCJ6EXSzNUUKoRmilMF/ewD9FI+bPoQ3h3z1tGZkZthXfDsB6axsxC34ri7e6838mA9DerxA8LldVjltqzy5PTshqOPYBLnx3aaKaDrzzV33Hvk82WcjfxNZOZwksCulnoGXq5BDxBDzSBovZnTpPvDmLsslajLaRewaivZ5ytGtMbH2gUov7GadPwja7wl0co7fPOuZzGNjrVYQIV9qm0+9HLye7hIROqCYwdccmHF0FQceugEoWo4al1eYrC2ncIUGzS9tOb76ZjjgfV3neURWcMJVUXzopLeVySxo7uSS4lp7u07o0CKARUxH5O1PIERg6sCOTc/QHj4XbNCg3JlQmE1TLKFk/S5+kZJDGAxCp+ezFA1vJmLZjEQa/IFT+vz/YMpAwLYZalBwAdx9LxrhH6CneV78E9FLPGTexgujlVs2Ridrfxg9+B8Gkw7hUjZR+E6fu1UwCr0L3z03lgTgD88uNEbFufYYQ4hbXerzozy6jzt5nVvAeu3npfGr8gFBf7Xj+4i8ZjPROAqIU5XCxH1W6Rup0oNs9SwPTNANHTt3a0xD1TbigZr212G9aDUevdbDgZD6fLgyUwDhD2j/YsGow23mkbSrzX2oh0N6jq3YquFL1RXQaabMGjjkHYmoa8zNX+mdDHrJv/GesKng9nz8QGYyeHRWCKukltmsDIINEMoy2UAZupZNOV4IZxDGZJwSixFrGVu7EOUkn62+Wxsf8wVMcJ94LWyWwjP8cSS9OwQXahOHv4dknHr7WQsc0SINuLq03uh9GlU3c9nA6n/oiWAZOclbDtRiFtv2aTY5g6dHR9JYD0PQStlfDyvXJOhfuQlOPw3KQDsvBsOn8zNdh4+TIu9APugZ8Z6cI5hEdnADkY5talgq9XNb4HT26/nZKPmXj1Ye4XKKw24PPojee2qBYGjzKcT4YhsnP2k92JCM8VwqWeKoKeJvHAHqKgyoNfR4uPRoL3lRdqnSW1i+9N1+pT3PhKVH35wSDlZOoM0TKX0XRWjvOIzELg2F7qjD9YCmSbeQr0CtDHfrErsfNU34Dy2NlTB0ZtEtEUI7X+UAkyoqMok2eslZZpOh/ByaGpgGmQ5HYXk8XUul3nK22KKJgTiJpvxkJGAjXn7G2OxaVpvBELynPRZnnj0YwrMbRi1aVmQm9PDhxS6I9W5PpTmPcax4wDClvMXsb3GUw8VJ0HTtZAUzkjpeVNjEsO9pcQf/HYOPrXw+wpQOzsgNpAoKxRA1B1T5SdMfu8bVavqGKXrDd21VIVe3R4vWh/LVib8UxhtOPeHxt0HsbSq+ZL2AR1aL8AuWNydN6ztQFw5BjP4rhHNwRtpPHs9uMREeewinam2RimINk8GzY7WatLEO1cDaHyK9SYRPNXgkHLBfcudKkeYR4cpGMf6LnXGpyMpJulMDokrQR07Yji2o2w+5TMTvDRE/EuNjBXZKYID2H5fR981zV4feRCmkPAxvwplSp6wpvaqhfNUbDdVOz+szlR6gF+Po6AiSyuZ8L4lxslhg0qg4ImfAnDvdzGSnDB6khZAOnUVWM/iBBLfMI6eBts4nxaY+rtDJX9q8iwV6srBd4PoNEgYouqHP9u/z4RtV9nIJRr6n7Awj9w6Yv1h3jV7hZQvCDiIQACTTx3OTF538c81soKouT5OVLycgwpG6RX00BYrf1mapT5UQgyxtm37/45pcS1KjqYcy6EjWXMj+2WNt/LWhIhPdAUwkltxft2xn3QZY1pD7EK61F8IX+VMF2uSaoFME84cFQrQb9d8y8g12bTPPZk2OtfB5QsL79A+kD47EQDbx428tzl0yN6tRpcGnugTaMO+KQuPviL59oqumnUi9uWRywluKx8cHUPTN3Yttl6xfuuzGJV7WtIyr0IKk+4cvEZKUW7PW5ErwJd3WZUIBVVKKfYvBm9WikXzDY3O1S0LiD/OhWJ84eeNykv2yvNkZSUqSw8WyOgnsrJ3GIdCklaNeSy35IaqV2U2UHgB7g/Ze7ZWc0Yz6UL5iJGgCEailU1JXo4TqnhpeFE0dbms6wc0yQ8KehURGsv00s10ETS5z1bECHKm/a0WDre+TCxOvobIURrscIJKioXQHg1gffOY6QcQxx5rEDlVOAJ4NiCZieBLhmf0mWtQFYMzZZ4gGSijN9llfyQzh1qKWAj3DAruFjBikbv5u39y1PFPIkGIDyyt504UxiTDdTaA5cE9yheOPST6+0Uh9lqglgiTPIvnHc5blp960UvD1XlNfgW/WlBjAPWPksTeXLQFoGgW1tobTp5OEHzbXNmrcIaIat+nq07Lh5wpGb25qx5KxPcooPqNDThE5hymC3ZFZAQx3RfvUvgiKMtxgTjOqHXrqLEX62OmU3g67RGyMNwYIMX3AfgtZy6O+pbHFGPY7SMQDtvKT7as7zcL8tk22EgazNVQXTB8MfAKOD5DGedpc8GzxOK+pCOdjPtKarbwkFmwyVbm3s3aihJthbfYfCwsXGxjBW/S1N13GwbkLPY8K5QB7zdYjoCmU3zlr+88Sj7Kf469E8ne0lCT+C+hsFlygSe2ZIqU+f6EDmgz7e9af/w/Y9CeX5Gf2M6xMJw
+LhXbtRXB/xzXEgXRKOG4cL2aVWiGRmx9T4URyybTHKvaQF9GCmRjwKExWbhtJlk4yn7zfIPHs5BU8sqYa4Tiotv9ifnJVo8Xdx5X6HsCExc+ryfJI4kf+LFCZ0C6loQdzWmx1fAHs7q4gkeBnBHQkEWk/mLYuZ2UckpkX85q7ETf7hc02WfL1xh2I539lroO1g+a/62AWGFLU9+Gm2ytlNf/cnkmUR6YRaDhu8XzMoQLbSZV05Cw+xEPCmIplARC9qPVfbNoBDxv3ZFZ0kNKh0LfOXyfbzN3xMI890CxuR2MBHMdL7ZRyI/l0gXRVl+pJFoEDYdgGZcYGFoUgauQIol5nzuf0w87WcjwLgsUmv+lIJJbNS27AjgeVwnWg0F9L+4DT2D+s7ndNw+CUn5EQQbTo1UtaWYEZ7B2RtYxPdVCZjvVlupjDSPRrvsrpE0INYXvLSwsuyKWRZxp+HYQywbU4JE6eQLks1auWMQ2aX5pyTMUI4zNFSJTDuz7RN2P6GhjFm4pZjyXrSm2yhDo1sNRgAl8tQmPAcHL2ii56SjVBhwVIZ760maCjtjl3Z0H4Sm9PGNuWMaUkgC1CUiXTFCynAMOeIwcQXps0P7FklojES1KlrRZWr0zEF5OcRaAR8NoSKbHlrA+wJwHLoFNS1jwOGfL6Jr2Ibs1SHUmGn0VC+ft4AgA9NN5XQNBjvKoykqC3AztDTExVsv50rUc+ZIyVdgY2GEf2kbvXXq0WoSR3vi9YfSqLm1GiW2mpsAy5jl3z5a4qbu3FinND/4avwqVxkULSfsNpRV8NsmpP+0piDS3fmZZCRuAb7e4uuRMAoZXmIO0iyUtj7O4ntaxhyGjhUcz11ygpAlfqyKeno1pbz+6fD7w5jR9zLSxj+0a9k/VeYq3AMK1/8LkZ7hYduGORxIwIdlTg5q5UFmKNam7hKdf95zbPprYJt88+3PXGaVlVW5d3E0dadtCDBTjLM1uF58CrY4jaJXriaSmsEXKN/cjZBk7rruufwNG78GiZ4QyPeAZ+R0xcNuAp/wkAFylwGzOXbjdqPJe8TPoMh5xc4cmU8jig7nk34huImGSr5Wyu61trOB24DKld0rgmpDh6cz79c5Kr+SGdTQeCpduhcjnTuLAOlNUauEO2IRmQ6WvJqViDmSkRFUaQGrcrUT+jrys8hwfZdtz5Ont7pJZ9LGgrZlslk+Jq4BVxAeSQ9PHj2WNQDSexWUq9hYOKRdtg5JTAHXucHpx1uIfaXMM69svpYvNBa/SWBZe70vdjPu31fwy8hol6Vm5RnwMARIW9Z8OLUhoEuRutm/YIZLKo/73ZAXFNOe1XqIvtdh4QdlcNdTDiXPjiJPCd8u8BSpa8JLAy0O9cepUBJHcoI7k+IWoXXBlVQ1keg6PHqU6SeViX4zJZdxzAP6fWKP3iwqlHaj8WNOZpFPYn8IeD6gXA+GgYVPX/u1+sirJwSXgHJ/Zyfs6tS1ghHbs0nGFIt/Rp0N861zxk32UPLTbOONx//nut5rUjbno/h6MQ9Ngspg2+xNFZziU6EJVfwEm2Q8YITmJB+VG6kwvDb8IA0rFRnsRsbM8GtZR/PXESmef46nT7Y6sAUwcTik+T7BwkF3NEPx8XdgcTzeU78uFJDUH2BaWkqbAwn6mwPQ22/nDGMoEsCGMA0w4n6/yh1qGYuzYnC41Q85nD/gRLYohy21lpMf4Xhq0Khg6sS6NAF9aB+6l9ZNQjYFslur2lg4FiyxfvO8+K09Mm5jO1JPbNFmKMxkXbUDgn/ia8zRU66y/WklANgI9G7Py3995irHYhDTUIxKngzSIN1WMWHzI1Prxr0tVg9gg4kZMLRmfmKlaleWTyHrVmFTCV9jrNOcxhtXlkDqtLom5HgCDcu8Qr2CUAv1wKRiYzFtp+OWRr/QDfEHr4wNZU8EeSjPz14+ihpHTzyVkvRToSCFJVxxcocx46jM3YeoLY4RKbHF4RUEeNB3eMUZRxM0uLOQjZyP17co2Jdp2EBwzwZ6LtwWb6uX+4IX7IsFn/roH8tUAdtYzs/5+xYQAlKeUZTBM46XpKhoIu7400kuIh/YP2DswWzmV1mV9suNlqlvFUdE53uph5gIjrImz2m0Q+LCuQhQmOviGeu7K45ymc1WSunlejNdSNaEFWdx4RRGiTGcg8ttj5ZOuX9CzKLvWzjlWnZ1T39epZQaupv3chsSE3M5mer/IoXP5hnumP3SnhacCEAZZHHzs/B3VWvMhDy94hJuToMZFmLqOalNdGeqQNNZSg94+zqjKgokN5+Xrkr6M5aBplbl0DGBZ9MHrpo8dQLHWBGW6zAU0enDUvBa6gGPw0C21OQo3Wk1dH0efP5tnjjHnNW5fRV3330GQNPPRvTGifXSoiOYOQqyfqMTj53cEoOZSq2yvzopW5fed0Akhci4Vx7VN0mmW3ByPB4lC9csoRS+rjLqSfbYz+2LGIpemFgoqkCfYwtQdIPUpm53+VgqfricaNliqNIiN2kPoihcdKNA8zZUqpHueKU5lOg0k38tgG7L7m0MaGlto0j7GnAXJxtrQ3YwdBsdwhAt2Cv0T5gnNgdwKVlwCDNd4ea7p/B2DXUYGB7eI1hszLK38CmpoJa8ZsGeQgsgdSsJnDddR+6c9TasAp2ePSMxF8VyTJIwC4axQP/Exkf61yGtkvKdUjn/4tYqBjUqWYpQJQZW6Y6DjHVIAOSsgS6RbsH1SzpMa5jass4PhpDhGzzNgLyaAT+4sFarpj7by8sUUiO8cEDZOh9gq/i3FIrwqww8NO+y4btYi2agp3aGKS/3wJ3ekDkoMn721KMyGfk1I36bmhIPBdFZ2WBbAKGZbCrSL/JuuzAQGAkYJKqA6iWrAlovaFToGj/le5zTtspSUzhKAuP4PXDDL590x6NjkRedRhwHng/9WHeO/AXeyvJ6nv1iukgUIyfmKAUJtgywVbd0tFIjVoIMautxU1+ot/MgEsvEuPMZkh18SwToVEyfPKZkgCj7t+bVdAbdpUYP7WXpyH354jgmrfM8l7cJHp3fvLOu52JjCTTTSKHypQ9uUxfLOxbkyFDeHHrjMVlreJAn4EA7KVuES7vKWy9F2cdHfW42V+6Y83k/AjMEXDNO1FXw4oWPrwqw1rfXwpLLIuSeSIlin7xx35Ui6GsVFJeNRfhw1OuVJsSFU12WfZG4vfCRn97KUEmH8Q9itnMPFNdMR6ycCbWThHM4vGah0a1mpZWBEwP/eQi8tbWTaOSqxmWEEUgoQCjAi3OXND45q7KS43COSwJj4w0XMdDJKL/ndo2Q3Qqv9/bk5hbMfSvbamBwLhQJWWXMO+g7cfE65ZSjEZ9WgDKZcpIv/94pPDy6SQUWRQEa08yan5jAGJzRtSO3UwpCi3BHEdhqxhNt07+OIY4CxAYmWRpBfUuPfA4zpeCerzrB1b5vGOwANR22pYo8FnTOuZVUTwCZtNcghx6EtLus+4cVq9i4ihzMZsobbZJe1j/XcPM22JoEg/OXDCJPIanv0OVYKJu/ICd2xWQ+yp/WCMGopxWehPN
qBueK91kBat8qAxaTLqaB7OsI1L99KVlEJU1npW6VXwFzWA5qqZjUs9IRqp0lqlpatw7LAusg9Dw3oSgmHnM/g4kjZ1y7f/V1065iv+TGYtX2q03iNzGv2m9bnr5Mwqgq2VJajTC/4/IBaWBlzWeyS0zC5Idp/YIJr17azxYp9EM+d0b2n3mNl8Hv0SWww30+RfzDUEIE32OBCyCo0qn+cBJfR0Wtui43nALFHm9bWRnUpBqcXW14an2lIcXI+INOQqJEjW51BH8Rd7sCcP2aMFqzVz+1D5odasUTaqUD6KU+RPGSf+JqqQY3Qp3Pk71f+R9bnyhTuLvuJJ/L1VJkAJZY28VvRvGlJH13PFNAB4k5469Gkn6dcs/xbJTwjUaqOc85XtHD9j84vv3gfKOrF4Ww5Io7JWdtuuMbxPNx7dqLw/WKn53RhGMfuxd4bRNVqAYp1NR9+ozlPnC1QRsCbwWvyfw3nyEdPRQMOqglBlcT0yzVaKfJZfB8zGNKV0smocP+2agwlwl3TWz+M01r6RxIP7FvTwAIlRn0AsHDfsHXa9pYOqTwa3CmX4ODY9OpKlPISwWhtw1G7pUAyCYqB/lZcg1IsFPQIeT4sg67tj4MJyYLCmlxLC9YRucoL1wCQXMMIhfwCBiO2NMujbBheRfVPotSs9ojr1sjiUYtTfSDmDSKPXrFKIEv9HQH9HwAgycWkjFOQRxJB6Gh7Y+5Gg7b1gng3A8tmldXWJ4tCCVVqGcPt6VbvldFHbyvxSMAvhuvVfNYEmHIyLa7rob5iIxyrEEh2PF09+IHgbcFKzBfelgeubDA3MXnKK7rulUJUw4G0DQoJenah8yVN62FtYA4UWZoVhNbL7r1NMWlPHUDCOy8kbqEN/fWMG+xxwZfexOxiXhPftj4gUUTlgSaKUtTiD7g0jLTQfnP3UTy4F4aksBTBe8bKCG8LhSLDcSAgcswdtoAd5Y/lLozpZUaTILHAktYeM/S2akJd0Gyio1IbtNQjqv+rfegeYX0eSSMlzIr1s6C2tPCDNVzJwRGiZPItFSqHFzWF4QZGHaqLoNmTP+2OQw8Qsyv/JIVjX/hE+gYI1aXhmpHZ83Vk3kvHH/xqDlwo1Z0Fy2lD6taPu2sKeAVzjxH3inKtjQUWNVg2AnvOY/KM68u4vorrVQGI5gieIATlfqnAMUqvlSKbeZoJm6EwMRC94jg/P6XH1erN8BxWc54Z6R7cxgpn4t/dxAxbOM75Zp7+AjnAtxbbRCkFUNBWe0q9Tk0zQNecTK7O4muTFlesWFQR/gOKDG9sFJ7XRlcUxtu4GH8txASmcihvu7zg2J/LTKF8d3/GtITcg5zATVfgZsPkDzzn7yQovWAeRaLJ3XU4HVSbC7s/R/9ZcGdVNlPbAPVY2FytcfV/cNbYPMMvvtKZ9Qm7b8JFV1dzKMZVGP5ddEL0Dfr9Gxtu8RkXIYpkI5oAlW0br1wKle308tmFzgnAs3c5G5aHvjH+a7z9wZNor7uWFkbbdz11tY0S2FNWbV3qHiNog0/LHR+7Cj71r5RnY3IbxjxNK1E2W4il8z0PTZYF3j+suuAK5yKwDJddin5vIgy5cE/QMMjWZ7h+Vtgzax2rzDpgggip6zTMf1BA6sZwF+I5p0v9B5RLjGMYLRKtlRisSKccKdAPYSE7ssvujvOD8Cnl1dFWzbrIdWy7WEBMMAJaQc2GZ3FX4IR68mVSBgH5rlJ0axt1fu3XksYHwW6iD3JBly409r9dKItFjkEzQY11tuWzNlG1spRSV8w2K6v32TtXL2vHFUe+NTnymDDAAeE6YCZdX+BbtkA1pWZrk3cGlCFEJDZlfisLDTNw55oafeMGaLKfbAsh4lf8mNQYEnRLR/qacaNwIQtoqfcRdzHYK7FWMgvFzlDR7/bDLNi6xvSwkAh4J/ac+LBE0XYIkAZoyaLjQiFKS3VoU/3gJTcpIwvt9yjrvhQPZShx2fJq+LP8VY2/A6nT8arKgcPRqJbeJpgwIxMfrUUWSmrsJk5NFyfpMJR79jKMmTescsJwXcF4o/PPx4ltmDsCHSpFpExSkaygfEifYcCpjXYW/h259UJSlg41Gl1QnOvuajunQk2tQLYDZ1fA7Wfa6OIdNuEoJ9Ei5ncznqCe+u1RdvPGNjObSDIxqiGWvNaXhFJ2dbREuvGrPMNBBKlQaEmVe101tiIaMq2fF0oY9B8SLgvTkv+yf8TZENmvUG97HcaZfwdfcLyM3RFK0PeNCmmE/46yKOXpGvSLKsD1UjpMQ/L5S8bCXvtSL5L37Iz7kVTVtvkLm3LHkSLwehDySUARe9cigMdA6k+A423xbzZm+NmNrqCgLqL8tcPb8ZFKKTivk39+BA0aVwU8rDYDlErNyo1P5L5nAtpY4FXIjz1On29RJSsRP7CqRKZxJyOABRVovyRTLu9VSWFlQ0vhC0yycvN13l5v9opNBHe4n/JmAR6S+bpuLpW1bGYMsjibXxS4UvygWMLPZgriC3oj4xQqT/G/DDkzewkWbc85VwbqDR2tQWOP2mgMvk10HA7J4BtxGjL31PiJntc2KmWTKNuPWKwIGD3RRE+66uAE3X23ZO5LtK+B71Km/Vkp61v0luuT//8CJrjJP34ok+iUtAUWXHCRPpdZprcF2QJXUSrUwasIo+WPp2ECB94UadAVibyxnrdKQaRRzTyZWlkfJnMtyy6z3Ac9fmeQDfBsE4Ud3rFp4Uv093EjWTgLndJ2rB0O0PPZms78bZZGtDm1ijFm4Xt6rDwB2xCqN7Zrlo+TCSzjpoLv/ytbJP2u6kGe57t/Nlg3Z1AwuM0xDu47XonJhAZ52+dnL+JaAe4/xDy9sjghiUYaUFfkuShQcvVpQ1YojI41v/eBplihE0l3Ro2ad9BtFfQAkhRlmzXXMe6x0tQZkrIry5iu81u+MLU4v4R9HGGa+Q8hQuNaXgn7pNo6wwHd6BiZyq9pOcY509eD/6bfahUVHMbJ047TV3OQOxbXrkjz8hYyzdqQj0ub92/LGTn3ZKvYPOQjoBM1JcDdcZMGaCwLgtlWqbNxs0D/zkJumiIbuMxIbeYThmAgIqRhdZQu0w6t5X8dvTZk1EsK+X3AxLLNNtCcHCezJFWVk8aaR24xIJ1PB49UTnR8FU0vqQS8NKIaoqMw8zXWMhj/InYQOTnjg2QNwckOmCUDFMRwrj1SkzjeIWSYSUX941pDKtimixGljjkJkxSiFNRpZ/Tit2QfDoC4LPgfAnGSZFrKSewBWb+aJunzV7ifPJDGOKeFyscQdjQV5QXwZLESN7MKrh2h1df+QILSOeiKPYvE3RPSby7lGnG0evJMEQZU5svWnZbcicQTcpaUCdvLMsdRWGZcWZNwDb4GHDnMG5qWyYroTvr4niIhV6zCgUHucQdVim6XO+b2O+4vgdiP2BNeSHqSwk18Y2B9PtRyIcIvbFaVQhKpTIrMFqn5axCjbvEANAHKzzQDQ5hA55tJ4ciKwJSiFCJXJfM4GGENcZpzJSlLaIf1GrwNFt0oHKNx7PU1xKoaiXK1W2anN
nO8iNxdYle9e8nRfvn25mGQbK4xiQjrhcoiKrQDy2NN10VDxvjFp9zkl10cONdaBpfukfK8Y8AB2AnPXSExgeWs5iDUb4LsmZVVQKvXZdTZNPnsxMbDvnNaa277qnB3fqcl62tMDjhmL3c6AybY7lzIgFCUv6OSo4R4LNet/tpNrS7ZTZQAm3Bb4uCwCj8ditsIqrVWfKsaUbohZ/t/dMW5JQv/meunw4QmyeEtK+al0CSPH3Rs/RJ3lSZmkdHzg2u7UhvnNq9QI3zL7U6V+Z79s1VtWxxRk/Z0/SY69Xd09dE+EED8at8uU8RqVRTzh2L5e06AfDZW846rx28ZoSlFfrKhiHDGv3Dvwvt7AgYqkMgd5vKIESM0bCgnNzs9sEHz9QeJE4IPIkln+Rfy2Uzd+tbHNrtN469Z2fyy4Q5Mun6Ks9wMnUYArROD8YTefCoJYjgrEtInG22Sv7lm2UWrYauvKBuhVib1/i6Od48ahOu45jOBxyvCWATaD8tpj6nPTwxoHNldOTbypa4o5+r39Q7bJy26cz792zYq4ps40nyY9qRqdI6kSkmMznSaDLvE+Tj8pJZAducKiSuYlK6rAv46/Ipwy3+2PCyAHNfTEA7kDLJSTLHHfUC/a6JsQll7HZH6iHlAo64ff+I1qxBIaCd08WIAJocwrUnnWVYr8Su6flnzjnBaTodtKVuH7VCjd6EzQ1ol9g5bbq9dXVP6MhtTUcMf6RcAKuZ84T9AxmzHewyEvBm7e1F4wEgC04FunY3Xe5wfBJMpsnx5ICHgokEYl8E71GMomMwUykFjBlTRcL5ynq2bTtfJafUfPpfpPRYC5f82W2E+d7LSWHlDKouJBwiuojbnnCzjlcdA7OvZzwbkJil9OkUUB2cNYJxtULUHzEWcVLipumLGvBiK0UFO+e25INOXCVVBLiGz4Dpkf6nv3qVL7VMg5SULJ8ldc7jzcpcWaFRYrIho9czojkV5KEPpDG2LNow7dYw3T4AwdqIkI7KJFzT0jrWfR8gnKOZaMqQvAmyxnekhwPQMXacA2atLck6zPwFlzAwQSvQxyH8mKCmnnnzPBmm9kHo6mXAAqEqO70MUW0+JQ3MtnJeOJBMyTD3ZsNrmGaOMVlhsOQNh97eHcor5LqjAltR6L1Io/mgz9vS1FTkX4PMALzq8nqmrzkz0drxrAZyHsUejWPFiBAM0E1g3Te5Scawy1JEfTpKFe6YsU4fOKktatPgwXa+U5aOPfJZBcKpIhM2e8xBo7UEf3uczfR8zaLzRgYL+cdSCFxM8+vWZojsqeR6DwY9ve9YpjePow9V0PEdenCl5xaOjQtR1HjaNadWQndj+oU6t1oR4P3pDIOxhpWWd3fJRWeteHHLejquBjHFY83QgbJ2+UjUuixLs3dH5mHwdnJ16zF4450JCQ0F/PAlIe7COV5bjjVUq6+gUPuZ7mlu8vcMqFYtirupzf5Zg7S8qH5xcb1ND0HnblUdaJwQlbk0NBBpdl8jtmWtXNntz1bJI/xjKhrsr+U9yxR20HEybFU0R+tp2nc2B9I1CA22polzIolQYqKMloCGkJrGqmuvkTTCxl1sjOKCvuHn20jnhmL9mVaAXZqxzBrIVmBdsia2tiuX9hJCpHOlcEPTe56fPfw81WF1NODApamS85zmpRJrhIgIEQgKn4Xb17elU/cJO5RKc2bICtQ8ejbANZcxghSZHJTta/Dv3s8tjzsdk4Lf8LMJFcYUTC/BbCs/+erduMiND+R7ByZoC0/GC98Qm7FWKpS8n59i0znSzelgH4APc+NFxaWaewQpt1y70Rud14YYOpqC0zLtyEIO7phI01Y/9Ev3zhAyq1XNCE9hXkch9t+TZoCjCpcl6gOTbahXjzawmab0xqSKEtlHPIuEkGkfiy+O/7+wGrU47c3Wfnf2MsBZfSwlJBT/Cg8jRtV28ViB++BHB22MRNhUQqzxE7YBNDg+r2rfwFj+jozrIEwrc57Yu76mhENHkSq5IjEIpvNfECDhud1flqCLng9NF7q6vrbrjZoSNtyLZFvDIMPRl1wfCHBPp99475bGNHwqp/IeYpOUEdrxJK0qWMFgdVHESORVB6BdEGLCSF4HLYZYlHmzVNH2e3RCnGomakhgVI+hFQuzBREcMyNPPk3irt5nYA4/hrC/HW6dGDCf34pRPK7W784a3Asx3MKIyyhiSTCh/BqF0ZIijTJ4zod1v7z3C8CTu5NmVDD1xquBKDQDNo1Oe/11ApgUpM8pbSdARJlt6qxKrV2UyI2+nPX3r2i5s4pzsH0sdOQ00vdr53Q/PBS65WPpBlaXY6to7/GihXniuoKvOQFTtSiT5XA6IZMwhanWe4Nx3ejrtIelMdSgJ/HyvNfFlkOBnFSA36oriIQHsLjXtlOItBGoDoSNySEtk9hbc1RrcMvj1tEQ7G9YJQo0PID+Bk805OI30bs59zlJk5Cys71mAZzDbMmvwlSES8w74j4NXzKhvQ2Kwd+fCuy+MHmFnNF+Q/QjA0eT5GAnbpgAmVrzN+kE10cIk/cRwRSRQtVPve7vHXD6U1QgKpHowbysBAKq95/84IpF6IojWkACMi5YpIgnV4wN6ICwFV+/u671AkcJZxGpcaKLa2u5+Ma+x5wkXTDIoazllr2s8lvpKdu8a+9NOAV5tjhzuIm+h+5mIWu29anvg7/K4KfBT6zd9pwc6hyusC33o1+tL4SV5aTN/MBdmNbwvPoIpWoPANJs6Vmrl9EaA5afHBimJ36/Hi3dAO3i4gOVHB0w7HKnGLWgXf1o9VuxjNn09cEXqqyCIxJOJT9APSe4/tOF6UmmM7GNL2nVYlWAYHknKPmLCQTf49js3kGYgpY8CpqU5A2dVrJUntIVBtrKVpuPNZTAlD9+HJ/YXhdZPV1LBCWiKPqqlvwmiFfHxZtHOBuDWovcSkpzH1DHR45mC8s/uBY7UzhJvAeqpSIdDbn7YNSLEsIalel2jH31MCutrcKrbSRQ7ZgSyy2OJMHEUl83BdoYn6UQQdnRuhdv1U1dnQcHNUuiJ0GyJlvpiqZNvGPvx3fQFtEJjBDjQZry46IL0C9pBQ1XF40S6vDhjEUgonWqgqDnaLjpPB6qm3chI65I5I7bXaKM2p2i0N9RRq8hHHOcLsiNZV662N+qBn//yQK2n2AInvI6APFLegZuBvrC4U9cva+hU2epTu+vVr2N5WHryBBK2L+dwRZU/InEs5LbdJGCWeSZReAZOGkyCOVu3U3LuZseD+sdv3jFtVCBmIDeuMzd2cBOokVg0cZNWc8zxu4tjXzEnO5TsV4jgRHEkDWB8yqE/XpVgiYGb03QlKH712Zxb+pRMEuuQdyjaMuPDxU2u2cPZB+Fejt6ROLdvOB0O3LrJZ5/7vedvhx+BHSAj7E9gPY3asFzB1BvhcL+oHvsARidqH+LkAl2wbg03mkBn3R8ukuLsfItCi3bLcO4KDf0Psd6CfJM6NlAC6nY6DIa3uow0gJfX/y3vANlYi0QCMtAYwbiHz4zZ0Y4JIk9nH+R1jRwQ419bz28A
WGsqDWT3qheV+KFutdeOW8orYc+jWbUmj0wdOHAuSPNGXQVGmIbKybz7dCW3YBvo3nItX7IIheB+nwTkTxRE/ChAuV2Zum9kTBi6X7VOXLT/Gw/hbQnDcC2TJmtH1N+OoeYr91k/AlnHJjoMuY8pAgyS9PyMkBlK+OUvj4VvBOcL921QwSO/P+2aDDT69JO10aNEeNHL7B1w2UHrpQWYNsgLRX9Q6iw3uEDlQUb28bGuVOlVAz9dOJumbrgnAQ87yuaaHSEqnraEKxgWnWqjHWOLzCadSVM9ZIc2Pc5EiUZhnwSBZFvzrEIXpxvqDivYJuAEfx+qJIZa0yQORb+CmJHpk9+2Zoezxf8ytu4TkPwel/w2m1J7Nzw+lWjdd6MHxnAoZEf2XTw9unG319jS82hI3ezEUuOnm6ThlNLuaB/oFh6WCdkUuCN+3ui84rqCdNcZrhqtU1Ocrp0A4EYzz+ZawPiPSv8mDbJZ/AQNwU9qnm/Vvo/8a5ok5Pi7EwitkzTmHA/Nec/EM3wufTFtPx6JZN1lA/EGQBw+1VWvjPOtwPzSwE86LOg7h9qb8nDElf29SdCcOtidCtO8Rra9Nunch2A6QpBfXpkfwvWIrXf2CgUGFuDR8VwudDux8WXoEr8O3W4IeyVNl2RqK2WgYHfVPF/vC/iE5A0zDmmq/YZW2akkNYEPfwaCd/BeJKW9uvjgDPEtOppJuhiBH3fEQSSCJGjmkV1mSDAqRveDMF7ENLwnuAhm4F1n3iQJaaZrKAQboNUp5wy1AsZjHDw+WB0Md2FhyFnOnYcAPbKaDs/2CGJ4clYALWy2AoVQt39WsBJnqI3gdnP2rMnA5Lhx1pRFdNTBVFVIAd9nu3WAGqft9xyo6HYhYNqvyWUailnSLOAHmRAlliLeoq1XeTSXKJWCLcw3tbgZzn6iRssHP+IgR2/zBaEw6N9vn/i1K2rXNvaNPJG7W5rFyGB3LdSzqyvh4HfErL3VC4MgLg7Hel2Fm7UAWCm9K0a7ff8sdWLeI6t/3Bao++FDcUC+BBOdtxhMUAC0mF90KyJhTaqy7bMcbDoQ2s0J6XjI5F5V2mcMbMuIfnYAXr2iBsb+/Nrf2DYWwn4WKcBoEXz1V334lLAQI92AZumJnW0jibCuRxmH+pnD3EsCRP98BilP2WwS0xPT8a8Hs7caXY4oCo7KhewDP45YUtg039TuXFgGlGH0Sv3xAa3fDXwHZ7Q5IO0DsimE9hc2bXbX7g51FiwLxeCIB1znQ+g0KHuhCms17vorW0Fd//+EZNWgUyWUAp3gftRVtsBqS3lOID/zp4Y90dlM1I4Q3wQCMZnGe9LJhhHlIcr7mSn4wCTjYkGIHNcjnaZjaL2Bhd2tRWT0pXcFqGaHAoCNEGKq9H6x3o0p5SQ85h7w6ieday7to8R+x6BO/Gb/TC60RAsIL6Y+O1Qh1fiXVvDs1Xiy7Jm39Lpe/3knX9VoweGXJ5yRWs1qD1HHPXsa+2jL1rSM+JOG4gb6VCsoJItryqWBgu9y6DHdovx4l6VoiC/2SWiLUhsWiwWskWE/eTT28wRq302GgVc4Qm5iOHqXJCsyUrWqLRGCjuhzMkuMXY/naAw5CVlM0CFCCh4OGx2PmbU/9Nykygon5DsMjU+O1Z8In7c7vnioLI7DNVcViht8bVU9DlgGovWeic1VdOZb/VJB3Gp2p03awQzf4qyDSbQxIIgA4s06f0pmxM0fiSS2BozbofyJzODzsYVMYfVIa7HdP8TDKB4b4a5isQ3KOAuA8eTGBxAj1d1bOocoIqc/xKOTpisV8C3SxlpuagZ0B7301wUglKXfMQ+2SdGGk1EYn23f1olzxYRLufIxNv8MnFH//yCts5V6xq/zv18rdOOv/LUkLmNj1VFmHx0g587xKVJ1CbXMkQL/aGaMQwdayEqvM8Yv0DQ3081MNTLNQZJOIvk1bnafoDEoG5LJCL5Rpugpbj2y3+AahsX0GddKo8PI4RRTW9ooisfNVpSufkrw0Wf86D7Bi9m1MGqiN1gFWr2ABPLUahyUSXNM5scebTZcfx4Bdp1RwS4TSW6t6POQGzU4ELO224iw464OoDTsgbZ2o0wZ3giCL5wyBhjI9hRJoudJx0xVW6MmXAvj5v2QXp0ps70aLjTdwWGYD46xc/33fHnrbot4B4ryjhsQaSvqds02zIoje5Wkl51X4/F3rrkWeul63iNKzD6lXSvPdQ0SggYUIh3PPVeyNIWl0ksM/MfwXVtZyVctgGhRHqZ9rMAsDXimRr3HJsN5mhTgFFz8YZ61yiTnrURFA4EWSrUk6vd9439t6nHoE0dY/n5M3TcN4WiVTHudTA6cJmhSZgxHXUnkKaKQgYHEnesL4rQLFeZ4G7T4W2gYw0GMzLPt9H+G+XkePIeYa97Ebhp2ibM8RuXFGoNEirkAMZL/bNqAhaD651uSpWiG4XiqCV46F2htg5Uj03yHOO/ljgi/GYDTz/07N5iUnlU9mZdweUhgrcKcIhHgBQYmk6K5nd2qQFyV9ocJU936b0hSw+83ilQwGikEWyVaUUMUMSewNU3eSLxJ43oG6v+C1lvprg8t3w2mWBCOfMIxM0oCVFgMeihbIfGh9R4JKz38MeusqPp7bbYW8x1CGnkhFKHk0c42CZwRTM9iao8Mj6MQVKgXhrYNzd4joNAAaMHsYFKyfz9yhaa3o1rZt5NIutUizx3STVB02gyXkxrUmpoOC3oJD01VcbRoj8JoSHYa3Cf6Tgvv9u5mleiOZ3m+ZS3ksr9BcB3AJ5hreqbDOiKvxQwZW94s/5JPZVt8gVHwJhKhstc0OO62Snr6RvHpY9fti+2pP2JOt0dOnLR/vIprAGlyC6F3VZCjFbYwdYYS6DrB4P4qwZo4d2SbMRnoQHjTwzdzFaFwtH+STV8XBkuAhUi5HcOOkK7qdoE7Kw5SS63ORggsO+JwmCdIm0lJsSKD3jTHuBi4IlBD/Yv/E6+ASj0+Yw0iWqfWXqE7jep1K98ZUCWVfKNqnpZ2AU2B8I0mCEcq2c5aDGz25IPMY+j+/UmgMqht/72y4W8Qg+y9tggG32AzPTflqtQz1CRFCY2pSktfrJLWir2HZK60pOHOWQ4hiAYP5dN6VhXw8Vj3jaGflctY59pHQqhsb/eOGnS3Loxyq9a5OoqCcdcc7jmIIvgP3Isq/UsXDjDkXAVYRATaPwboXyUW6QCgguz+tKh6G9elIXWt/RWgo/YwT1YMLTP+TDBX311itMYMx1Si7aQINryoMQQ/Bgxr9ZmxjcwFXM7E4VLnx63F6MkALaiaIedq2yxkQ01KrfCdNGLon/IBZgi7tVZVVz1g9JJDUcuQm5Ypaol10nejl5rIDY5DDZYisqrbs+957reqeZzpkgJc+eNNkoepgP9M/EZvsk+sVO0kZ5Wls+GNjbQPFBTKMU+4wsO4+jnops6k6eSPrelVhqTlyKuRqftUsFrbJ5BtOTHwPXTr3xtEyk4PbPDOIOLLyAGzQHs61cTP3LGn+rRBpuLh76TvjbNxAfr6EhKGwwExmP1
cAnWe1gzT7RI9Yk1dOY3+AM28w0Go1gFxDhDShvySx+FVKn4YVIlyE/TmZtfyTrhgnE8oo5zfu26OCZ7Z57DmW6xZGxSZ1XoiGhSCcHMrlWHfqoSyy7cvh7pK2J9+s+2AsDUMGvOBqJUXHB6WoOVqelVBYC1AdYx0r3XP2zfFwc04zJB6w26VodRpAik8TtgWTOrz3WSJDyQR5cLB6J8L/suMdbszNlnV4uqi6nXYx7hlcMpHyHsqdGFYyok50Cvq9u+rWRIrtJOrbqGT93tBTtyQSeITvv/UHREa4zr9VdJoQ/9/VePGE/+Va0d5p3gFkTUBy7mHs77smd4OEQUU5i+ohIVIf8u7dyaSfKWohR0O2sjpCdb1sExRWRLgJNOrQ4Qt9xEYeaoIOfWrnESf98t0dXgUnwVAJAgx/DnHiGZ6F9Y970rDwarwMp2ZHxaleZo9rL/adx8aKEN5wtY2548NcgG+tEzsiSNPyhD3c4YHdnUfHeM72Z1FxXSCdTPf7VRwVZsyKKyZr1Lo/6BQ2hiF35iFPrti/LC3um6r4EHfAVOUnevLqYoq+gzylxaZjhM3ylrsn/q8BwnsTcuuRnbmTseHbtAATDCSHAkgqm01uUzeajnKDkfB3DaEm3M8IB+7cCXpQFW1ZDGgxIrSKZiIYE0m4T1jczVfU0VEuOisrYHPWtp2h9oKtAoDef29skgw5eNMqzw58pkjUcRutEUvOEqBCjv9WFV+awZrjeODTn0OEO5z603II+XvVm08YebHCA8l+EYjT+A2TwZHvK3O2ReRG5ldAks7/j0iE+mCZWUXBA2yZubstkCD8gLyffv+inO3qsMRPMkqt8yqzrTwaud7AQHUJoC0XsmZjbypwP0E2SPGja7B7eeNWkqhepB3UlP3KivjdD2dYAH8G/RJxXs4qApTzzW1Ofx1a7ztlOwGW62+xf3pZnv7j4MwjhCJvzwFRY5M+dUiQKAoiynnYu7hdt4pKNNZCahWW9ZjvKmK5SuySHiA9zeRJH4685eerBkfARO2KT3x+cH+NbLy6BZB5+q1xYJG+zJvYR85uiUck/cqoE4/cCKuuEzxCKrBG8awJeqT/e6aTLzmP9JQGF4L683pAeTqnc6hINW47kpUIi9c4vGd6un9kxEcfJJ7NfcMr79q9vgaZ62c5zttQx7H7poQpPj/nb4suknnKkhAY8wo+Lpv05qepCcnfgWSbqaMPrXcpbs0MiYngk/n/3bcZm7Hr07fnJ7Ozz0z6Fk8MaSAJHoQR9sk9hXA5FwkWXDMvMQxUebnGo5zYc0Sxqb4M/0qnaktqcWkblbEtMhFcVtHXxkfa9APm896ZJRfcXDD89BI+ttwnoxag/+E3M9JSRlZmcz1V85M2ywe4JJKlOClQ8gjDaBl88P01L+BwGMd+PP+6JW4ooevbytRhUIAAROujBSn8UrJ6q+VuU8VMviBK0PQHMibFEEiwRGpBmoLqbIb9avPutmRbS6UFyfHLyX2QSO9pP0K4fUorvuQTi4IEktWeR1YQw93SVOqRmvHrqovsyBimgZQy0/aibhO1XSJPDVmrPXGWQx6Ulp3t1QNPN2HRYQbLHNybYKETe9xM8lhNjJk83BlNz1k0hVf1YK1hKg3tVMEVxicPVgt8K58jN5QVU0LdTp1tS6RD8qLjqRdGdITRY7NX2A05HsdwNbH/plgLUVIFXYIsd0oJ/nsjQM7Civa4rqJ+x5hVxU6xEeM9FrPrn8CAYpp27Wg2CtfpZrusL6xRssaTKldxMJrsVkHPJ+RFPEoTYAeV3XUGkl8ZfZBqWdRM5HI1uUt9wArE15+RrjAFKXf/tUzSXtwl4FmCMXFL3Ji4dkLamBkkmIPW/ncWb+OyJpQANq/1YuDGiQcC+xbf617dd3oEeVBYcVgO87sByeup5EF9OwhekcENVurBK8HkBcJ+GZVf/PqyJ6b3sTyWtQpbbSmWtzMxAzPaSCm3IZrqwBUDB7rIjGNJWtHThO6Y/yhQWr7xKPKCKDLU+VjrWPrQoB/WJ18aRZN667BdU/ySR0dJq3yeU+EEyM05xHNIXUvGv6XXJixg81BIeDGSj7piOhtmGD9n27KDnJVQjBum02h77Mm0R1q9D2uJvm2t827zZMcCPN/0sA1UJzlPQdz73c/JYExGO7rJd+yOKSmIo/keusRuLm46hx0HXxp1zttAn7N/8S7gmU8mBoAKHfHVvyRfBkz858LbYRaHxY9VoNOdsMg3cJUTSFjMKSAvrLtADz2Qq9JnW4yOFGh40Xa8V9eK8tTUR2JyVbCBQuZHA4Gcq9FPaHcQd6LS4+OvMrf7SqhHrVokOFUuv68ezyRQ+HqWPUgpbcKZxaURbs26ObYPnaiXCGFbY1F25u/VstDcydUXxdS4yBZH6jyy1v0TyqPijQjlEB1OgahYQZh++bgyjyfSeIH8+t/PvQz9mNPSaz3JXdJS5EIOBTq8SeBr9SKCM/zz/x6MeQErY2uIiockH8LqXNYnabFzVBku3uoPlqLTSfHoESff2r1rkCNyI80Bash5cnzPoTJ55bxc7+iIgJRui8BfueQayxTH2KtZTlMLerdPk1K+KmrtI2iW//jeaa3BDVNKe8XC9HlXwFDHruyhZ9TrMtoQ0TfI+1FR3jV2fOEucG/WjFSWFTFxbui8gu1WrBiXXNf3dwXnTqgU2fFwR8dvdEHkZmJ+ewWREa8Sbf45EClJf1D2tR4M3ICX72BSTu6/C6sXBZiZLdCJHo1Z46pZC4ySQj/7YsDfsyzwuv4RLkHKv8sZuVCS2yDei5nTInttw7s7cmbSt0KIcdfrLBbfjqnxokqjPJsrqBD28Ka7SJ9rV0ZAsZYIt3v56P74MHtTmdn6SwTgP1kOL3v/sEY28pko4Q6ZUX1YPbQSe3XcWsMwOiX0jMZhyEuWL+GPv05X9ApnLdP/mzzAyynVWh/vfNWNfMpHD6H1WbftPxXBPdt06F2kf6yBDMDaS7dVz4egP3QakbsE9ED+uxPPezs7yD7iyDQi/Wd5dBVapT52qzITB3IlHbEU84Fdn9U0VCSaWIv1FPykWrYa5EJzGka1XAtOgfCLntqjFTYvCItFTHVwYO7sMCws+kkIY4+dl8IalMrXvhCxPEzgJQgekSQqBsCt8DyrIBZP8Q7tV0wyd+je8XLktuFmSqu7/0+TUBlZ8AKk+5+q2AA/SdOYvFAHNNZynaCw1yLT1mok/2AcTeWnAXGOmRxbWZ4kMaB9aaE9yiA8kSV1nUfTIWn8EaoLxr2D8axnlVuj+EawTtTjE/mX1lcJaVCkqffVOp6Avb+uQH2wTGT2LUV/v+r/+PBvjMwrh6YrxKRWUv1hQyHQOWp4eVwwVH+7KtJbg8zL0rdjT7LSEsqNhm4WJnm3Ae/Nz8DDiyVjlbLB41AxKUJpnrR7cpbLYr79L6MLI6uPvxpfSjRKJvOpfJrB+I3NS7+ovrAs82KE03d5DVuakgv0nJLih+R3k0/zTgTItUt2Jjoyq7AN95d573hJChiiFQKPU/vVWZuBz6UAO++knglXh
0N7Nur1thQSMRtt+kClxn6EJYbfysG5bPvByhI+EFkPJRX+7oDrykvXEsoKQbRdtZ9Ru7/3fzAIyRkmyWU9gROdJmBjTlctqwjuI7hs3tMNScnMpfGCnh9hzslvdLJhKWZvNEhQf0mFKTSrtBAPvdcNTZ1m2d6l1V9VGDTJFK3qOuv4Bad3A3UQHurIbFDNYMq2noWukp0Y+NVGeuJBHHM8f43iadZSilg+3dAGlLQH9yRszuVhmSbrzk9o+p3+xhho+W84Qx4k5sI21Web0CztxNJSSc9Ix7V9ekQ1BLSpZrMGoqyD3KMrsyiBWYxBngnTrf0Ol0P0Dtta/QDMa+qgMHMFhTHv3MAXf3njpHkkFDpo+y98Y/Ku9ndcBV9tbHYnhqHnDs23XksN90VjL13T8SD7yxYvpJrmbcW9rketzQdqAYORALk6oPGJ7ucIgvIqPzQuiE+PxZboGtQlPPixYT6gIDaKjWbj3v/RcjnhFilJNFWxk2te5QplGqDNyeDWD457GJoegdiufUW3w25so+aRwXxsYdXFjKgtdHYc2l5Y0Ww4rMEuWEVKalUvInlDlgyAoymIxp7Q9AkIEF8JYbXq7k3wg6CYs+Ahg/pcuG6O1r8ZCoVbTxnq2AjSKnsyJzTvw81TXJ6YZE6LrXsHGB3jEklGBCtx69X3vP9xoKn7gx95N0+O9PbYC2+Oc309LRR4ymf3I1bVB8NHxYBgLZQyNiKuEli0WDRf3V8G9jlCikTtljwTsnS3ge/85lh0xJBW4qIdPjlIMfec4F0yv2Yyiwl+ThXtZQlwf1XBDaKvXXXnulUAJSH6BE2OW48ZJV19PNYZsk6i3bpBPkb0vMlU6O01GwKET89qQ5Tipq+nqMA7yWJGEy3aAqiy0hyg2lV7H8UKIpcDdwuXcdXx5N1Lqccd1MulhPdhwUqAYWzKAd2KFP6Qq8/8gG7n+k2wNCLQoCANBs27Zt2+5m27Zt27Zt42fb9bJtzCJmIQca9tpt4UNLv88LLW7Rmi4nHGkvW062Ne+Eeynccr4cQ5vh6z8DNncjvijOtQ6VD79l7IxCWhPY3hTfzRxy1o98jRr5AjV10YNAjZ0VQlycx7Ni29O8leACAaS5waqL+GU0fihFppjkwayA3W/DaQFhGrd4JsSKTFlJXCBrnIYerIXy5kERhJAjhOhU4nm6H2UxVDhdEhD0Mn2IR806vFLi6+pn5tjxbw1404w8AYkUJTC5uO8Tomzm9RGnwMbuCpssWFj2gibP/bbBKkJrADs3JzMGg3nl0gbi875AWhow6EuhYHb4AOcJkKGGxLzsuQSBccTUnVKxUlwrroLtEjj3jrsJU9BQhVop3GZxVgg/JPqikgAFpBkR1aGdf3Rifcgkj+ua5mjAmNS0dd92Q1wf5Guk0FmsalBvehty+9DuhAHK8djAKJt7mm4ubLPmwe/v0oJdWfZdraBaFEZUYa57jdjLgj7XinToC/WZR5rcX4NN3PXAQsWjaxNdwvhX32nKeftwSBTYRC1MuZ42SjYGMiqon+zIANr7HWvId+LIjWFpFBUoi+wXneChuiRm37uJmlaaYs6ymApjM1jpeKvg8qslatkaXhsJTHCa0o4cNli1xxz6bsaf5BCKtkfv/cUySEf2eRX2eKXbyc1l5/D1nIExrzvgZ2CmYxJ6finIvxLBMg8gA7WRspGUU/ViQBjh8DVqW4eT/4fQyFj+kZuzEVwJT4XX5f+sXjwQhvaixEQLqfXzQY3f86SpVYFasEm4h35mhFF1tHqTYJYZYF3Iud6QhrmvgSVCPAT61+KXLU1XIaUhwnstv6GE5VB64M/e650cVMU68ezSojKOPQYYJb2M8oo2K53k2HCvBwE2U1k3JaJwMQwf2wVb2e6hQqCTdDktTeQ5L2ivwhSiIP1HHamLa8TkXzaqbaeMb6PF04J5aDVz7L62yLMKxeDR5z46XHn+cKRR+b3QhMtkoqvvyXgz/5EDmxYcj2olcNDgMvamJhsZ9pHajtryq176loHwpxOLU02MEFJm7P18QBL0Ag7X+FjXx138ECOqG4mXa8HuO0mbkoX4lHgEu6U2ikr5G6T9trcCkTx2BMFHnuMjW73p3lUBpZDsDDW+6CzYY2VAS9y5ofrJqff3SSVfpVaHIDYwnDSBbuhdOz+whZWiUiU1QZimgnaOnrWj9qEqD6ZIYBWuhQqpweRmHzgct/kDozWt65oWQp9aB0QNvprl9+LYwP6bIVPIsdw5zRgU1rIRFfJYJw1Ip5bqI4NUTxtcBUGzUJp4DtHEAHSd4J32MZrh/dkbyeKbsc+PFI8tbalCNuL1CUqZAUiH1ZW4MSYuFUVE4Y0tc6nApiB4ZFre0zVyp871qJ2JY3B4N0Lb/8XWJs93EqPiKXDICCkiZnTa6TCxDzWm56fxV3DIYsVYdcwKVW2e+TwO7ECRIaVUjYcpkx4xipuWwx2sH9K/j75MfXaD2rwvwHMNtHsQRb0Q8bEfr/1jrNr010nykTk3wj7njQahsQvy+csCM4XiRs/hpclVZwR/msklzYUPJs0Vad2+njbvjnFGnpmDMva81KYY0q+d25pnS4GDfGDh/Gw/QPBxd8ZxWVNAXd3+dA/w3wZ4PjUzwOZo+S4G0YPytY0OweIiQKFy7t05DEYtHd6ibdHD9UidunZsr4jkk1zdk/N3P9+Zdzqm1eq5HfICk/fKAhZhjbYc7az4PHg9O5QkY0K1x4p8g0ciFe3g1ZV78e4DgFlp/Rjfwu8KHmdxUXxBaRiB0SzLUwWQuDdDrVlzavLd7otrMFvJwG9ye8lZTEeLhbkau3zS7Dy8EwCSt940HStA2ZW2EnSNITWxKgbf34zV1ENw8zYKeS0xpO3f+8TDXZoqsNdQz8/n3Cjx9BzJnCrRD6lYkwB9JUeDpTSu2reAXQlXzdAFPsCWs8SbRewFtQMlWATwRvnSq3Bupkioe2qErlbjrzbEZ7raAEFemjbIa6inCuW5uTR2YpnxBMgNLpJNm8gCPXWKGpEFGAxs1mfku9zgbdoOcLm9hgOgYhDkY7x3Qt9Mg2CLMbRfpNei2zwDs3El9rsyuPqD3BBmTIdQAXSFEm9h7Q6uihFZ5r+lEjlMagSD+AL+5j5ktkdYN07XJQPvXRc+KeRpX6t+2bzgvrzMczKXTCCF8xjvSjrrokiDJEEwr52nd4mUqGtVekX6z9T/2mSpQ8e4rrugT0/GHcxiRG/DzhGvjXCY5kAjDSNdh9X384tXrgRHy3wJQaDWi+FUwhNOZC9j8cKUeZW4sUsE3OyFVLPOyOn1BVWRJAf+CYp2j3d6mqlTshf4xwnwz16yv4JufKEP9pkMxvIcL/3GtovK1DM42RlQ9707iv+RdPnSWG7AcQeObL0XB6w2aaC98T91Aro5GJJY+gh4c9uv75oSKB4bNYEFhCObq2BZETcGcQHCtur8Ak4AGcf06OcpggC5bM8doyzAsuHXPBpzA7emjO2MmSLK0qJNhwiaGFtZXZWvkr8yJ37oLS3eIjuH+YQkBvBJfCqCiH2gxpywbn
BN2xyTBBHd37Z+uQD8vYtDp1aMb4VUsOV81kjhxCiVUOrEdNt9DB9rIq5u+k8/NglMfsmpHV55t/QWPxcD7NeqAkCpMij2eXqO5totajK/J0RDMM8L08ixoRVGwCs6sivhy1YjdrEG0NbxjMbQvCGjhQtUkJF0VOU3eZ5cwVzIzt3cvO5drPBOUnfJdSD7nqZI968Althudp5lwjE3bTfOFW/TpET+MBSTWWAE3aEmJTG+jslt1v69tjD/bgSqE5nCQyfEyfXsN/k2HkJ0Weuw4dJcnXiGjfJssy1J6dvHfjvpemBos6dXR5LJScbx44f281Zk+5IsU7iMeLwoUl9w+YtYBQjpZLou3lmEfpwkFxh9w2cb7GZWqCzm6He9qiD46cuSUq7QvQQ0/Vi+z82iHMBxti9vMyT8IEl0CcJYoPO1fvEA5h8UqCZvekF6Ob8j4FMz3UaVNKA9DYRduQibGvEpOUryzgN0p5qfJNZX4L0KqYGhXJE9ppAJwJU6RKVqbPP0OhVw2FwT/HU2fFsM+rR7AMJyCT8aS2US1x3odvDxS4aS08gRpCXp1oW5JQgbLbhI/HlrFTyTlT+MJLcQdCTBNMPeBL6J0qzXNzmbXOHYiecCEhJ5ySUFdWR13TYv7w6Pq4YMXckd1kD0q/1iXUWXWMU2wDJ58DX/ysf87yfuECqvKgvvGdmBvOxA8SEE9R7r8Yal6ahLkoSimRo6lUWRykNeq1KYlf3nVqYPWxV1Qtj8oMYvRHCF6F+XcWgxYuivHNakzIwErTlJ9xDjjH3T9nO0Ej1UD9/QlmisRuARCpwSylCVc3R1e69eYow9Tcj2NvgP9oo3FXGEC8KRkdpA9o6YPsRN7V+aO5yOPQuJeelyiCBZYpc5c6hvCKUX1wtQtE0ATluKIgGDUI++XVTK6FeyssA6WmhgMgLO5mJp7gKf7XHEOr8L4jDhGgpkYwB0duxLfC/AL2j/mWFfql+KpeUrJGe2vBlZtAR5Ql6NRlhxmQxGXanZPGYkujs/IZO4adb22aEca/9MDe7Gnm3AJptXe9q6L9DqKBD1aBKLxyR8ThPa7X3+hHz1xu7KNfRc4WNxBU5+j6isrqCFZvcF2S7x2giA8E9zlBopjf6aaiC3XomHtp0c8dX+FH0DDK9ewQ3Z8Lf+7Yl3l/CyrobURjOpll7D8RXK8TW5XWvBInJUz08mWi1pG7K5oGXY21d9JDiFJIwHU/9umeny5b52ZJw2GooKI6+7b0DHedXhbkZvLEHAuZWzK9iOOBhmsftQoTeZUVRxvg6cqLY4d+CxB7I39Ml+kvw/PZukEe1DHK3iDkqyohgVYZLhLiBhJaibxZwczvj1tmMzl/x6DlkZzh6MCmyjhuGyKc/lJZwv55qs6vXNE9IbDdhvOuEei9Bs9jp5CjxiUjFoZHGX/EKCX7C2lPylg8OZTAyO0FmbAAO5QJfSemCTD5KnJ26blgYTud2xC30aCRe7nP5bV1cO2Y/DJWdFOFEVuvlP8Eo/yJiLj9c3sAAzjBwLvRSKzNol+AEbe/VY3ozvBxmPvDDK1Mss+dwNmGoasgzis9tD82U4PIYkzEiRhrCKO+IchIMob2WLdS4a/lO2TiWwkGJ8xtx2x7fwlOHT/zJCbUIl1EXOL7gSyebaBQlR9pUB6+fgIgn5SrnScplEoT9y63006crhUpYRpM5maR9U3t2vTtUPcU5Ic+MesL+/P0cd+p6WZBZB1Coy0mcLO31D2RUX1OeaIr6ksZ+tzPd8MT/hdzJk66Y1PBwHpUqaEKytgsdkYPeP09UGNJ3vLsJaOBI1qPa3sasC0e0iQW1Ty6T3ggt23kXmV/tJYa1xCI2nuAu9Rp3+m8P3PGe4AUZFmoHG8P8dYkkjq20JUJQIDTLFgVDg0Osh/NyiwnRW0KDk/GLYwkt+7hUjPsNilBq6m30mx4QRNqYXbKMoCPnYU0q5FFhlE+jpfPxnH5aWWUSuEbzQk2sBP3VWfVD9pXupgWBUcpLC3yXvunN/piqrEUHbm8Nyilv0QEbssRCMlRWAUhGpDQFoqT97bPd8NTk7So6ujTd+yczpcFxbW7sSQO2VsFYHZx9It661H19Tk2KzUb/TGOsiZqXI9kmyGke7GActbjO7G7VB+sj3rdDzFKBCL86JWa12/k40GX7vQ7zO2GLaWSprsb71TJXgeQOjTMPOnPRCcXgtFloay1xaOLl/jQ1pxoTDihVsjaTTmy4mU8vaSu/j/R4kMGFzqaeYCloVb0stTdhdMimExC3sJgTrkRLXSVUikOfwOYTg+2ENc0fbH0tuRfO8eF3FEvMyLE71XrjwZD8sWUpP1Ez/OyXESenfRZk7N8xU/ixVXsGmgKUGFPz9QEwNapNBhPW5WOgg6DdoOPM3MywFh9B/yZ03NmRE+dyg2OwyLllrnPM2IUu4p37oW8UAbf3m9mP9M937x2yiPVwd5iepCVmnS4UqN9n3/gFaRG5DpqvoSjSjAh6yQbQqe5pxjJo+zm/y4dB30VPMUYMhzYdupMX/jm81CfFr6E58nt47WhVxELb78QTwq9hn5CLRopGOg0pY40uV0f3+XtIGvNhUoorLMg81kmgiF52SWV1VwofAjqWeVUTH7AeESE0pZp7APaTZMtro/meRjik8z0XzR0coyfTWqI7LI/qha8/sLGdNMwID4u/3G8jKcz9uGR5pxba0eS1tCQdoGM2wUdoIf2NNoe/ccbxpQcDMWszhyzBx9nnN3PN4iVZAtfbsPayTY4yWwI1gVTeQD+ZqBsazFFXuD5g1NP6nvwhwpKoOkJNU7OKm1DUZd/cukKDS0SRT4XJ3DJ292EK84ug0C6I57oOMElDSvnNAG5iEwxU50y+DxkCvLkbCDL3cqQ0Nf17NB8uyYtTaBZpDQr+pJiBF84oa98ZLKVFV+v6MCk9N4yTROyn6l5mUedFSfnfIalySSIdUwVikBVRyE7z8iJOxmmjcPZIQ7iGbv+DpybxSP0Nbap70beBAhJCaAN49mymmdx0yFvY1rfvctK6LHp8rjPAnaj7uv1IYHi5XWfClTcMa/KxUbojMLYxHSlWBJ1i8WFwUiJhPOyW7emrFLvkfg68PJnIZGOjiWbCuX9EcbfzYmtFULwDZaZyLYtGoST/gXdlGLfZ5h4HpIox9EYio6F9XrC8lC27G9SL8hc3jcfm758GTPzqggK/LyZsroCuKys5s0Uk2qAV4+aWLI23xPx8sOIUuVYEE+uQbifLWaMAvSW3DOeJUkbwvN+ERBYTRKEZFxqrqRPK+njJ0HAkIOHINnDREQeR1PlldKXYt1ckCFx2K2DQVx9MqTzqlVx4eP0ivAuNhxerpXvnB3JeQa3EgHhfBk41zSXP3/XG9WTPbGQBZHTng14oNm9/xFYFDSCLjz08zWKkVmZlTprr8DyOhXdUeVELIr4SoIcJ6IsMQLMp1bYoRJXcVPXDjLQAGfx2s6zv1MaXTXaVJOhSKTXSTHtHOwleXqeaG8hDBCd9cIB/kJhA4Jz/ysFSWgMi+Rvo3o4GpX1+S/BCYk
JlGheTiHAq75dFEI5cIZ/FuLqRpN3S1jPo4fmjJVk3z/qWeUCbv1m6Ax17E/i13v6HdWELO2/2WHtGIr0kKtv0To7vQmfZS+B2VDjVrdveOs5RttmwBVkdaPQXWttrG3xjKbwWuqbG0Unp0ZoR53mPXQLdgNUz1+KLzw2Jzohr60Fiq3mxGGn72l34+LPTr5OmUyHNhbSCWQhcCyQv0ppR55gGfFsh9VnmFMy2njWLuKG9kQZdvuyY1Zd/NXLwta8wZCsboip7NQST2l2Amk7nwSZZAwkgBiggCQQP43JiYtAu7cFUKNH3PkyOUGbCsdYBv+pmbsNE86suWHLwrnv0btanInmhXDGcHAuWtN9iBqNnjyBOE0z0egc+WuCJc49UTSak1EQ0ZCLWcWJYQIpVvx6RtzVT+ETlb393+h2xyzS2NGE63fSsfsoMOx5LPPQA/dxd2S40OGg9Un108Doq9pGyJH0IS+aNTfUHQg6rD1Q5Beod2VYLbfB5ph4jd173Rjy+9b69Vc7ngqU8+pktbzokIGqt8tGys+AK1qRqOuaRE4N3H8Ax2Pmw90qJ/VybRBrZBn4e/rDe6Xs5fhdoLm9Sap1lXmgZP1aGNywzJ03e3vFnE0z527YYTzQzdeF6XSThrVOqB+FzRFP1VEbzGKQin5qKIKxumYDJAw6tI+uItYMtQH04+njROPl+jvutqWSk1e2SQU5/Xnr1AeXX5G+2zHOXLdDI5EpeTS85nleN4W0dWsbpV8ojOQlddGUQN0gGRsJSL1fTarD+PzDx6cgjJo4P8QCx3hyTkWsTI3jGrYTOmZ5YLjQkH6EFLAM7E9ifC9IBi1f+e+jMANoQ5B2KahnEyY0g/fihMCy0zYoJh7W8U2OhnuixwQV1cApK5v9KMuse+bdRkWIXnl3mh6831ahhh6YHnik7CXtwoGhNcfN63tdQiloDfjaxqezsRfsNH/Yz+GKtFzcL39SgRic+m+LC9Rt9FBheljq7+fETIGyYDAUgLPBQAmLZtXjntZdhtFtgyH/r9Y6Pr+bLHENgaXQ7dpgMb2K+WvwnX/gLXuFKlcmQWaXf+m7oHMkV76k1v5s1+vG9KjuKdkYzCfJGRPnT9Z9EeKAEdiOWmaaFb0BGM2zOsrBuS7kJMt/wN7XT/g0JvHtm+1tYF2hB2fvoDAWuPv+mdCmDnAK+5A2LsTV1kQ68jKHZ/5A4ocoFiu7r1Zl7GkcNj8+OW27S4JR8pJ1J3DbMDe6MD6/0kD+hwrAkJoxisG8InF0ai5P+yV0P2LKQhRBRL3jSoCM7ROsu3QSmvmOGvkAzmY7yVX86QHcTvADe8AHY31DPaBo/6SfIs123LsnIEdmmYCiJ9Nak1+FvoStD3Nn2xTDN+6Ywp7AmjJQmS/i+af10iXJNE+4XChiikpvhGOWHkWO4IYWYJydkk4N+8Kjg12sswak30SkwblXlSqhN60tKTX9BbLkD5hPGwuzQ0eMSCDTq3Ih529mY4wbKoWIJFJw3vA905u8ddYLP/cNtI9YNQFXIimi83rMlGl0zummu5WHbYM+6f3PLcPGzctwA0tIC5yV7kdr2EVF6uLu3P52tYin0N8DAfEkz8AVGJ0GyVxX9PA2cbDCbfWko7cmS89120OIL7CpZ1dPp+C3YRrMNY96wDGDOcmkBDGxFYOqcGsCJqdXkfEEGD22xUM9XI2PJP+SHeNeKmh5/kCg4ZvTjGdzXqUzIBE0NvTIKr6o0VVx8V04eZQQ2WgaWUlqok4yDJgQ2jUDCkVNUBglNRMay25n0fgFcLkvuDZsisjE2FNAPqpgDRp3Zh7NAYCvj3GY1CbmnGJFAN4iQlJ3JnQuATm/1KK8u2/Gbp6kaRhNyEKcmGc8GXERw7oquz63sH0u+rj9URZl7PmgJX1ZUd7z+mdtXdo9Q4WFGCYyxqpyMM5c4OJUTFLlVmxUuFeQlMigCf4vAYz8FQVZFOHdnbQz4el+1uo+tbnMl9ORBlfOp/gxEv4oOlF7TM0/i+098FzvxBWFhgDh3KND2fVxLLKhtbOTz9QQVcRh+T6XyavBNEH5WDK7LrKa5DwNA2h8tNHRTPtv8YtSN0ZJay/eHbCrHQDik0btr+nRThv+azlT6jxRpXv+FQhcHIRIKKa4N0IW7560pUkq4iCPxD3bss1pJoNFAOX06+2FTPE5oXHSUPxTZOTsqehAB8i4vj24nWYz8vxZfLoQ9OX9p2qNERf7foEOJtk0kBn2yVzrihrj79Kwo/Hq7z4b36h21B+zngFsuYU7lNlhHLYYPkPn+//SoxXyGZ02aDSZBm8ZsiGjzcPnmbjgw2DXA/H9YU0pH4XOcb7WvBdf+I4mx1o2B1XdBHe0U8nVIqFkjhPpZ+c8o8F1kJrKA3SNCXgxBV8+3yMhpFTz0lhCf2BXfDmH1Lpk7ydc0N0vouvDfwK79hFbgSXRDOyGsNyjEjkShk78Lj0RcWWFh1+GiJQKUbGrpiKgZQFvlZKO5qSnPRCoy8zTfEsmcTrBi2TSE1xw2VpwRWT6044ohpRvOpWEuXgNU/lnoUxhBjyjcvfEciogkquGPWPLzSuK3vaTfei4i7h5A8Zi7TUlXxt9L5FsOa0Xr94VyUamGizGIFJ12qFG2fJHN2is7lAKDed+ehbfhLRB/Hy79aDLDCGkxGnZuUxhkmsKHtXU/OGniS3dDCYofJaTcfrZayKzzGs6LWIZKE+H8SyVuTukF3H4G2Oicb/P4zPR6XLGHrbAcL7HxLY861axwS67tWKga1BtG0wuJWFA2A5o18w6A7qv5Ysoyd+y2p+Dgz87xRPtTK0ybkMTeGX6qhRWlt0CYA430U/Yv5aJxcPf3PlvlJia1fqK10leSRGxP1Owz2iuMjNvRorINQsdzoltooo0VvQZEoL+/1ebNt0rMCpTqzjCsq6ILwnmzaKoDJHw77YAr14q/FvQbIrYT3ApY/dcQYb0MyXrhFTM3Bu+Dyg+arRjIlvdvk92hQIF4RKPCuSqLG5+6EQ71WDvp6/z9h14baR7r8AFG4qT2SN41zwxuwbSMt2TVJ7XNc/7WlT83AqUUYD8AnRRBsmCi8Oo15U/vhj+kJftWyg/3qPzMtFiL+mSODD7Owt/ztss2JM4a8eIsJQ7fmLSsulfMGCoIlj+Fl84zqlHP9WsrmaVqKuObL0Bw9StXcMDpQfdeSrgxqFNYYaD+aWQm5wK5ousPUolRfWFGBSK9VP07shzvD9cesyQVvQSJDd0dv1BMiUDmo7uqqG1COuLL2E1TDMZ+qZb8kmTq1MGLU5LdNc5LYq/Nzr8LEZa37BfSqmQr20zwetz3Z9PzUL+xxERxBQYknF/ZxcTiLgqowmE7eSY3Y0EHYNLoKdNg5Lm98SlsM91hRHiDYulEfpG4FVLoXKQSz+pDF8uhrtqVBOPdd9pNOkix+qjYSaTvfRT13LeVMq3QUymeSRhC1FWIwiZudfoe09IQgGWpQX0BNRhU5jhF3LuKiJ5TMbRRnFpDtsc+Ke+rWB94UlqHKps1rlENzKZvZo0VkzassUFNp
3pYDhbjWhic1uc0NshXQBj0cZwGCPIrqeLzDPL1wve9uTYzXSR82xbIy3Z3POCeokPO6dzhzA/fUw+nkq6w4EP6xi1aAdKiU5UMskMjagdHqUhUs5ToME39J+vaqiYwIkRex+XMRJGlEIThCj28iTdiEYCPGtqY1IfNCloyC4BHPgYc6cPegFWNrEMDFkfMtmJ5Zhyj4/Fs8CwSAjHoIk6W6ynkbmeaXxu9WpjnzpkIJr3K552IBHu9QACAM1j/rX6Rwbl37VR12W2PixOO3S/QPGXmbTo6BsEE7KryzOsvH8BD9WG82cbjKfBf7yADNkCSqbsl4g7OWA9nKA34g4hg5hgoiDuTBvmKvV/VGBQ6j3j5hYhNCqN2pLlIoDGWQeMLSDbLctJRwXWB7XJVLo9XtrhILFTce0MtqPNr8ECUdUbWSZU2gVRjSxOSFrIYXMF3hxmBl5oBcPB/sZbUypu80N75BmrLX3uz+reqBS8Z1aL9lkwrPugyRDI8yTcDJU4EjJLtqv7mZ5NmriawDwuJBuuy4QxB9ub1VCfHcenyT14/F/1Yg0HtSUw0E4lFn8Cz6k2HOSvLtnnRBMOwQhm7NtmL65K8+oYR5qCiwBEKNymUhoLrW8XHMQLzjlNH0J3lQdfwR+AU9XQt53ohJ2dSrjqQdswyFA+zVyyNDcreWHRcQcdCIQvzB6g6D+ms6Yc4ZDguvD0h4l7JP/oejCVotR6bF/4Ac8q6+8Yh3LNpav6DAa680TB7Lhhws544AXvs4HhchLbrvTU8c7/ALfTU6xGOtHIt0ziyAJuPIlYUP8VqCaFytzsjejJKRikL6OjTTuwumUJYis+Xcsf5KNU+ZHaRqVyO7qLzm4ptORm4vVJamQjU/OmZjkD2ixt4s1ZmC4Ltvj8zhI++bPKVHe7Bs6wuOb7SPQJjqHW2KKHsZUFdcceSPimoVb8YA5wi0YnMF+Eokx9ybA4lVbwaMztHONdOKaCebiLkUnZs/+myOW5F/zLO23DZRrlLRhCd0cKXpfFTDDciXtgv6qQlO52YPcl8ZgVFN1rVj3HFyfRDFnfFs2uHZG7jb6VWIV6V8Qy/f6rTaRbEXAxGlpxCHk6blzo0LCJRliEuVDZKEn1IYWxNN9W430D5yw9IW9FGHjb1wKg64uMx68KETEGp8VGYn1x0FBxsqxG3SGNhhtYpLOcAWObDYnPwmo5q0PreTUM4DVoyzEJwfifKPJCJpDjYACuSZcK5PvcNsAorKweZ2FPJv+kV1ZdFzEoU+HFi0FSk1GevHThZnZJSyyVeeZYOeu6a+cmckStnixukdwsjhPha7t9nzJ/Qx4Hpf/oXPtCm3TXsy40qPqXd37jFabLS6/iizo6guY+QkVW5yQ+cihKZR93vZt+tToqNGz3AE0+9OVIqRkXETnGiCqVv98ihak1av+AmHmy954p2zjf1r6Md323A2oSKoBNznHsIBcSagSnjc1HaNwVCg/m3WyJt3wKjtvOoN829UIebwicpHwWHdTTLsLEAdqsOY1Yv1iteQUZR4WxAmgWdI7BLHdumRrb3bGUPh2jljF2k06BEITlcCxBHhUlXkpaPq3+NLbdoLI+44b+DAy3Bb5Kd1/qLbN3+Jd7cltwBH2J36l0L6wTt6XcRUOZA3fNGs/aZrmCjKoaBSfr/g2ZfLidWa/7r1z15JNtO8XEYst70daTjWqlOqP0ldRYEwzR1vpJi+56N3oP7bt1zxLl6YnMPHXHAAYenLDJwnUIBjjWdBG5IdgXBUECW5rrQJjZeDjysZkzMUMu6Tne52UenrshDDMzfkt+i+ygXbjWabXSPFpJWx8SkPDwnyrB9mvNWaekLtFhYzFt9/hQIiinJWjgbCe0sXkFCKrzYCFiSmq9SGMaXxh1HeIOmEeg9sePAtcrQEp95SmiHkDwcWAxqCbetXpsAq8Ls6WyZW+dcF65FqWa6ACW15MSifd4PkNwwbpgirNVpfOhGwND0i2+ejCZ3AmyBGRzLUip15QweOEMaKSFrK8fe0wS6K+9zwMnDDVpmyBP5f4fW/R8WyNKSzAY6ipGGxrcFZiCrr/mDyLIX4w7Tg5SprZKNidSKHZ+hUmsgQm93gc9qk+XUaZFQVFhMTdthEENFYKt7FziursUB1lz+Iu+z75dxNXq2PiNzOgudcB9Q65HYeuM8+oZ0yUevn8TtbauxnhW3nqfOLhGYQx7+iPkrjI7XPF6JjmYl7btbqpodilTiyA1KLJl1WRHLANA/IUXmQ7hDt8/ry0+7ReUXrJLAzlZjM/T7TJWT17BUNPz19/eDYVP5htKm7YEHdH8wr7kF6zUlXdtIRk3bQ8y2geNiHS7Lsac8EOdQP+Y5gEykKJB1CMX3oATsLPfb9hRhzj3gH/rXaGF+Oq/UB1dsB069BhDyRLc+YoKXQE7O7ldn7NynO9BDfQdY4bckFrZgL8tsbJFKgSu/RqYpyUzY3CaR7+xPeBtfzTzXvFiWEWArETIGilmJrO56V56MQougzxuFljnNBiXbjENwYN5zJq5TG1aJC3qDkXRG409nTnWXvfJowsGaQv7Lz1G24Ux6pcYQ331JEYuAm76oeCvzXo4EZwRYgZF5ZbjJdusRhnAHsW0yPKe8uXUDgfNEmNTlhOhmgI3Cfh8asTB94liF8vn4NKxwEnUNgD3E5yhoAxKgT8X7VJF4ShW3YKlHsUcNaC4j1r3YKoV4W9/8HkxtwI9VeOPHDyavSY1+irJFLcTiWnaZY5JLSKHO0RfUVFU0xpP+QMrAF6pqanUggeLUVUPudWnauWaMN8aKOsBXtSeyLpNbH1kNTR2ZPCGXb4T2N43+dQsNOSlzAPs4btSCR7Qo20nr7V4RJ/6dYduHfiSymZyVzUKOgPMawsx/UB8L0OxW9apkapK0VxQ+7QjA4NwmCTbooYAB0UGvgeaTDHd54yq5IIFt9HIi1sB6FtN9kJx+eheY1s2k8gGVT4hmlNhusBKqZeV81b6YmR+yGVWkpDml9gbb0D6Au1fu+0ihkqAdQf6P0xiVEujy/4LIv2NZXHaiCjVy2Wq69HYkNNlqoWhwL2FkngRxIC3oNpxdB79eDYnaK5U2GZGyRN5DKbE3n+3myeGZwVqMwJK1jgP32miZESPN8LUkos7FvElfwq+tteboux0gPaOCpChCwMGSc9NoLvTJ4iMNkJBy4J+6VPDB1P0kDK7CV9rBX6Lp19vV5OwozkDKa2q1zgY51PPRiJKti+u94qioLx4YlJzl36t73DZcnYTzL3Ex1FIOygfSfQQkJIV7H8CaHKs1TNRpxgVVp56XJrsfsybAPuVIMI19IMVmgnvHriebj1pohvOE+OKA7lSDm9L4JgFez11XWd9Ad9OdkD3mU27D8nqbEnl02Qvd0QgKEGRXhPIBkRrUqVMWywAV/21GkfcPt6ZgSdblLzE73OF6K2X1QUYikLP+Re8grdWstn/ALtlh/x+GfnH5yHYEiSdBXV0MdayhocHSFOi+SzjhfTbCJdcfXkJs0od3GdQ8qdQr6id6z1Odo8Fmu4/v
Aww5lijr4AjI8zr5/utwEhNnRddYDNSWK0S5vs1ObMdzOfMsYyigqhmn4en+CogQzdpFf4jE5KP6HnP5NefJqDHonA+mIlSqUC01kfqybjDtj1/wz+bebrSy75OiTsZASbzJNrqaBw2MdWF57cbC9tHM4l1GS5Xdegas9R+xcQlhHImO7B63PfmErRdDvV8JfpuKUmgmdaQzh1iFoytpNNBzgk+jN3W+bnGVoANuxd2f/lxRJd0dzfcaSw3myBhd2mzFLRIBbhCRout6qYhWVH4AX5ETu7EtiSZF1nuTytpZRydaYPsw7RqDA9AtFaHj5GUkRm5+NOuapJonahFK2VTYoL+5OK8UksJFJBQfOzENn6QoDWNIcqSoGA0XeONGk+dr+W8NEn2KDXKcROjJSaJdpdM2zzsIOHBOjkxwPzUDPqSur0d4b8BxnwiEKYJcW3y4q0cYXLL1069aWChBSHKBBcfoHbo1M5FPgbGwVZtSaiL6J61/js6MWIcXnmqsprMfJxF9z3GK3rWm9W3l21vYH9bKi5RskGz/8R8MpUu5BmzTlDV4+j/d7LRlvHvCMeWqGh6buF/5zOvIHNqRRg1YKSX9CWWtAtfp4hzjymty32ZTUhuHfpU8cRHxe5D0OhH+TlKL1biVBJe8i8h9WUjFa2MfSUn3hIq/GkxK9B1By2cUHhUpXVJK/m2KKavuBJb9nHALpmLkBPMv87doaSmBM4XeKMTFbhEg9wKcTGLHqOTVk+MoESFzBs73sKt2hIt1HJZ7WK9yLhvFuEDT2AQ9HbCKiCcMp5+EwAHw4aQz0hi4DwSHNjCHwpJlIdLtszNLfJc9rVXeG2LYhKAqtabWiPJ+ajZmewMr055fI4jBzGQODjcazryvlEbt31Y7JlLeqZKAlhR5xREeMwUBzu6Y/bbaI8nqbLwBbwqOSqQduI6wacT8oV9Hhldh2tty8cMY6l5mju1cEWRZibi9IfSM8zporFzKV81QYY1vrWOsLGKF74BkTEtXPxl+IPLQbXnJWOI2AktiOvwcAczqPlvldx1Sn9Q2dPSBWRR4Ho3hqWCEklkgSfeURHpXkrpvlt6K9voM3oP1jKRJJ3NjUaBTkXNQSE8FNYpBlob+9prznNXPrFh9+unSO6dx9hDC9T8VJGV2yVyi8IjvlkA/37HR3vaZsFaidk1cKgkPlg4ZqLeaEiGRaioNXXlzZhm2V6IC9Y/A4Fvb4eI+BwZDuI6Q5Q8s4/cnZHT5pqm2M7hmVXjoqKpWqSdZKpQdzoyBaAaVpeWzMH5y42FluhLyJiSFQuWMvfErF0Rd8iFAJtiWL48C3EHRgfEwlTxv7xbFakEoi7HFmE+Dv94e4njHlXgMiKtFQZtiEXb050Sd1yNkbpSQOCUvRsyxT0dCKO5HMdnlqzZzllzkImbqZI+L4ew7bGON6yF1sfRK3RhKkqQZJZg+sQ9s9KHyuh1LXbGGamM58gqC3udOVAwNrlxYQnjo3el4E9IPIe4cbSycQTqg8p8lK+cSOmRvFV1sK9LN2VJncQbVcbFHK+kdoKzKPcOCXMgknNrz0MekFUkXpmNdC7/r2EDxV515K0TTXjcmtVWrftesK0tG1mZ7S0Msrf0NYVGWB4tV4auxM79Zv7ayjORAO35sFsgF6aqJm7iFoqTVzc0F2zlwxeIPTnre2fGIMZgtX/UiazAJD39IKu2xbuy7xuCYxMN+oB5aCD09LugS6UYlvTCmjvhFGh4RheDUHcpHHqNq6EU6bRnYeFMbSL2xSjr//lZjfw/DpP7+T38g9xmOcoKmrKa4A5aMEowvQkV6SpK+/6eogs4aKV5CSg6rkBKYXX/PHu/XP/1sXbCoXJ1BXMUfZSYNhSJoj4YDSg+UW5qanUO5Fsen9D8iBDaf7NfeqsyXzHJ3fAranj18qn3CjsLy92WCdaJBjo3QsQCvokZ/Q6iWXRIP+OQDy7bf5C6hb5dILP3rJCDCLHnL1F8L49M2PtPu9FxqMePXx3Bl2WKxlF8j6AygkayczSizpTUOwlBGnnSoH0XKpCRzGwZ2er7fkDcbjlMmoewzHayd5G9v4tHFdEH2vX5quN72YCSrwg4TZsvOKWY3V0NXUH+LJ7EMK7tiynMk/F7a6sP4vVA2588vr24oDe+6bSjhFP0CskCQMeNt4Mic59B0sZPtbt9/fNL68AWXGM1PEcfvxEARrfQJsLX4CMRGFYWDb9Rq9do/bfKMNm/WKCLjdCR6+ZEpDI0Mw2o3t3lcNVz+iJ4VjY/GjxNw3H4l4rAtoCc4fRX/9oLH5ykQDrVTVd9h0K0x16upYf+rsUJ+mL/b9OX7noUek7H7fBWIWeNesrqxVz6RBdnGFdY+OeuYX2m+4tS5UtdUPZGO5Nq/Y+fqvJ+OK+CakzQb9iq7AaGNtnfEBnKQl9BocOL7Cw36ebaVTrXfXOE4vln/g7eMj8KHitht5GWZGVNTZxiM/UKQG+Eshxz+j+W6cIlTiU1JDBg0Wkk6h/yMXOjQ/6y0YRHaOEvob294guTF2B9Z2UnYfWYg1rz4fDzS5hUBbockZsP5sD/Xf9b7lkGpiZgIOpnInsyiagrpnsZhSW8j25fiPHh/J58VnhmgkDQ4IITyBGDP+g8HtHOu4j9nu0G3+9cAozCk9NKFG9OOFPAxHG/toKEz95BGrKV8D024J0D2X71vik/5m01njCyau+FRMjQMc52PCWtAcPPZmxOtPuPnOBZSyqQBHoREEX0p+FKkP1ikxnrcaIOiMo+zuvqCuQZBg+Nt6aSYVjPCC3x50DP1HPO2pxPpyrP/nxUqiRohXMiIlWhyXCjEC8XtLXpj8cfCy7DDI35BcJQ5F1axh/9tYAFcFiEKvqyYGc3DJ04WORCCI/D8MRX9lWJujSGGC77lVTXAmPLuJSaB5Mt/yQmUEV+jmAlQYfLi7bhVsJjOp8gnK/dF8yqoUlc75Ly+yvavGt658lDzHAzXoVNTts3DqKeKdeO8txDUUyUp4aSxCDYVHq9+vhhsT0KA5PfQHILWavYDY8vs9DqWEl244kj1iFTP4EYmVugJ29XKbl3fGpRs3YTntXSTw/7Ab/YEvy887JLTbUt2yhWKRyRYwY48ys/n1F0Kt9YkTp2j+blZMDqhWyWFtnLOvAS40ggPfHUyZONCXxt2W9xUE1tyJM4MR+WuqRjB5n5KF4xP2Qjl0D4tbfFfytzDrSmf11aDp8i+BrWlnJnxc0Y8eA/qjWk2Y53EMObOjTpf7E6OuaiQXGXW8Gp/fq0OQrOk8IqpkHDtW3oP+BtF2+slmVvgqnfS8koHufFi7NUcnKnQSvM7wLQbO/2KbsJsZnExNStsUXIm4zpWnG898VVX6mGxSnw0VvNNHeLj4sACPxdO/BtVlgyHEPSwpvM70rFAnG9BKDGBKvrT/1fNv42AG+Olt3EhoQfXlABOQj+YAl8T4gbI+Wbr8xd/v8wNcfL2inx4jiFP+CUGGQjhD3Vzc6Vr1xsKeMh3FBhdPk5pzun4mKvjitvUHSNOr3GGFuX4zg8L+WxLao7Mf7A
WyHMlQu2ycttp3qNpK+8bTjKP1KsHS0yyHVKWI6n7gnSFz1FoY+CnvOIdg6mMbjbaBTCLQm8KO6BcSbg1rRfZmmY7dsTODUvpjjydKVL6fa/IpmiQXsYxcOf8kFXwnFhQFPNMG5S8OsNVzECmjmclPC86JlkBkIagKG4bYcx1jI6o7EYwF2Q0kdIRTjhQE0ZzUR9NI2oMDYQYrfkNUbl7N/UeM28758k+1Wvqpp+/PxTh7EJDs6R8Tpdpr9qwkFfrbHM+/RTeXhRwv3z23QjSK98jS9QiWNtYbFZdIJM2ve1iBzPzQmYLxjJYY1Js4VgX2Lr1KVU92hMMOYZFxJomQ8ugJishj5KArjCKw7izH7kR4e6iX18Gm9zZEioQbrgW0yHZSHpYX/22iRA2MU5y1hzI1NmDBZ+/oMdmoayf8x7+jqeJkFFbMdnjg2Bnbpezr9my5sYhX/QObAvH7zPASjzL6n9BIJuJx/ujSZMJFyaR3gYLV9qPEoJ/JDoJJHCLRMQZy7EssNbVLjAn7cd70hi+eNeWklNiijWvmJUeP4JcosWFw190gWMZALh13A8eeIIQKAgYL/jBh3Cvuii+FfRi3fs0JBb48DDrZ4R7eSKzfLVz0qDe1/wgy+jUx8NElSHqyyyXOUb7WGvsTUJYvouNVx3J6YRMSKKzRGXcmKNsupq4sRu9B/K6GfJredYDgUTckT4Qb4ZUawiZGD8JrfCy0CgSbxO9b3fClhKy13IzkDwuHyZJRU+EQlTBINceAMlewySsq9raQmUNFU2hBD47xn+BTPChFg5geIckLG4rUk6bc3wJ1kkmq5aXeE3m6MYkTR1aK9hEuLeVM5HPH2Cr+FvFMvhhsKYNZ6snIuzjJS6fGJPjrm6fP72D7yzmq2fZ/wTVW6VkjHXvXKegqxcbdpzyuB65oZrsiaOndqs73OWnNwJ/t6ExpR+kJ1K7rdhHxqX63DHTFtSvXWyJ3SFym31+U+DVbfXT8ufaxfzvx1Zz9vCMBqLjC4yH2evRKMPZz8xI2vSVCNyiXiNlVCApzdGMefA4Q1/szr83HPCseWcp0k4jMa4qT11hEnyYy8EM9Inp/5bOwRGomOC5SnVLQwYpUdbR4kuWsF99x6zpHhGfasrYNB9pmfsdV88rZq0Xxq+MYaRFPsJSCEe3eKqmMmBG/fVukyeD1QbN1RyOWE5WhcCoyLetNeCfowd9lPbL7s1ePPg4RgU5J/ori7Jr4a/4dipJvE98LGIrBFntm9JcaUREsQISU8QoOxBPndantyGFhx1nx7p6scy6falLuWjU1K0aAXaeyU1xvlAmnij/OA0bF5YgRNOoGUSn/lFZBFsj+hapeSQlr0eo+cwaGXU5V0MV/aGQRdGTe5GFgqsaQ+fRwUURcGWxN7XaXdPnYVEB2+dbFzQSYxHWGuHMK+IQdBv2CtpDOPcMFBvYZ6B62CUP/xl6VUCvBStYUHCus7vtvv15ALF3t04pR3VejRRGgNsV1Z7LsBWwtNZawk+nbZASoOJ5i2e6y0HtuJDar3wnWmVd6/auJqwYdWanxMGZlX4ffa14GKiBqC9IjxZ7srTgdA9klDDnCazgx8nB+O4H4faOUh9OWPbNoBtCr1CLxinQ5moZcuIqCOL8IiMQaS5iK9pbgu3uG2B0G96EUPD2LJFLDzssvnsQgKOV2tXmiVDLHrmNFFzdOgkLmzM3hkCArWrGMIgvZSMdTbZ7P/qgGnhQstIrNaRYZO/O2OxGNzFB4oLthmflXrohIWXH/OQEwldwJuvRaiYbCF/fz+egCPAQSf0H/oXMeX41ZcSOBE01JNM+f8Rh6EdBPO5/5FhbArSalaPBvoow2P/KbYO5IAZ2uz1qDJBFpU5o627hQXIUpB6xqJN/aGg2kkIlybVO+ZrFTSZR3+ovjjW2kRgKA6BT1M/yltiCe6zl0vUuyB/OcM+YUqxwTpTNxyDI3Oq7ulj7K7BVT0Rw1/01xsgKQB4xPfVcFBZ2deuXhRc6F4wuDOfGDWyHbnTM0l2CtOtDh3mfFh6onR4f1DDOA4JEG28caQZufGBI4ys+NIYE3xCxfA1MvcBxp3u9z0HmPL9Z7iEhHpUOUItfjtSzdM7kILyynKSO7QJI0zd2wsYM15lwPx5qRhBB4Foaj/APjx48M9xhNL0yKoHcefe8mKpAR6TWyWVweYcKS7shCngKsFbrsM98olHoPhxuESZtnqQfSKGmt8OKoaXJFgHL9tIM24mV8UcAGJdhOwCyTaa+JNOPXxNRqvyzP625NXAo1Me94sr3WK6qVrOgxnSB3Qum0fIm67dHxP5jiWk/dS8edjZ/d+nnIaRqXqxRgUho5y2DpX0tXTiUb/DlKxw6gTeqeWMeRfary2jb/LpdGWtzxjOmGyLpeS0ZJSCifEwHc9oVdWDd7KKcIIwGbELsFe6eHWVyK+ZewU6OJiHS3mqLTJyRcZVvvFPaqr/FSzyCUDOa9//qZ9OJfw11ohtSvDUW01/DUcVGlF5G0bPef+ZIAq5A80haHxJXzaVb7P2a+BPUd8u6K3V3eq/HQOycOCntraCHPEfIbu0qbv8Roc3SMOqwTy0yzMY0gZ9kwHkVQC0/M1RscXBTEXIA+uljWK/bHRqBqiUKCofeZURdAnHn7yQ8L5WMSFKIsz7crb68DRUs2WTZzYZcwBTz2P5qR55ZbF0eMdLlJyYhPOA1fZQ07KF5Gv+UYhkD7WNyKbL3xKjSDhMO+4vQlTh13Kpe8cOCSa52XPMURzpaFN1Lhnl+oJ0mzTQX5dCoIRhdOmv1tYgrLPHiYG7dbHrGwqO23n6FNIB+/e9BsB5Kg4Xq7xn6nEBi6T0Tjc0arL4TtPC9IJfBHaBv38x/c63a2Wi78XcAi8Ds1z0dXfZ1S9QgL/LeACJv3i4PNW3k/L+lD6LkwJGYlElqwZBvYAv+WIc9DkkfCi1mPOq/9w/rq5NXrQ5pyfDfaYMoO7/uwTXM8mIKOvVJ94H6XAsFK4ZcoissybedaxNBhzwox8ElY+9Yuie+g/QcNtJiDTUVuVi4kvbrwQeMUVS3T980UUIjbkWqzcklzCjRy/dpO/9R3O9cmJnSONrTK4CSmCLxU12Wbw5+XcZO5oLEol+1r1hVaDXAmcuyf0MbIQFa5d5zNlUPCjYKogs/GYFf5RGvEpM7IBQcnOHOlG6Qvp+SN3Mnrz4JHiPsfuHhtFHZ2uR9K7T5orBQSElEmQAKE3avsn2h5F59zQ2Orj2srmNrS9FUu/cgjLUNBgxk4JXF0q73sjkAcNzHilcCmLVReZH7G+18BVnReg/5voWO+d13PoOOqVnzC4LasPuALRjpWZnjRt3lFWIzmIypAn1n9PLG4y6KGhcSZ1QlK9xGg+LiPKTfDtgokZ4G1yVnLryTCjW4kZX6Hcnblqhy5IJXD+1HEXdSxmTJOLmL21LNRwDpo56pXHLxEqOL0i2DOPQtzn6sY6YuWCcThI/7E2r3Z76WxvsKi0P+QZN/lmEmTlPg0rCW2R3Um1890kMeUXiPybUffmlMkhm1/CstRJ
ePYVPBUZLTZYy2qLbciglst+w86MdhERc4xMCFqtXniQEQ7L1xM0ZwMDanF9kln/bQbJfT8j2CpIGSk8TRWPcKtODg+U4lhN1vSH+iW4J2f1565k3zF/kvZEz6r2LysbSVFDmV8fEWI6r8hTA9iJ7/FSi0Yfp4Li3y/wFpBfBO/E84Qnf8kjMY5JupvvH7sD8TPBEXPONZQerE9XENGxJfYH6LqwdTGImhyzJoap3cN7gBI3s528nKcuo95CIdJ8diY/jwS/6//T7DsWnrlNWBN5h6qyyTEYDiPBXg6C5Sw4/qIyeWPDm6Z0xTQd0Nwh+/hJw639xPr9VzS/aYpw/488g14KRH+BctL1/94KgRpfjdcnQVsNmhtZGrWnRn715+G+r3LPTq6z7J8axXBL7KfkT4iseeW39sj0uP4PW9I2CS2G4NWexJwceH3MygVkMRUsZo8ihNC/YTqtihvqvVjHXXeLHFBfx6X0MMgdmHuNKaCl6kgNM1wBH1Ps2w8yvc5umGxEt6ouJmq5oetOoRxrhnlE+pqV8TR8B9E5uErnG9Pa1C3E1EMrYUJc0q3jKsw76vIOFOTW8rUbqp1FLAKO098bs3BPUlTUJIdc9KcvibjBldLZBU9hNeXCaFQMAmUUkhlDJcgoXKeufRW1Hxo030CMtCYeO0wOFAqDN7n4ITbg/Qp9xaszWXYrcsc9S2hym98xappy0qMkwr0YIPeMWyxQc35S8WyUbTUfi5iqN/th9IH4FahsjBQ/G3BJodDUL9Yv0lG/0EAYnHI4BJXj1Rbjj2O3uVncAB/ENtxqKGNXnN99MwItQ7/PFNEDCMTaJEC/ODb/3oDRfo5uSisaIVCktXsxYhhkrw39ym9LCT+mHA2ZyY8X3EMCJ+haFVNt2Y1Wos9LRBw1ZTaTny8ufuD3tdc/DijTnEaHmG/AyhRf+B8SJh585EQyKjmvilxEdxJv9ZYXa/eN6tb8TfHmEbyOZg5Gj+/MtHpMXqc9tFZrxIkdSiK66lKeJYJg9IEhhxst3ujvS+nUgZt1YU9LopS7VqCAEPnIJYqQRN6l5lXqruJKAr7zTnYcKiJ94q1DfcSQ5vKWr24H10wibKCSp5IJCAEzD0wddJONJNVtO5TgS97e2+YU5CWQZD9t+lQ95KMUnqCl1a5qmMhal7xs0SEzhlbbFHa2p/MFrB/ZynJSnzFH27qllvjXm8yLFdtn0vYat6L5s9CNUODmK2ObyCr4DdB+t5h766Ukd2mmvAOB/iQgrRX4G+3R5rEyps9r3JAhvT3XtAwE8e334Mr51fY3r/AqIcnMfKNyKR7MrfLPBxOcZOXNc60SYk6CagbGNOz2dWAXY6GDtgqoJh0VpImvM+zMUOC/EgpyFS+3px6QzhXjhG6nDMhhqMslppr3B0GscIJbW+Saoed+Bkn2djPgs+eJhEFOG0biVMtNS82lG6gE40z36C6iw8fdTzQ7d4T6W7zuHX8s1kP7aViKyu5AMP/Fnnm0Txv31GJx755WQQq0f9Huj0g1KIgAADNtm+2/bNt27Zt27Zt27b9sm0bs4hZyBE2RnN4w6UKW5YbDXwM+kcqefz0BexWH7qm/5H/AYhhgyaQODcFtcDnMwv7UVAmwUfGNzbmWriTx8UwLFu4BGmntBk/k5tUXJwMouVQt79y7tml0bnYmlPEfVwPj+O9CZ/hNeatK1pRFlQgaC5RnXlteAB3e1oe3j/ofsO8iMZmFzHtGNEaM8bqZgnKa0dGgjBzWI3eJtNHLgbuNC7qxkWbt99fpk8oPO837nX5x2/3qGAxOOCHpBFNigRWNQQ33Zum2jWrOnEMRpW2T+mimVwvtOC987eSWPSLX5rf64OCgkllmlO6K2YaTd4oBos6wKPWVCsONcuTvoOYI3BYuAu2arhh/BczdFtuffQKRyOgc5ZVWf6nsttVsAw/YImJJg+L93KWoY0VJaa2TuKIaHUlqsz29m1GZtpkxdmksT4u86YeuxwuwFoRYdUYQ00hnl8dJMDno8uIUc9uei3DaXN+gBhboiMsHaSOdFMt2G0lqwgPbYZz6NNuLqLWtdN8LqCYWW0zIzfmg4N8QV/rLcYNkDzRWwJGkHc2LFcZ3aliqE5wLfKibqDwrhbV4v44e9TlpaB9WcX8RNPnX+MKKUTqOHVbzmib787PWlYVIx0hjbjcZmL6WiN/Ieev0hiZet5IRkzmTRUc4heOaQlzhQiYX550b5xsqUjXMWm2SafTBS6O+Mod9sR2YYOWf6ANr0QLB2gFbAf3yc1PrnRQOen0ISPhkgPzZDyaYCrX458LvEOA4s1k2hzVNerbbkllEXHsRA45JFTsVYBQkieY3y6ncoLe6ptr/JGOYpU0h7nEy7P1rum0+3nUGORTQX97hm8gJAHkCpUE1wQSNVl6ScQlY4mpsSQpKjYb5g/rsGjOuavfBWkyC30vfQc8BfOJFRJv8SkpwDB0eKHXH3vWXn1jSfIQAEkJXD5Owbcp0YFqH/yO1x+DQ8h8E3OJZTW93DgLAdwenNUSxSGuvTMflY8Adc1h4a3+uGSSg/cg7dPSDdrhxoA/qkm/QvpB/ebo+hKw63ubEzw8UsqUSnITDQZYOkkii2RpmZM++zhiIsNd2oDrKXwLlGrOZW50lhPe2hprLo7Jbs6nzIgQQC5kTfj7zeyC/FZBwsTRYjiYhl5cC4HafVpjlEtmoXJpXc17H3hI2g7JZpwNgEiN8mtAzW/h95EmilvpOSEYSIQN1nTWclIw8hk7eWCVYJCnhT33T+PcISfX+LD+BUdhPv7HDAEAa+EY2BTHXpQHXTzzhTcExSlnyaAFeCY5N25cuGngovdJnAuGY/PlVYUw4xvqDATg5Vs2lpS4MB+FvZCBsFQxel6Iz88TMqfLIQ5oG8cRca9nXGOm5EAaNocCDRc1bpKqcKw2L7++EQo+6Nodr8x0SzNOt1uYO+/jjwkiVW+xMUQoYwdpFzqlcv8eknDP1jO4jLGCmHYbZdvn0aq7D49UEuzxBB6zhLpagPKK8ZK3tgNTTqLK5k57oHwLG4KPeS2oEv2nsh0A1VjXWrn9ZZT383t0IrWDTo604aTJBpS5ZjRoMHF8be92rP+MhQV76pNW8wD77SSwjgitPE/eCyJEnZbgVIZzLmDwOkaQUCeWQWEboYmQhSllqJ13pxReoS7nBalB7vPKJRBQKJd25PD8RHQNzKCV2ilLKh2rmK4GqcNIQKkSR6v4LTlwtAGPnRZKd0cE7l4v6S/jf+doKfkY54J1U/ajP2i0SLkL4NmFqgspxWzfV0q4XITB0ZOdEzV5a7us+aefI81ntY+4AunG4Z0P+9KGMJISEXYZstJDFwJwQrKxEdweIAhbsawR1Gu3iFcC+cqNHsiTQIiU8fl2ccPlPvOtb8TVEKJk4/sPdzSty1fTuFWwnH58qjdJgIUFm5OvPvK2N8VoYwhrnJF2uSgyamueIeajfP21IKCGHRzhzI4OQjFQhfa/eThi3Rx87X4r7U4ufrdg6QN5aSdKF6f7C6ESwby1h8MKfb9LSo0v
nbxL3M6Kl2YmGUagUnhWSJGFO4Tofgt6Wa6Wt3uaeqqvCJGD+FHFL8pe8tTRCx0Li/0ZPyWr+0R0v9wvYzCskqUAUuQX98HRhfgeUS+xXqF4WeqmFNfLOyNKN00+yXscr2o75e9eORL/Ft5QvPEIX2SFnVqP1r+mxscM+ZCO/Qfx/EP6mzp+Bn0r3ZDKXTxNJotrbyUO+P1mI52pARYibBM6XYb16CDUVnhs/l4WwECLMZwEKxHTdKg1PbGbGMSgkbOUyaYdpqtclT/wE+qRnBRsH4pd2qLC0qQavTTI9iS1wSQQOTe4l5CuY2c1skZb05a+BLRzglNS3XfhstkzsfPN6Dkg3SV5tGE/f//uE+gV0ZsyQ03L8mW8qR/bU6G5rogjfsNjg+qhcC9DUlmjTSzxW16hlTHB6ijv13sTRbfkTKh92u257fm3qvd5R2tiIDYzjKVMlKZqJKyZIMv9Nm4qCmebIAZ65wONeVxmjUeAnGCXEKOEnbjxOH7Z0CXzXQKOUUHYo8UXqZmGc5NB0lbzuVGMUk3tXutf9p0S2j7wqm4csVBYbPRy+rRWZd4G4zH78SmRouOm16mRSdMst62Sww23E7sTosOh8Jwhw4AjwxZU2eR1yJUSTfZvqX2VEjvc8LpoR/EaXLvTHIeR4tMxnwG8/Hm3tF7k77vDI2gQmf+xK0rAP/m9sUyeMCHoAaDV8Bc31p40ta5MK3uCoW1n3l+laQhyLXm8staHD8atT84j00xnXHPq2mjFFZcEOGbBvmP5CyjmF4Ircfjrohc7UCLqTDHlzHs563vtNRumj/PzxvuXDx0nMHVYUs61DGUNHrkv9F2lWu57eNZokkI25kQyDutjfTkc6pkQL6OTy4/X1WclOzQ9/+u7JulEKnc/COEuYpp3zMJ6R9UHQZ5hB3aWLnvJB33VTAuVAeXqsFCs25iQd7Ih+KoGqjCMBR5hkVebahYwRAVYtujnkqD1tpoZno3pFFv8Qj1FyLrRI2MF8lcJ3SWd6u+6zwkzzUsDjTKT3WL1xMIYnd2VmBdN3hGBzpUoxhpWT7R8WOrtcfAK3WD7ZvG2PBeGsWFloMX1E5aHvwnCsbWmR0BuSmfqC740tJul9JX1gGUcJazj9GM1pitX2RJdTTVrrs0MrcP9eUu6UKvPQ6wbgoPmnDdo0wGbII32X5aM0uSDjNI299hBzXKL35y9TeQPNR9lZku4oBDSf6Be5js4lZzfunXNYsy1SP69wRub6MUjf/jx1sW3ismHDnmRw8IL6mKTDptBupgBKxW3P2IHkeT7Z0tdoQRxY64kRKycQGju3jXCYAdQfRKycY+evdwZHhKUcpXbYwZT9EZNuJmDsioLStLU6cmz6qK9iZFdOSneg5b8PVeu9y6vmOzWSKvqrIp5AZlr0yRf2LlXSTN8UQytZWFaY7zX1wV0aHY4PROwjm/pgWclqF4jXlm5QLkk2ew2C588nS+7YjFdWsiI3zuWj3ei4W6Jb0mfeNXdF4Vh5O8yNF4n3Y7fGxT2yQvr4SvORIEVhz2aykl8NrwbBhftStreqScJlun2Z4lqmEGudmQYY82qSVcwQl0AHti6jOqjEbo0tHi8kJsaI2wnegmSNE3oS5YBIxRwBBeDo90speCaMRKbZyfErNFRa9RckYwlyZ009ttTplbxyfMqABx0+lA/1c9qDIYP3dDLd01QfMd5Cl2SHcfPE4ek1dCd/ZRkeR3vm12jbqw6MPIJiJm0Z8Kqne+lRpysOWwRIdPpeDppmKDlCZ77W9LLTd/n+9XP0veJj8qYmyoz+BjfGS7lcJGm17AOgu+gYProftTmpa6OIAWgKbk6HvvxI9iNCQ0tWLMbXvpPlgxby77vn8QimlzEK59z9Z/T1CC+7/a8J+BM5JQiUrAOa7E9fT/j+ASCZ+tJ92lMFU4D4H+pYsyLHg4+7zt0xS0H/c7DSBTsAxz136kfNhl+LEqjMBjlCXhXbaVSh44F49LzUP0Ja+YG9/OFtkBa8aen42yoPUQbgzTm5MpXnlGke4xLvEGmvU8K29G2RN5sHB1GOefkOB+vwUlPsgW6A6drx12vDldCIJqBMoHCtF3/YpUHxGXI2VtcrFtTc5mtHmhBvwFn+UnOdAtLldeBA/wsehMsppgE5jWyo4LC/ieA4TeHqswZPF8KmxCmZbMXU41OkoSjda9WcEVPHmb1XbR9M02I2AcSLpeHzNBLbRFWbKMBGxNpMflcYB98zyzPDcA25EoYCd9p3TvQIyN10TO0d5sdtJmdQhD5N3teTz0FJuZz5KGEX9xTJJwGEJoLwcLyYSkHtZCt79QKoX3wOc92tQJVYPGW0tgW/P6mrmcuAlllBRvXURy4oye0/loGYuQA2m5rkNCg+ohTeMHxjz6IsdpvUR55XKgprWg4xaOD3wyF4N9ym9h86tmuiwPFdb6KJkIDDzDWKLtmrGVPDNsLOBq/P1mKb5WQ14zb/pP9r6uNO+lXKPhbRqiiUkuDvVz+7uLYxd+dVC0W1n3Fh2E7+RQrRqVHiOhsM4yNkvPk8ts7FZ4I22fNl3FoHK+KDThAjwlvDcwOsyJdKeWfHbZy1L2L7BBtikRnwxnKBGRDJ70lTNG+xcy5X3df1XNvWg4yWnepvDlcbGZoQEpa4roGXLc45CVL/7GacJdb1XL4On/XGFKOk2qntBNiv88/0VOUYYFbZMV/Z9hl3V8FW9Jvr4PdbJRhYY+0NBUxF0PH7U6T7bagBZjgWeWd6eXBLMFqBhQMExiSVpVnSA8JfRikXHKI6A4UMHRJ25VQ85nF0wP2Ox/tIPHkfJrcSJNBqzjveZOOr8XPBifcz9MMVk8kgUTS9Bk4PXhVwre3HQ/ibo1KGZxnlKyaEqn4uvNIn+PzDGutATLvcHqOsRCdMsEnZP9NIjyRyLO1E9/MD7iU3j1BW2aNvqeQpiEpznybAXZMzpE+cxSk397n8mZEdP1MaJjMEG6f2Uyd+H7rgpkUinPODs9KhttP7ziwIgJC2UgTcIRr4aAEdctNBEy/LX/RgofPFa0cyHSQtcx3bhkTEliKnhbbp7bIK3qqJtiufNUNIzPvwm/lhhK3o/9jVSkafLV39hRZQx4CUuyCJDEzrjncpBrcd+M5cUxaOMg8KxvLGdPt7rmehM/3/E9ErhHl66nVwCKsEmkycGQqDvFQmll+DzsQsxqlU/1Ylzfwv+NsXQQ2eUHoHy+/DggzBCkgWjvmS/YJtlmOOFaHL2900ex7nEIvk7RV63nTA0iTsypaqP/E6D6hY+jk4ImLrhDvVVQNT+pYpE8zUeBRRLesdysVEPULvM8yyEz+eI5ZS/xHXM4ty+5CFH6KCtwLDmbKbfeSIPPA12KO01wHlVid26jZO74hx9w3Z8fZOvhqnGmX+Auhgu1rSSywsqtbCzz4SIj9PuWqae2ANulw6Uv9p7z8453QySChexl7g9lSlj8LJVVeydBE3/mEVjEZl6FUC66CZXm8ztW0YkGNqsETqtYoZ1hAh/6bDdrfvfNGDSXz8qGcyUfSURjQvGxEXTlhS7a
54DJV0mlZgWy2zySnN9rAmNnR6Q2hBJcIhul828uNxN41g0hJwpZXPF/DBcB9CCFX4QKX38kJvYxmle9h3sWryB4vJK0W5m3LkuO6Ow3/e/CTedjSiqY+swjvI8JtfDDp0EYl0nWDPjV0DCC0NGSTxWGAH0EjCdPvfYPZAnfINYpYeWpOBsLnlPPk6/0OMRG67+hG6u0mBlTBfRdr0ihNuNhuiJ423oPEOadCekd3JztOF/gL711NIxsz8ihJ/PMTs5TxxxoTw+MS/dLDg5rk9nQbNsKJCTXfu4L2bZD7h6MNuUqdOTBRK+DWmc5QI7WBrKXYktKPEo9yBDHqdfXfr61freCQlW5mPmIeZWH/0rZ8nTdHmxLKiytxJ7STU8Iii7unWytp647MGGyJpsNKjgf0DpFUlhZSUJGfyvA05iUUKhMXkeCXYqpuMMij3iUoQgD5/K/93wh4AOho10+uylpQlJuQ8DPZlMW3vBaMzGgJGdB/LBs+/15aSgFTbDlv85v6d0kQKGV+9q6sBK5r0r4cpvb0SQlWKcIOEEvbAGKb83nECDCQ7nFLsEmnY9xba8vL2AzqGFEOUoX7g/Y2Xy8ZSXUTTwzzeebsYzGeBCRDvDB+Ym+ugYG0Xh6xFmedApDbmg6acrKMaz+qOhJwXz7EHnHlpfnlsAfbOhcrcgS9KDo/9dBfETcbST7Q50YACHKlmd+hPbRz0yrjJ++8hqyy+Cv0gXDDzEk0ZkU0Ql3zFWJ+sPouiRSmmM8aO7l3kD6KnvTNeDKXrZaFmrOVRSdXf5D+NqzFVp/r2e/FcXeIZMC348qPOM/46QrxOOgFKybNGm1poZJgkGH0VCt41qrkvpF0v5Q+0Ck6RS4V7TvWnMOpJZCghZtFsrzIUw5fzZSNYNqcaRA7EuSpE2Wf7xLmKlYwk4tzCyYzxK3uXQsmL4Pl915U5nRpdE2b3qMN+41pJ4PgGffJMcYZWbUSbfvn8ZDWxSNLK0MJZ4fMoQL5jCOVDMx5juNJ153fbN2T/agfMbYVfOsWkFcjE6zLzFhmIzji+Vgy/B1ynsNivyVdcwIf+R+rIkIRmgd4vlbm8M7WysKA8DKNdgkhzV2axrl0G4Vw+x/PM0kuMwgWVwB7ak+ViqyU6E+hE/QVpGl2Y8HfELgsYXR554TCMURLtOiZXA4w8xctNFTF6ksnkDgbNLzDgM4PCV+xiDfw7Bi+FVk1ytVmnV4vIFkZ3pwohe18ycffSrRafDsuRd00uWRHqVyGY1WYXTOW+csVqGbtV0IvcEFGzFVONRzMbwGHn96vcYRz+EPA1CGhb9FNQbK9yMNxTAwrk8idQbrD+Qx6iWIFfwHlAuZ0iZ2DRoyTSP2MkiB/ZttZfqpeJkSSR/GG0eZ/s5AmTr+7Gup/p2U/u9pmLBs+5xBdLfH991p0sjSJRk73CcXWMevcSX5xDjiTunQQZgcM2H6a2RPpdQFmoudURsdvR1quBm1y4mkwddxnuB+lOK8Rv5ynDWGVHc+ngrCIkYFYR0hMcX8x7aJiJhzTgyv0TtGNAaNofS6mK6ONY1ul7nc1PxDHCQ3bjjzuiPf8gN/Omt5/w7idlg0w50IugRWyY6DavDhALbBS5Za2yhBfIf3GvKzhE9EM044HgD0cObcz6i5kgEG9Ypbi4X0A1XdlsdTLZxs64tNZvy/yESfl5cQWGIgSLVbSPTt+vMr2wkmzPHHIahpsLS9+rfp7lpTA16Wb0otJNKRZwnUlEzsSVqCBI8zgAv8g+sRVB2EH/2gb+1B03aFAzyMkx+OYYlgutHl5TYv+258MNnOKEQ4K/Ua+2zcs47D6pkJdyRs+P9VV6xku6L7Ehg9Aub2c/I/ToXhlxLmlsIS7lbDeDZrBRSYG7Ox/ko77YkqJB9LylRFW7XA3lJ6tWpfhMSt/s9VRS7fjSb7OngoI22nbHoEXq10TrLyZyUI4zjLzKSZZrQ2pLA268NEGzKJGF7Z/lx3V7Pjtsgzm/R2ebCUkjR7FvqX2iI0AQRBcBX+LdGxNgYTUX05ys/UylHA3RhcEd9kSnBwA1cq4ZJdf1ChUDtNJgmEVCki4GlcKAUyJDOkQyeElEx7oAbv2mWJ+KqcE1tuhDi2NBPSpu4JyWXp03YBbgowTJi527SswpBxTgMK1a1HLUCni2TGTYW2lq3lr0jIWUxXpSwfwnjKxJmt1jQIWB3Ze2itciS7qq9pZh4o4y+MTqDrlj3LelbyTkudIDaU1QefZOlncerCKpsM2iD7nwxd48d7l1cYZYxs1JUGprvl7blXXetHwJ0uEaT6WpFOfOR/lggd2Q8IsacXUCR3b8tq/YHLjgK7ihWXomGG9XGBI8idG7PCtcvDTsEprVrqXQ/tX8FtFpnn5ShSbXVaAMlpN12iupVXXCLSEfYfw7tu46rVR7vK4O6dL81n6gfsHh9dCEP3lpKvw9NclIz2pNNnolZSQsdVoZKSVXJMWKn1/uh3WYi7tuVgpuCC7qFQd0VgFgDOnvkBEg26CJcXlHjiePFqc986ErELiTFNtEL78uyaq2SkY9z2rU2RjDDsJ7X7ZIcfstQrcBuDYvPbdvh3gRVbzaXGUSu82WTuHWaIymLjBzsNIVwLh5cep29HPNdeSok1Ss3PZgveu7yDrDguCuJuqpOaWk/YeL7IRvdu53N7YbPcTCXWw0FkxwPRYcHvrCZXGq14PfaaMBLZ6MRrxYLvUklynHHMEB1W7jPuw/68exKTz/1S47GtL8FjozrZDDB1Sa3UsynZrUlZghppOrM0eDfgKfi5o40TkaaRhWLR3g5V6ymhcbzhp55PU12py2UBFcQP5hvpwqDZKN9/61P3nLTrKL204GtUU87pt2w1slwDihGTsAPAnyucwiAj/FTwJ5BoLXY4UVLPU4PvB33xLObFgTyt6hsyBSsa3RLD/n82Pxx5ah0BHc0j/OqCyr32xvDCRuk7KvJYCQYRHX/7pJ0cWlSM+ltcSAbFITSSo7k+l6edoI76xEAmdqT1gBoBi9QmwyGC/LQyuU9TmG7SvaA1aYX1aPjZ5q1NDRNy0s5xXr9tj0HmV6yT5SsYACuybvllcRwFOWeKLLFO8Fi0Ui8Y55mR3Rm287giYVD5TZXBwqNYrAMnHwB6Mg31XqElTXrYAaOanvKmL/5EWZNM9cdnGWZ4LavfRfqFx3pv041Xiqoom5nimUbMfF6PvE2yJJXgEW8J3OPeZLhR9Y6Z00rem45O689S4Npgli2r9quONNEf6drcOFZ1F1e2cFylZhvCrRN6P6zyyZnWaeWMZmtOY7BO2m2iP1aVuQe6WL1FnXKumswjpoki/Bmoch6ZIlSs571hf0jtlIhG4zuHbI8m58DflANuv7XfVaggDuflZIfWZ2UgxCWgrasLMqZL+4jQyaXP3VmaKkIP8PJrzAFQG4Yl/CgSdj5qaeIqzq2jc1GnunFPo1AWKjwT6EwSMAop2Bvhv2wm7T+1YVe3ZFwRsTSyjvzKjeVKibRfgsscvYFwMAkzjT9xxQ9YHGAIEZOAGu3iFMpQzZwyohI
gAJ+OyoJaI4V/FL4KX0jSDo9Cw0wGI9t5dD0dIflttg9ojabz9t5knEF4ywRWOFBld2CtWIIe1Am5MMwzOHxaVen0ssT9YoNI75FbVCVFiR0zNxw7Qc0Syvqu3BauBog+wd/Ur9+Y0ATgvobfGVNu3iuaHxshGOdF0nJF5gXrROmX5Hvk9PKRfA+p8ClCb8OvD098ak8/pS5ZrMGJUOJDwKZI3raYa9W9ugWfdDuPpEj3TG+GwFO87wZnmVaa9aDuvTnoqpsrw6DzauCUHcx+Eq5ZN7/0xMPYUBlZHG//TSi8T5Y3WmK5bK9w+ZC5KnW9LszennUSmyioukx1xcwxKxq57i6EAZYWm/uKjwg8Yy9oZn8tw2sxd2HBe5YX2hGM9Mkx07ihqx8QHiVjE8GgKlM51diNnN3LGRAfiE8QumL7HVmFVOIxBoFAneSi+xt/TfBOr6AQIUycQfi1TD8dSd5qM8YHVDCiRy9Lq4oOYWdgAdo+g5PGbt/JQRRZH+YkqXx+4gR7T8jSZPxYHdPF6Er8gFA+DwdSyDbFbYP/3bWoVnflCg+8liGk3WKkwDXX+zL0kuvzJGKsuCw52F+UyKHGuEA+0R99dydoteE6mjbqP84jPhnk4uIbMtr3jOPphm36hMQyPpqrp7T0oupKahOafpY6lkgOQdb4Zcy+GjiR8y2vLXFe7KTm/yFCxfC88adxWzJtBn4Es5cymH/RysTfjYKQthqDRGA4Onys9YHuSW8Hp/0mPvdsQlZDAO6L5sg9xwQGMlyjIfcCeeJmfpuy3Pqmyvve2udbiwjGNdmy9pl0UPhUAdPywYu8Za80eSaIp7Mnnv/CkN8AJ2BdIS2bHFgN6KSew4ZjPNYOCmRKxwazuSwZuJy6UNtstfT8/1FuucCUKwLuki04GFFKw+e88mIBYPApjcEzHInuPhaqyNLrY2KLB+QfJw+X8Inx8hhycCWPT3LIY1PEk/nB1ddYZO5XX8x/Lb3EwgTIXZ0Oa9ePQuZGyykG8+gVOmvoBLGBBTYstTIz7/pBokQCVU4WM1OknDEQFo6k8rzNN7YT6Vshr2CHhwwinOvYfI5TrarmxdO1I6zI+G0mh5w4G7Fzzo9ozQdikUZ/EdJYQwJdXjT0+3F9gU8b2ISRAeOMYmenHB+zvbOMIELst6CWzTJADcp+HA/FsLO0h9Ta9QHMKf45BlSSOOoL0LG3SNEhINZipekuHipTVMzHYsiVeErA+KCghIv8rE3iiTxEVbjQHjPHc4z9ot+7WOC4JZ387luApmwMPHbxvpITHcL3MOm+RG1SJscfbGcWPNdoVK1sC5bmMTpzqswxRlsduEwSkE4Cd4iXJ88Skk7lDS10Oo3k8Q1WPz86/+UhHEs96/IJ/yWo3VW3dmtS3D2F6cID1SGp2EZZArFmBbo9I2xjIzzrOswyaRE+Cj9dHryJHv+9nh+FSKrRa6rR/rwbLy+ktfB0cr0fXNmBlGgNcls1SJxtD3s5K1DHtcwA/WWzV9qDxjKB81FO/WxWkbTvFRbq+OW6OdxIBD5kki95qItnPvzSlqxEa9G8BWj0v6yud6iC3EVHtd7fDjvt84dYKgbg4zW1kkecBDUuuyvA3UqOcQHOqNEK0lMbOZtaJUGNYQb9qnwFARl/KPmd8UryhK+SpOByGQlH2Yv6NrMgQiisi6E2ZlHS7KuhygxN3dTkMS1ldhOSniQF4lG8LdMUkaSOxu+KUkPNB1KcPbNRLPODxfMzbgJJP1YySuXy6Mz2UrU4aCUY89QvfN1+relDxGE7rFux0dcfWvwmdmpTWOyGdl8aqQ2musI7ujqPgCrU15MZ7i5j4oGcoXBi7NUPPnUewnZyKd46pAmzhtUlcIUqKV/2OfERtsytXbBmkjU4P2Tr5kBiepMniSmKP4fwH4yCOLcv+KhQFhM4VCQ8ZlPoRH059EwCzsYs7A3SuW0ITFxCZSNAEzgjP1H7DWWl1PRSSAWZMBOB2X4VxzOlzfU2VU7Oykv4ZyExrPG/OF7Oj1FjCjxVU3Zmn1+fxPI9sIMLfSm4FhUEAkXq6P+gAE0WHLI5eidQq3mEdm4vvAfjvtX6FpnIUYyTbn0vb4NQ5sF9cVhwIPCQ6pzzPD8kjdLgYYv2zfpx5zF2sAVTyQ+mHRbSEQmCmKauzSbZPBGi5MR2fJUGTu4TT2DweccCgT7p72UuWKZPu9s0w8v2FiO8wc/UssQvoiEIrSJg7t1DQGQDR03f96NQrommAQOmc+6M2wxknsKudsr4lBvk9rdlMBODaMW/b4XG33mLh9fHGkZ1RDdPXV8YWQpnecWlwv7h8U2evUDGBVI8Q+lPPDPEqvDz4/snOtSoCqVutPcf3BdGqGlHvGZXMtEFL5gTs5BHCgWnJVXCM/KPTQa92V8ePUTmaqcvYUO4n1d/N56wVUnjfxUMkB6NSyOYedcRGpsmX5XiTSCc4aymaQ1S6PfHcDlLI0Az6gtchMY1xtt7psuUqUswwxzbLImygIp978jpux7CcJIpA9AWdXz38Y7x4C93Q74JspNcMYjmPabknQoE+XRsUdpRBnen/XLb37pt8xGtNs+yc0dfVSxg7TaIctPdqyTRhuDPDCtUEGifsTf01PdEJrhgMDA9WIQWA+FMwokc4sSufWPahUklvKJRl95rZ7EKTLwcvun4to5/iQC3W/u0sSNtPDX5G1XOo0zxw5/gGPYZtQPzwPf11R8ERX3HlS7HmtW+exGCDcBeCd5qjtH/eVHV5+AvZ/PL90KQz2+Z0RihYOgPV1vZRzsvFs5X6mgss5whmCknKx+ucr4FcKuYEJt0dqOP18Vg11GG80ustt0tRiFgVPfgYyew35YWKsvd7KGD4SaNOujlcoJ1pk5KgfYHwev2XzpzVLzG+aC5GcvlJv663jNrIyfglaqPMw55tKG02h2uc/KNJCgAyoiuWG5k5plPe5IkJAIkNXof8SKIvq+pcj81UQrL73+ZEUG94p+R7MG0RwL+TamEwqm1YOn2UGCkHknhhp/A6Dr3/0SSnk3fE5+UfmBMMgddm9hAU+neV57myQ566sXoK4pE1Sleb186fPBNuF0SbNKPrR+HPqGKr3NQxZbuPFBSHmz4Vc/S8fmHMp2/4itv3syi5xlYKLBmWHm+bblQhok21Gj+6hy71LIh+9MmDKqg7p9fH4dFQTTX2JrXK9Y4rfGJHE92WF8LS0qnRUSGbS/Awwat1H/5ZKGFFRlGKK0+0S761UIIkbgh2nWaEandd0pgW1hMfD+jKaCCNKQGBrilpdz1tmaqI9hXPLQdB2PCVgsr2PdVDAnkoaBcGdgGP0y398jIhgqLMRrbymdn6jEbCi3k0mJV3XeoGn65nIuZsQS3aIOOzO7Z82ebtxjE9TisAi1Z1+pn4ocR7Tk/AQzhcaW/BfRAVD06u7BOJT8b1MdWTVlEGu3Sj2JPA/w8AO1TFx//oDnc1kncSMBqJx3v6UjCg3RW7GDuYzzpD1GEAotr3tOtr9eLedfzDxBMm5l0C62nt1hezEnDgRUCdqhC/8o01pG4w5
5OWwVcMh2YiEpaYyg1VdYx4dhiLTxshFlORCU20p8mJHfovGnwJVkLO4WOt4H4/xLOyiarIF7FCGeUQrtVv5bBClF5fxCylhau23LtH4eXnQDtIOtojBtRMgG6oHAbqvOYFi3ZkxE96WNPStjE3vOVpt/jZsc7CGWmdaoq7EK7La5vCAYF50QbnI8oqmThdgEf6yz2ojc/q0LTgs/SITzl4CgBBo919BWYSp0YchCb0yLomLx2CcEpqeZFdqM2MYT882CCXheI1npYb9Wj8oF47l0qijb8dc1lngCeZxDeLGkI6GTQ3sAoFzUvHvB5zDPBA7P4cKKqSCEkZLlj2AbaUkEAcfn5b4DFHV3zoP4RAc4zZQs8VCDnHgKqQYVaz8rzHt9Inqyhyg1ECCIaCEOmFZG85jqLPIYVufXysXkArX1Yio9itWmmwaYkzguVqZ2ranLuZPdrG47sDhI7E/7XnIVwCFmTESdAkppZnJvB1OOxOGvdoZNWOkIYHskry3bLoKVMoSdi/wcLXvzfvBHaDn+2hvEPQl6gjzP61sNSFrbXn2RuSRl98juh4/eEH1Mbd4nnLpFgqSnVHJQVn0v0Bgo0a+hClJRAhnZB1hiDyRUF2Ms0gyM+aX7cj6mOypGLfkLnJp5GbMKnWRiumocryUcQiUCgqa8NGmXzPi4AVeGjAq/MvxCsaB4XR63vosCsg09mAjCWEBWGFxS+9DrRphxW4Ol3BXn3jIwVDYu319QE80iR4yFsG4+ihUViXGMPnl2s0yCYU0Ek/p/ye5/rxIhJIx1KKVTafM5wsqZw2EjD+n0YWzu/xVKbZRWyvwew0ANzL/1b+0zGyYfO1jgaFwvSQud8gHh81zBNizipV3PC8U0bDsvHg8b3hTlPKcFZdnXWrbR4KWjx62UTp6lxGVmZCPp8pKNKDcRSuAn234DbSsl2JgeQnXcfPd7vbaSd5d+iOwc/2LMywXz7GVNGWLNTiKKhzAu1UfFXPpyQZ5R4DAuOHVb5Fiq/dlkHJNzFd3VcsyIrlfJ3txwvUCNfqaUx/Xgo2ODzxeoE6eUHpltYvS42zs43I6y9erYoMHZMsvTnwkF7RQMw82lw93gObMVecRY4rJcTXYXDavOqR0Cf9QXtBfMigcMKWIkXgvAEf7HtkHqx5bO5Hxy6CO4IkZI/YN+TxfyPs/RfiXgz0Fk1VAPqOyfZ8b06mmCyIi/ZQCdjixA6eUBBPBEWD3snek6Si3Lr9OYAvPS2ct7Q6PjnYqjPf95Bz1pFlgq4ayCyU0Y3uY+ObVTUpikzBPK9GtqZIY4C6WELx2cTCaRCCyZ5BjLtWrM7BJRRCX80KZpIFqk+zKy9gvuN8Vm2S4SLTKSvalcPwZU9Dab9ylzh7SDFuEJVXvj5mKtcWtMnKN71hq/P6zmIFE0rIw8TevNbGsleyxBNTsnSg3FGcSdLSp1WlTHR84qxaFfyvSjn0fCU+Vn17Hr+6o2NbZk1n+ZQ+FIaDLrDod6GZm62/11Vxx4sdOoJo2dQO4ck6BfODchph7lH/n+5EeBDSCn58XJE9ud4uVdPv9j5/UzJfUIzJUnejLDA4Ug12B94E07nMkLpt387R4RXoHedrVnSwnMofYgiz8zqXtCNrx5DVm9jUOusa4lNNQa1sMN8Zs3I7q8UceIGc6vTexo+i9b6RT15Z8IU9SxeqNI+OnfwUwNaxIePALVy7nFhjmdw9yHIXENq6NnUfJIHhxbYV6CS4fmR6aTjZgIrv9UGJTMwQ+/ps9VO0tzx+TTJgSJdAXpY8nf+M39IAVHErKgwvBvhPJtPh/5W08+wL5KoQvB6mjjLz1JGmw3zaOlah6w0Mr7frmysX/V+bUpavmzlawSUxd9uiACzBxH/WWG1Bs8L2zy2C4+B/nJ8NUs72Z+NHKVC313OVKzpfugM60kGaS3hJPu6OsjbyVmlTo2w4MFYtjmw+I66f5Q+jlEYnmL498O2XNuqGV3o4dnpFLx3zPzdLIgQpojO6a36M+hrE3HNVrg4VGB6OnPkWj5pdZo1g0bRi6fJajkqVnicTHdhBD3lXMKBlCfGze6oWqjASAj9lHLPTK+IMFrBnQy9zH1vaL1rVSZtl1VOOCqfAzSpsFJnKmO4SrlTdpXH6wIUxaStMXZzui4SHOuITNQyakKmYAd8igk3hvcrY4UhkK7Je2mbeNBz80kftVyfri/92c3b/pa+/P9Fjlqnt2iNKmpWf6w+01OGS2xBRy5kbRQUOo3dqNcHyvgemb0yCTySNy2JEzNId9ELQFdATBtFNzSs5xMmGa9Ggkn7rhZgjEoKpk37Z9RAf2iOvPsyYx80BY3EY3xOET7Ipcoze5seKt1Cfq5ozKYkWapWaJzFrnM1ra9JBvLqVM2O7swjlMUTQz5MeyvxLFNn15WyVJQF78YneVmQWsk0k4Bmaya9KkUtj7/X/qaQbrhXfe7JJjxlVnD/WdJIaWr/iXn4osKiirCbOMG2UGr9jO9wRSIX4lrUbh3JcselHrtu9joJpd3fDDru9N6uYrECS+mk5RTcLQL4c3GClNSufyo5rn6pOgqAjikjLhq1vCgP8epVGUUCu11+39r4vX6fketZPbnuSzfocf/Wl64JafgZTHFOn38TM4eCdwJ1+W/3F/vqMX8r41EMaCqBhdTQI8YguJAy0pzqqIJIKWU+PFchDvW8Rve6mTi/vEg7s3AJQrqoDtfgFbDfRdvTu6yEcnpV1IBeh6oOsT8SUJ98vBZce5EPRb9GVUIPv4RXML9OPZ1X2OxApWLDivcVTIbXZ6MoMTqX1aXLuj8kN7fw38LSiyTK+zIrb6UVQppeJ+X70C4aGVsf1cOB9+OAhqhUx346rAf/f/FdzECZvwC5baraUglXuNYmTVM+CJH2KMuV4anIKnoZXKEtbkuqJViAWRd17/Xk4luF8Q2Ro1KEzo71GrioymCcNbkr5Ux5IkBbzxM3R5pbt0dgvpvCkl0PvwaWctdrXUos9rfarrr5PXRvm7g30H25hVH2VUWCEOs/Q7SXLR1fxLgJ99+7urZDNRt4GMX5t4nSU/ugOkW7FzJK8wtHwLm+Yx1zpo3K0SmMKP+UqFqSJTkTfB51vCNllYRI1BA5Ze94+BNsLok7DcYEDsRDR0AF+MWxJ7caZbUxXWmb/zKxTUdx9o2hZ49ejbTM9yuvsSIjgkQmKsXlDhBV6nB2EejHU5o9SKGXVaC9M1ngfP0EKGY08/msNYBzB3fWZWjyZVz3AyCUXdsKBPU4Bu008sYsMFYDl47qqIYpgmCyq32yoKFSHwo+qnBFYiiSBA5xG0Y4nmwucjqJsOkOcbTrS8jH7EoR8yZUTf6LenLjkqywG2YUQhhhk9Dtio3qgywH/+ksTxGLcH9/TAdERj4WYO2Acuxp9TYkI8U0slhox3S8yutZpUwn2cxyWrM60n3BafhbTgAn3m/IFaVvzaclQx7bCb85Z6ZRwBtTPM/eFsPDSXJRbmLiS2+Qs3pe4HFo2mr+3ksa+kLIVrRjrZPyS/+/sdV1PaU0Fd/ixCOxgWkM8
7Qe0uEaTBIZh5spZiALk2pT7rZ+XrfEfiPUUy00GgD72oiQahf12RbVGUDZpoQpZflnCNUvztCZkF8ZTsZsypIxRrrlWsOvxCLbJplKpjNwqh9pfH5HFwocbmqelIxLBCPOLIs2yMY7kBUp1rW34wpqpcYQdk0DE8z7/3BDmODhLuBtk2KQg4cT0qdHXcN6p8+xYn015uywYvwz09HekddWeNmckQj265XhFgqWxaWAxQ1OFgLRfnPxplXlbWKKmCodq0oa1/SvCH+YbETYr5l6FS0pS6BAySBjawCs51AxTaJbhIkl3moeYAwXIFFSwn5DlomvPNIcoHuzXHifOa+Ym7yA8gjkVpm5ijihbxXiUa1uyiMVLnIPvklRvHDN/FIu9SG4/s3GOdaPrJu48Ax0bgtdMXj0CmjiTn7Nma8l9sOXQRd060eG4JxdO8aBkVPhr7FLajZK26lviAlkfU2aOpNOkSUiBj1nW5G7uiNBCfFx9eE7uYfKsQK/DNw60sNvjUBOCxr4/quFnV8xhTbwIh9je6lkgrnwM10glMLrfhupAS2oHh+NLA3w9/NpgL68Ol3ItOjNidcC3hswO4urbG3LpMtFtD4HivGpI8H/tje7VQTiH9eIIC+GTU1ojicYCFTQ+WSQ+l2kOQvUozPKsu3UlITWXWRr/sWv56GourDoNrAZRY568811FbO3woLXWMmhf/9zApa6A4IGBJKx2XYigr5WAVByCEcJa/cd51/PX/L+wGXJIJ/3j/wNUscZQ3b++yNsI9YnWfG2i+W8ZaXkHEHVXMqT4sKkUiTMVKfOx+UrCbswY6+2PY7j3cM43Bp+dHpCDpjyURcTcJ4Lnzg1qIDubbRAuUjv27buxnxOr2fH5cViemw9XJ4OosaL+A8LR7vxH3uGS9cPgW8phQab46ErWF2gssMOYtA8FRkSyJkLibc3mV60wKMooOK0HIxSeADnXMSTCqVOjZzyLTMSlGtDVzNXcewp6kCx4g10ooJgySGut3RbrjQe+cDieAgs/TL+AV4lLOJcrl+JknoS2A1Qp62KVB1D+YxtflF6hhd56jNSvORyHFel4jpwiDgjod6hLhnhmt1kqsvACOCtWTVwlQj449+st2rU12IQ0gLt+6U8Z4NEx4c3xoOR+Rk14WvCq8oxJniWXDsgYruQCp6+RTixFH7zSbpLfidlG4H3Zsav6ufMANV1i+prj/pJCA1r1C1HeNtT+fB2Y9ha8uhcGrABwNqRE0b/0eklUVc9FtDOwVN4ADy+Snk58EXydp2c1FQpvqwnor4UW5CsHuUyLDHjDiZtlOZQ1bUGUmer1kqXSUEzO0suLfBolCRci4LtcFIjNWSUU6ZQgUqKf7ra1AO60cn5GjY8ZicbIeothpFLj9IUX0eJNQIFTIlRWl6xOMrVE6wXRf+yXf+TgzkcY8RD7SF6T1iY5aR42auUj5FZRgUV965aSnKiu6Wsgf6YId/4o9Boi2mdTNrJX6iXhxX6Wxi7/Dtb6dSc8fNyIO4796U5j8s80aJjzYZ2H0/3QLDv/Mjcf88XguJRG8QlYtbEcta1Lrz9ophA4iJPUm7FDrWMoz1qRLdnVAkHZqTl29WLordJke3/UsJAuYmCiHnuDuS6JW75s6uCzylrrixNup0LPgUWXOc4dbQlbHs9lbuz4X4UGheFdZnFyLOhaoxtYlS+3Ik03RvBcI/GTQudlTuMWzzlUGXXQhFv86XHdMt76PI1ckgxaBE/IJ71aGUyur4BY34cM9o11Q8J6oWFO6GmD2SySQOAJ1/SwZenVjANysKgwObpw1Q6JX376LtgbK20JnR3H8j1Ilhq6wJGOJSAlNNOCstJqfGv3Jr8RYay3oQY0Wd69CQHCZtcuSR+wKPyWSn8z+pXOxW/iaz1rL1CKxz2YS22bOJaZVivYIXi+wWg4bUBx6BKov6ktg9r3Vuiqu38Rz9Wwy87ymAgXX+1EEoMKeEvs/zTSXap6ZFnA0ZdeIC4YB6v/ZQR5EQbZcRkgqlQ+TSP8f9IlhXaAknZKC/uUC5juv0pZS/vhwM7Ac6tiEPoe6uh0TeFg9SHihqAqXNwWO1FCjOTeOH8KUdhKqg6Xm3EUXBh5SsBW0wOfKRFrR+byxQBxA5WltTmqeP9OJ2ofcdTeHrUOeqacWwhaclD9qd6pLk+YX9HjCIN1xUodAuzIzbVJK6ejBXC26KD1dbKmHN5ziMebQvoJ1/sBzXM3E5nX77B0HNXwBtFnlFxwnfQfm3Os6XvqcywzYpow8946x2jO0+yozyXgFpGy2slOMLINGB3oMCN9xwpTkNuJaY6u9buVCbTr7jKZIvykfM9ddQUTfgbNXs//7rQYdDZefzdkJhZWsR3IxetNylUIfVHSR+clhwq+fbPhXlGcRekyfw+uQVCH/UCdvnyHy445opnOyKVTkvaC82La9u5eFbKC/Ev2j5NCiDWABJU9y9HF4kmxo8b/i1aUstseveD8QEK2Xv0Rk2QhGMqAIUU6FuPh86mbwuUji5PcRV75mRVS03gTtZlrJrr6Y2VrMppvmXl8BSzwATfI+UKR8ZB4yufkJ1U8OqplQWO+RBqYeYUfGL38v82atnNWtouorGx5N6CY55qc+dS5Vu1QscHhkO8EgdnYxM7otuDr/SZnbJMzBbFsO9i4NSJM4XAjtfwZWdfRzWtU/5FnH11/Byu76Kf+qJE6rZR+5pBOkwR+4C+X6X2LthfmV5KZCtZg66Qel5K6couNtS1oDAnOuKvbV+rBEaCZbrtmfeBbTv6weTYyR6ZHiHNK1vCEiCDELWVT62HXxNVEZXsCV5/h5/bPpoNxUivw1hj0RG3nLZxZX2O9qvB5HnAfuGCkkUPzkVsNSaire/TAKKnKJqS+vtebupAMjPnDvoaOu826X7vCzoLotirOTu7ZdPmgTfA5p9c3axfIs1NZ31BFA9ehCkWCXVwzhEyIopUbLgSPnlSV3/pcapjUa0V3Lm9sJMcxjFZZ9FyfKtsP3QRSZ2r/xz/ZzXTetCJ+8wJOYysw0vzAtFvErpKv8tsnhjTFrGFhHxuU7w7qgVeU3Wdlfjotsa63NqEu0UX4BVpXmJEOlac3MfFcx2MnJsZhKQHKwDJtlwfyU1xJ+qctjyBZdQHucoxR/r0WSeOanEcin0n5ZsPx0FL8V3Fy6zB+r+wGE7leZ8abPBCYlqQEH9JUOGiJKqo9Fa2rvUi+gMl/OWHhqG92jYPUClP5PTEnMofOKKKBlk/XcLqxgMSmZpxaQd7Wdj34dWE1Z+4FVgmUWIrCEsgAFN9apYIqHtyn7VYrZ8Q2i3JqBezCb6dwoTvs9GfgW9yzNChJGtK7aDCG9Fq8L5/K0NpcyFJZ05r1YORrr8t+v1jixq5o3MfJNcSU+K+LsBcHABRiRMZmdbaow8UQXvy9/HIQ85dsW+YQDmq0czjEbicJr3/oqE0ItusPZwJZRSa24BBVFiZaER7G29wxuFqVJArUtPxSZtiqZKQ72oZsX5LpcQ5WCjvXjXhX1P+a7xRzG6bJ7VaeMfZRtF/
7Tp+iai1S3XkeBSePF9NxBds6K+Wa/ZLscBJPRS1RMu0DIcXAPpryVA3hC5AXPkX+ZieaZet5671utbLiw/+XSEyb/kE++gVgRnDvjqrsb4Ccbn9Av99lqt+LEccKLMUfGp0fHAZevApU1OEx7HZ3izJkP0RMp9L3y5pHc5rKmk4ZRnW42Wp6Yq+wRA0Srp0IoMF1Jv9doiPUiCkpbH2IxcKIF40M/sezlA+UThVYLK03YtWjIvRIn1x7Hous8w0RdC+R/chMcXtZaol9HPHFOcsAcnBOwEb5OfQ3Bv2a6ue+yC4ePBaP3Hm9PpyQodnG3xj4RHALhrNb6ta2Zu99KOM1qEbt/XVDrIQCpCaUmnf1poc2YzQsgEn6/i9YxaYskWLnZtD/bx7WyPHobwahGkik1M7zBxv5lQAbisNM59CwK9ahin/RpMlfeqwqgRUza/o7eorecQiFjrvwKCGaXBaVB1dYcX7VfOnbfWZBStrbTOrWGjRlWHmECxJF0nlp/fOOM8ne1mO+j/Trf8lIRzOtVSpWauuq6sc5krUk7UkfHlmsGjc6LXvoptFtAvvjuNA1yWz4BdbnNYoKhT5spGh/8eYIVaE4QmkPQNp5fXhAqCtyI8ObRt+ENBA/d/qwQ/h8p5xCEENB/DIjuRhIBL2rQynHHds5PLfoE/peFoy1Zt+RUgmsgQRESRpRHCf4MEjN0mrgEI5cYs2iB5tnotdnn63LTy2Jmoh3gK3F5pkrM4xJ1FbGpAb56rC2GVTIedmcpcWC8JMNvr2Tif7iXRHc1bZkrDi8YyVHaer+tPfnsuHnr0gMVjKtfqgsfcxjxfbNewUrnEWegsuF9rUFOt5WHwH8tT2wmUBNi66yisOqhHrfk7QwmO8hADbqeKBVtXC3N3lFwUbTYyWTbSxfNBzmCoTtzuCCUk8mtsOVnqnXt+oTkPP9W2j08IaChr56fltYT9vP7z8sZRNGQ30xSSacoZeUfLLLeDqLZmoG82taeGX2d9VbVvtZbdr93Zbu2GPlSPAMsRd0mIMJoKPgb1tsOh63XKFuWC6dRgoFnx1+vlzIgzs2O4cqr5kjaJUanO25BKzsT3ZZW2KFauXuXvxX6tSljrr05fZ9Hu81nJlunUtiLMPMc/+XPNYNz3olnzS1a6Ohmc+GQdcsI7Kd45jZ8U+MFokA4l5D80ki55qMbUXb2xcyzzDLLozRGo7J9CgLaKMCxtx/3jH5r0YG6S0sAmfP5rn18jbiBFI+xBUKRmvHYgOQzfyul4kQaXL6iw7qCKG9hh5DQsbN/X2g06COOvGIMxypvqztPOEmHMtrwJNNSz4571VIiW5YvuMq/S6t1mybgXx7+FrF3ZIKxntwaJxeQz60py5mAtbtZFc9KvI2sj+WY3gGkEfpgy/RMeRgrsn2SlNw/L41/V1lMv4rm+Lsnv+ylRQaqLEJRdBEHXBgt+TNfK8yFaLUa6nuOeUMsq5L6xp0N7DU4vM1/JgwrhCmWROi5IfBnRzaiWYsoovxl6kgRIE3HulcOf21LpOWItNvhAEvLxGh2+KcPMOnT5G4yKV4W1K2oGltQ4uHF8i6KN8W5/HzZBFvRXsSbIjQKKmvb5Dp7Ku6qVLthRBCIumDR6/ArOx06p4L15r0FsGJDXw3d1hRXt5PIr1YeEn/o1blWz8o2Xr3AjyNe4miUp+V8tFaSIhLMAr33oEyW7+MN3E+Bm5dTMHqkHEq0NZxx4IAJo9JZKiqOxcJSg+RQIyC5Afqu5Xz1NYMaiQjvLCHpMXTYMgFX7nqPzDv0PAnfBfwdM8mYC4jQZDquTVorgyD2jmb6mJMLBCthCXc40STdIjtbItO/PWiE/Ew6N2t3bVyj7AP9IdacZr+2iq0M36Z3w0vQL7AG+oncSwilxsPlFdRKzH2d6TdddE+ytwAEhnF8559gd84f3hYFMrYLz82jrzqaKWR5UL3lYoG9yP9GxR50GCB5ZcAo9fHgdwfdcl/s2FRlB1O49TWjF0Lbm2HXXGUCm36pk6UNIR4JNDH9eqhMPLy3DDxR4XGV65WUjRjcRaJViwLXEjXr224jQYA2gmfEzAsv67sOcy4UDnYRRBIow2YfPVX3vyQHEjICpnTdLTRqyye5/DWeY49cW/aOZHLw1xOgMGahcyrOCBGZTjx/e7DY1Ab10twCqnNUXT/HBQdaPx2oy4H9UVaT9f/3/b7kcoM89GhfpibPW0MKqdNwO0NQnDO7GLORHKfBF0WYHbrGdDYtK5fPqsjfTVp+MYq1pnc7v7e6AD2h/4u20v00RnGAUCvjDQgDdULiYRehWRzjd42CUiN/0SSNMVzsB9OXl/UZ/YtcXuqFEoyYSQD9xuPzKXkb/IUB/8HP317nRRw8zI/9WENtselNgZT4/eZtkegNJN+lu4Fz2RaSrDVCmkMM8X/rw+ow8SgYdi2M7bTNJhHzqkR1pHviUtbtQbVrGnXn58E/+Lr+StoQraiP1bshXyzKidy2wrOI9Yvsv0Cr6TtCIUnihqIA9Ttp35KQgJJcPpfFGuIC7ghtz7UP6iu8bpLLGYfd2k0C9VBIo1XwioV1ckb/ppUO7Ll29G3pFAN37i5BOCOlkFCn5RBGMdsj3LrlX8dLlMSXbp3J2ULuqZy1b54rfmzyWVuiQ+hOB+KmNDYWrBpSWOYoafGyUpduTE7xQ9quxGpqwavqmPPHcOxqF1FY4s5a7UOXl8/m6Ri9vgdJj7lWwSKNRsW3L2p0KJ7ZohyR+9cgkrd8shRCU+4pZa4TREE1Xo4OIUpOArqUcYtl09883SkLsolirLyigJtva87f3smjAMP2I8kmFQcD9ifmIKPsLkSYOlu+gQmAYDR2PFF8X9fM3HEp0DcM+J4vXFxNi5xbfhxTMeOp668zn7XAOL9F/KAViSsNFjILBDGsdrDM3VYwEVw8DE783h/MdcchPOjHte1izxuPuW7KDK6S5qCbNWFfikCJ7dl4QqjnM139OFLT2cxbUfaXXurqBoxtxc3++gMbhFQxUmAsMjfRTSHKBt9vQBrq/YMa+wmvC2gmcrGdh2OIplKIrj88cHFFnjVW6TaGic0fFpy09YSLiWo/uct/V/6HFQVTv76Q7OQERh6+4hBIwwVTcKtf+Hquc/PyX/saHNFqXNWlxRyjWfSvIBfGP/ClYP2Pme8GN+Bos2wmWfJxtbP5XKCJRK8y0KZxIj/Loz7Nk+yBPmNUuY7x2k65VTx/fMDr94/kZuWyjokww3jSXifi/wvod85jK9pOyhe71RgUaiPKzN7Z461mwc6ItJ1V9egV7U26M4BBPJJarD+d377p01lCpqLaQBQ2EigJeUaq2ITqQzAMobl9TFd4WjjGbB+7wd/u7/v5BHDSQyFwdof/Y/Om8AHQbTiOhNrvvqFHWvBhC/n/L01lzwIWSWq9xTH3D2Uu7la0n8kB6/FOu8Tz3LdebHwvop16l04ovN4K9qtJHqELrB/8Y2Pza3c4EUV/d5xGQitThNTMtltFFDj3bDYSplAaoRTB797gm4fdj7+RS59UdzNYhAh+W9ahZ
llpt/zG3NLrSYQPXAOvmt3Y/wIf2KR70Has8HnY4qHHmVSxJLA9UwZj4LJ1JAWkkB+vsYaGKrZtFW07Y18ZmrmTCNB27oH6pEPZBDdELehPsNuabXE2MXIhZeEYAQY3Y3hzY6O4VvXV4FbSSkehE3sqzdgI6l8vc3t++b/vh9cZtQe9RrswcGdLVkMrYQzfEnjDnmXX4eCmPmvZX+W4j1Cs0ObnIIc/NGl/8DBHp3rhzBmbvV4lT5f9Huj0YBmIAAACMbdu2bdu2bdu2bdu2bds2PlaH6CCHXswpaXHZyJ8qVKuW0fM3giMRTNSt0rmICOtVWj3SYFgChTE22QPGmaoNHgJfF9HabVlm6CDzr9DymEapl45f2dDEnrtMrcPebz4JwS+NONGpG19AqPdxedRgRxdSzusjHM1HEHh0Ri1g5ZtFiFi6K7hhY0RTZzMLyuWwln/mwHZgPotM4roKVo/vcq7AgaAo7vroSu4O0reqhS4pFy9phJDxmFMi95pOxOXy3PtO1j6+v+j6IdZEvChzcMt9eiSlDN5RZtSLW7h1o1DTGtIGTWVQMAzinRpUj+I32/nt2tLnnYYhY+puIPdsFpdBfo38SwAR2495wkRomIUaYAQynUB4SkGh3kyGxkf/beMb0ysbjfLsGHe09Ny+YdOvPe0NMdTV8r0X+McqkCAoRHeEgnQGdT4lVDvqyNXffA0wnRmxEpwWxpS2R1pLN/1DvlTOPAXuvejteW5q5Oy2RxmNH6wvR+BAf7zVqtp1hwYiuzAk+GeP20aozsznHZLCsyZlLL788iJy/2rYrHRTn+Lfsu6ezNVTRW9DoWYQsvsurtmU/UTZV5gmL0uRQAniQhV0TCfSd0OyMTlmKi4BjCNwC+u2Rp6a1bz+CaY1PgY4GqlypwWK/0JAShFwLhQ7dQGeXdbKyb5aVQb749DdITL74Yto4w6IKS3sGCW3/aDlsDYT/J2xeEG/cNfh7K88sw0sCURFYT8yVcWQaNSf3XdDFzoJ9nJyXa93A1YQr3x5oaev7EtjEsJiR6Kf0VDME8ReHBsApnHbIBxCLXkiqx5sIRPrW0CkoELzqxwETYqdsQR+ztUEmAfhNCgM3/s0euvUDtisPRejF0/QPc0SbNgcllmdD4uSBckxNr2m1pnI/cs2TewSCTlfadPrZbJoC1lGgyBqHhYxrLlPLiJTrCBOcbaO70+lhCWlxXtjTscvcGs4b/VSPz+ZBUtoRqilx3H+3hMWifDFMwi0rFPgDqM17hJ0xKmF8W7aIsZV36hDVd7JugNdXz0Msr/Xl8BJRfV6fbSgDSHh8xbkOoAl2ZoU5sko6CeOQ5QyXJ7SOPdd+ORGoOEEAoCuuhENZZ0lzkdmBRdieGl2hATi4UdF+mea2k63LQcF32w5jy0rX5RGyWO2OG/ngL+tjNCPTto9N+8e3W0IufPRYoNrnjYjwGKIEXx6riVmiu1i8tDqdNGV28nt1RCZ4e1OGQWmiLGrfiKgra8YH23tTyrAeFjlAGbdOE7JP79A7pw9lw9Y6mcTp4cUGl5MlO38Tf3njIC2fbQ6Pw+t9RjhvAw/yFUkRie5EXaeTzmS3oUsr77HIVF+5KAHXBf7iNfaY8gj7Au3j+kIEVJU9ZZyKnGu+gnlQGslEfogRea5fWrhGyRYNTerhs2zNZTOFd6hEG/UgFaUzhcFA87gKAwcUUz3nNaxY+vOJBlaP8IX18ahBEKqxnJSCZ9TpDPi4/FqSMXqUQZ+8B8y8pNmhn1/mInBNHvcFjgx7v2+yFeLgcXcwaaoklzh96/RwQoiFcorou0vg7eZij/iPGnblYVW0C9v2YcgFDWD5swqzjgTApXPkfHLAVGJPNWf7BGAUrmp3FRsgerPIAjkSoRAC3zdkVtlAzLhVr8XZe9BmNfnRECNmFb25FWo026r0gZQDCttmLV8hhBcNsZyq6HgK9Tq6vJZrzwLRg3A3V5IKXITzjDKKIAfC1+3XyPGhi47UQW7ybXgHyfqVFsRL9ETEaJd1yhzPI0fMZ1ginz3IvH3IM7FYP1WAOpHY8t9Li+1L/vrzxy3hIV39zVjGXPOUx7qekY/Q0BWqr140JHdB8OqT19MC/oDoM2Pyqp088EmaJZKqBEt24aMgOzOFVMQCAC1juER6sTkr2rradEvIiQSf6pfy5GPC32GgLVgpEQtKOaAUpKe8ol5ZROpvzM8vZT05hnWf3VrtpZe9q0xL6YEVNO9Xmw6mDt0kXtyB2DvV7dZxtjaAtvRbxbac07gORA1vBVZyDM5x23K8zPUn9HSyeiwwT4e1IN82m5r1WSGsqtzlK03w+wOSY0OALvRsYUBE/i9oNl8x7HTKRoDiSeZh1iIhw++qoI0Att9ySWqowhwXLsxsU8GgRPpzrcJX3lIiCnrSeRP04QDLqerbgTkt9GOnjmu5AgG2ayOLOpdGN1pcFfr9pb0/cjsCvcVJS24UO74BmlYUQONWNmjbiBYAKNKuLuAviPxd/5auT8DQKpZzBpExEubz6jqyTf2YiHStkMP7OTnXmVfszpWf5awyyDWsCh/JtrzS9QKFXoNFIJaDekrsFUOimj4FDC/k36PwipBmvdUNThI0mIedCCWPRTVIgn2/hcdRkKYl6lPKAsKvzVgTo53Rp8n4ScSxwoR0lrrYbqoot3giUoMvMIDg8CL/rm5u7SqiFVZJx5KNwtx6rWhuZV9pH9OxlXZ8q9y/QRzCpaYIvSgkDgDHnexoXTpGVItEeu0hk53rn62/5D43uZTiG5zL9P/7dCW8YUxPWmUksW3rWBMrogmDkR93ed7qtypSWKr72t7joOAxNLUaeozGze/mnzovDNBP7nZQgkpd7LZeefrx+0mOarMCaqg6ryOU3PP/JuWG7zLibuuZkVVO3w3fDBCZFmNqQtvn4PpYuZnYgAm+Rpgj1RPax08EbE602y4CrK+uA8aRoW2sltmqAFvT8M5lG1X9W4nXHlx6YHDW2daS9w2l+2I8yjfujLQbm+L+agXkIItrrcWhnBAEhnUGrDpiV8PsW0QiHK5/mo6IgIrjQReJVK8+ivTpkZ3exw7dMZCj047/nkhoq4rXdG0rREGhd6yvfkK8mv7faaIybT6wGRgfFPrfZR+M1YrTDHcULjTBH/aCGGiCNw0DBxOu1i5KAS0PGXeDi8ZfuA9rKOsuhE3JHxp6PqGh+cbH5BSKcduMYdU/4cq9RVUdU8VQaSAQGTCq/xQC3UDDun0D7FbFdRdGM5Brl8Q3zqhoUzHSr53FdSLcmlIlKlLKsc9OTavnUDL3SPrOtNw7C14UFshL8DA9yg2kL/IlIxksO8VOfjm0OFg1vZW9wo1eab9zjLs9vHEY6vB4Mr9wSFQn0qcWKCagSGcmu/Naixi96IhwNfMgj/xt6yxDXyRuijAaLCgjzHWTNSW4LUU/V/K+gr3+QHTlbFocF9DE7BuLBb4WzyBI/JNLU48ifzXiapevcEpdGbS3Uegmq/rCITF6wjE2NosEEN9ZtA7FGDu0CIPWDsqdbswqdrLjTUb85030R/8r4I
0k1U14+TRwZCHfw3FbGQcoo5y8/1EEctRxJaQ3ESXEdv7VPCVDjqs7S9s6xTwVosDnLSYZg1kpbXiw9143GVy77pBQXhd5lz4lh3BNsjTsKWMnCyfoMZd4/NwY3FXjwtdZxSXMjvLo6WbXixy61SdnKuyGEl1JqXFkkh2j99u0iDIDB8RknOetO0q4UhJ6x3XNGsGUrOOSEuMOsegPY7MM2M3xaRzuyExpUaaqydL7KcUK0r+05CshD0dTVzaEFSRoXBUzCCjy57FrS/BKKiVakJtW5mm+p47Dyi7Kx+YMepJFnyli6BlcAU6TaSpwDAVz8xa+sXRJzUEE+TrfGWzuJ+A+7wnqc+OcXJ43ZOhpb6LAN5f/37SgfePRqE989ebcP3IFZ7C8cf10Z/lomwkFRHU5jXpIqkXq7R3eybk+WeWkWA18kBqcMm4gpuKtMHS9v5XkV9cCre2M88qNiGBTIB+vl2i2ucYnInVU+Wp+M/xZBK3luF1PNE3MbkO17JUoaQW5jyw/Eg09xFmr0lw6onRfD67tG1YU2seHTba2OzJSx/FMTVYoVaL2XBLn0U1Bqb4feLiJD88DgxUojK1PRPW08u5jm8EGiDGEGWyoaYM152sBLdbzjbffXMZ5dijV+wVGvBLm9ZcVkuCFS14cQhj8KNu+JJ0HdJMM0ldVL42CHXZvep+5ERbY8Bwy9sLkWCONDssvo1lYewRc18bzi7kHentW10X1adY633jdWdZ+MLzdiwM/iAxFaqGmEn0IQu27FWy9iKMlnm7VpvJLWpScEdpjh7aeghuJ1G72hT2QUvSo1Otgt2vqdQRaBKrh5NkZFQWlFDeEmJ0kmjnv+QotnTPlJxqmZlA+Y4igp9L/txIBWK1gc6PV+T7OdQ7UxOs6AYk4nfXMUhVWTcmsHNhvuQ1mIhZrzJ/z2Eitz5y/S2ruVBpCzb1isP5Jg3XIy4Z6PbfgVXdcPe2DoFJ7jijeNFFjrr2C9MOE3iI7TrJo7quGrpPu7aFOcnpxx3ubwQYbpzd3ryXNJJYnVKMc6hBxQmLoIKbyDOx2XyjSuPY3HnyYm9lkyi/UCNCJ1PktSyqMD6R4QhQ9aV7LcfpKt30ZV/y2zbWNXTCwNFJ7msYsdKI4dMdVHsZ6JEGhFd49OJS0GNV5g0XSTmraAq89uAiYaMN4aQW/7Sl/GqF76ItJAZ1Yt7wCNpjsXMg1o4NAq33YfHlxyt/EJaPVDFhawGhSeOq2FLkkeIh1rshv2av70s26A0H/na4gKbJf88CJf0U4bUWqK3GYKm8Y7NrVSAYGpN1U7YygH7cF3ICQ83v5VCy61VHAWn+OGYQHkwWmPcDcZwjE/86RZQMGW9i3n9I12GFHKAPHJJ8OMnLj5eM+mqzbfaHimXLjaSA23TUrUjsYRFE74vO7yMwEfTjrv39bxHCCt4WHwMzKa1t2npYnvizARl/BAB6gSgSNBAcbm+LIGJioo6EbE4WgEIT9FIvykIOfmFozXvNNl5YOo1LCdxFonRo6Tsuz0zlm07n5qHCguuBR1MAhgmp0ItW1rGNC5T4rK4PDbU6OW3uCOx8vxv3cdgrG7ibQE+eZUiLR0ZZwmkwHg31RismW/osID+t6kf7o/iKc+c7eTmQ24NClzgP/miOEeCfMcuoWDvF1LTmJJjyNyhE8ivBLIUf8Fbxk+Siyd5yJgmdRTrEPYaxa9AnMauDgttloRgfLOdt4QF2FCPboXDWHjtXZat8sC840OJdRpmtMYWkwE8xKazggsd47x/2a4KYuuDsqt9T+RsFCeJur9nfKeAGIuYZ/CboaD/fHVkWnPDSZ1cTte55o20AjPkuc50IahngTMdY9/9k3CwH3HHBtkO40UuczPlYEH3LBkJPVmTJ9TX6Am6WcSFg6rjm8SRIJVATxRyEEilv/mWgWHf+w1BXxAUCl0Qj7weRGAhTmdKwulaoO6rZ/IOTdCamzJezuQdCn/EQzWKFTy4H2jIWdxY2+MuCZNYZ2ndh73wimKVES9D+EDI6G38Ulkz3b2KWSSFxn7M2xOzdgMia90ORoNg7Si3ylKTWtNnQgYPSqU1LWNQhAwfLE+UV+dQgrzdYz+TbT/NYdqkX4M1eU2Kgq2VwnLXqnLhypYFnEu51vWeQMBh113EIrqj177adVANsDbzKmOjOL9Nzt2iOcicYdpyvLUrjz0AlxFQGIiJExsfrooaweU0QZCAZ6zubf54Axv6Seoes9aAA0suUBqB/gL5f+Vq8i+dPgWgvqhq4n0VCuPFR+YmWLc6QlQOl3ZrvduhoeQf4LA9pXSKMvfzlgvdLstESd2USkmIqwyR7P945aBy6aPYrqYdp9+nRd6Sby4GFiTUW8JbXgiE8mpF+6PCbvnvRZ66nVkQjIYmcF2l1l0TKf40ifHwcek+lqnWBtZoGeH5dqcd+Zgh+Unyjlag/lyQmTKbsr69uyEF+QMoPAWn/W8Wfk3LPEm6lP4i2bWH6xy/J9YiBPa1Jx1+RIVFxm+WVtMbdQLAeTQu39Yne/6gIDmVwTtZ6PedDxPsJzUwkU1hQUNaw1ymEMZSBCzMtEhKJ/zH8xBevHIuUQutO4Lx6XQj3gLB3OfX5xKT7xyCmP+h73+x6Y2sSv1O3o49v1Kh3ykwyRbbToJUx2+QhMiqR0pFDhd1wARQ18aSNG3hhiSoPLIetzDWCYe9VaMQ7rDDahIJUnySifwNmWYSHcNNX7Z1vPq0OhxdxhdOX3+wakSHDqnEbvaEapN338zKT/uEuSpm8ALpXgYKXj3OjKtBQuVw9lF4fPDnVOKy+M8cBqlFRvtX89WdTJulsJVwX4DaPNAmUNLc7TpiV3BTqaZDsArL+yH21xu1IN6KgqXJ7f6N4YVrBYommUwEOI12SH+ccz9g8qxq75XQur6nIWvn7kYyxW9LjZJGFc/yJ3w13niSG+1CGvw1/Jal2ieqslB9tgpQl1vXt8hwX6XaPj546LCoe4v1pAr41FyM69EK7rltLlXxJD81f9lSCsRh0adSqS3KRoUScAOxlf+W5hfqdtQenkESi7s2Ynoz83WrrJ0ekCnd5FmByu5Uf++SbVLuCjvJyFwGsmdg20nOsMdblmaWc1EFf8KOBiwtL0Q8GbJlfRsAzk+JJPkmrzdIlVRmud0T21TrqK9MamZZgJczrdX0HVTzxqDa1CZtqvX9d9p5Qn8SJ3clnjt7fJtzYy56XSikQnmQPCfdJwsrQDW7Fh4bFJSOjzDpXepl3TeymzSKy2eN9BrhlvpbYM0K/wnUqDJdD1MP9vkDglqNEUmD+EPAw12E1MQxDbFP4mM1DQ+Y0vTVtECiFvueywtSKX8om00XzkRdlH9mYGeJSChHf2Es7eOS2Q7T4Pa9QpmL7KKufhslwJedmpOOkp2zQS4+qYUNo5TsUmyEZ8gfjs2HIq23/hvu12W/EOd2t1QDJbyLDvHUo7SupBRLnd6ZmWexowyWIzBAoLYtswuOuHGyvUNGWunMs61InMtBMn41lqJhz8yMRR5P7rh8SeeonezDZ+9YTsFoDwqwFdr
CauY7CbgpIK356cqQ4v5qqVVm3adpX4SJjEm+qjXK7D4x+/XSH9KysPYWrDIbpAj6f1hafRxz1oCRYnjcJqFfirHfK7Vss7b8h7ozDnECvPpTcGLhNmbNJaKjPAH+AoJSwThqzyT2m+ydUzDJzE6OYK65jb/F8Oo3OLJK1eSZv+40+25Mgvzom32v5LMmWZShOawDd4PtPTgONHWDfWUywQU1GpiWMU5qtBrCUXJLKQT+kvu6Q2cz41GwthiMfwWph6ueBbK7RxWpjwWGLlDjDX3BQ8Wnk9tKBTVBR9ZkbqRCaV/XMzlyn4xuDqjEV33slvU5ZeislN86wSBaYXzWbyOPntgf4jjHVGdrdSXRj47Uf/ahIUjzxpE/jRmDCwC1ERCr3q7ZVLE+wrWVoJj1PsX/Cap2E8pxwfFw6xvoG8Y+2h8N3RYP+saWgeG46CSOe+RWKsgXmIGd9Q5ikqjMMdyWKKhBjTzPUxEq1G1WpHF1JihXsUxIMJry0RQgJcp1r3XWEv4gpTeI0nOsLucZglDzxNg80jf0fwndd7qxQuqUb/bR+ah7g+rccyRrTq4mNWqIBxwG6MIulf7kvFhGjVoiS57xZ8QVD9UcUMPE8RAW/YYW+VEULWQvN6VLAvrUuaq9F6naNxwhgW0uMzMHbo4EK5Jbgv59tJ4hwHaXa6jznWF1K2zGD/u9z+IDewsguFN5BHSEvvvL84NBNVRZ9Gd7bJvOjwdRTRVQySMndktnrIEqZhYOJWyVooHgXNYl0wsJ+xF9UdivXvOrmvheIg2uVd/9rScc0bt6N0XOc/uZOZ9XLynUmr8HUYQBQcvehris5nE6l8kKy7HpdiQn8iIYG22J84GaXEcyvmQSVjdVRymyixa9qgULQObLUevJ14NjGvibm9UrnCGFCqtmOSuZZGGo1PeY/p2U1xc3iHpBhy3MbDM8MecDhHrq9qjj7JLfDU9wZ2cHPe/pYvp3PSZ2fZJB6vWu8/FWxbLzIhuLMNtxtbmdca5zux2WVkvLlnDU+YE2B+3Twn/LVKvX2Rrs7jrzoDrm05YSFhPmXwT47wr4EwD7LTfmmt+cUYqy6S/nglVRFU3miWbUsXOqaV6xUfiA8sH4HZts1VlPe4ncXeN0DdQMs4UPFJ6gr6N0uY0KHKgMkOmV7yPSw9PQksyVUOXxrbKbmTbro2VNkv9wZYMDQw1jzx1DdJPnMJdGxzZmofZ/Yl5kB14Y3C5dhJi13ILYyMRoVz3eTCUtDcShKDj4rhANUNQxkfTJOTyiyqhv7MIXL2WCE7byvAbiA/MdjrtyR01RgFCSP9mMULDbXdS1vO3fTVeTfCQOw5Y5nr/fe15M7MNIet76rihw0TZiAStsRWOOoLBFP7Id13DtxZTDBXL4KMQ9mAX/j8IjlmZZ0txCLeaVaA00PF34+qutMRI6yrxOr9b/FL0TnfMgP8R8ITKvAGPEbbbz5G/muRDFDFBAWQpXtqZQGznk1Ho7uIKgPv3Z7WMBfa0ePGWyzhTPrnhoPSoqVDeGjiYRDb/vrD9TmpQTgPQ4xWE0ZstKPZIsESW0NnX3IayM95lYx+OSSKD657RJTQpu4YfjkcSPJ6CeA3sPOyOqiMViqRhy1PpuGpFV98HtUpw1hIZNCAh5oal7Qwok5BcKpkH7HCNCgfL0A2kzS5A3MHbytFgn+3xbkFQ6KOGFkRsh6gYT9GOTuokSW82NpBVQZhWv8LCKTCPMV8FxCASitRBUjxmtVumqu2Bsfj1ELLu9fYNF32phtys5ZTbahQJBacLWOYyCTbRNDgDdeitKsPKE86Su27n0Kp/NAItz1WMQigfCyB92pgQ9S3zw8xbHHGaE3YVCuFaaEou1wugCL/LqiYha1Rp6Al9FYMeFRkPw0/Fxx1cBA+RucdG2bQmAbjr3T7nivVKSNGPTZVuzbB26oKggG4wPT8zRG9e1b5JxKU0dY0LZ6GffA4XiZX5hScUknPgTUjI1k975kiZX8LZe8611Lw1R93ShPStUzceKi2xf7/m5xoAhcsK0KGi1iOhoMus8u9wO51WJZ/ZAX1sUa7Qv3m4DR53+ZhK3DrwOd2QzkdyrpufMEkk8r6yT3QCyXv9s4dC2sEO4KyqQfR4FBFqBEUa8qwKwWda5Xv/pdL96nJqGgHV5nAR8l1Uygg7bRDoCTR4jDWF6BcnG6+OUFHBK+e/KGwNmHnbPOK4JHXens+rnZmXD9pO8qeIZh1u6r4jL1i+w9znFVOnBvbh289GFsff/g8j3dhAt0DCgfY35Fp89xhzGJozU3pEPoJmg6ItYt+uSdqCDv4qxTY6QueAjTtOOtatAcTpA8M6Zqevp/CsPnQmW0yexue8Lu2Jk92/SXeffAjBBBww3GDhN9v9dXRnDdSTWixMixkDVHRuAK1c9Fh1MeBKTqbUQKPKb1YX0cc+iD01/c7pJ7gaWviDCPq4cgF32eL8BDIO0vdKUZ6vOcUXmaxPZv2KiufApWWDtqFwzQFbuubC3UN0D1vZOIJlobem6sB+R2AdCvA83z863cKSRZ4Z1M8jGDu6xlf/AM6lVXXsAsfZEm/pjH8omNul3k7lG4mwaTvBXUAlGcds2gA1XfXPThpvECTNrDCawb9WO4271VaFXR/10kylErUKBQjoo7IZvFJ02mp+wN5JG0yii0n/klWChjMH3igi+mgxMPxvXqmd2wUfSRAqF9vpbQzPzzsjyaJVRsHzBF2RyTBn29ZmiENR05FdBKdpYD8zEw85qN59qjV5m2weBaCd7cSG+Y1ZWxYetZyk4u23F72KWvGzl7Jxc4ISf/Cjw7F31d6tOYfXtfSFx4qLHEJtrKQ98YCSBpLs6hv176Q/ODS5e/+0jCmRNBjh9dEOUIpXDbcrAzb5/9NdRllhCSwiwFfjokPqyppwaetEsQv1Y2am5uHuDNRUtl14JMF814J2mQZZrLQTAiT7RwvbV2btoMb0k4p0fhBfl+bxfdPpUM2fOcQwqxxCH+hwRu8gneE6GctuAUnHe1BpCQ3vkmYUZNSxw9NPyqzrchAZRgwVF6AL7a9q3cFXYFWA1UM/81GP4T//QKd2hPgImxWWAYo8xiZ+5nYB6mhR9z5Cmld8pII561uKGDUt/ng3F5Por8HcRXTF+Sgmqq8bYgLEmahC/9soOyY0l+ms+QupBn5pead8+TDsQjRISgZKpZ6oz6c1LoKBVShn9PmiAypoJMb8wE4Ovor3DpgxNRsVxiUxQqgpCqPfs3oiPdCb48ARU8v05JuCWuEcsJ0/p2b3cP6+4x1qvCQt76g0FyfTbh9psUkFSSt5YA1aUX7rqG0ME8mHas7XlNsPGCMrNsV6TN+LAbGwfO8S/wiUdyh0XD4xA1gnM8fCY3MlCsaylmUdw6qWhb9y85LTs4mwXbjSyW3HC97+xtTrukIJKa8VZpLGQ3tCw4a6yiPu3Jd9xtSsH+HTQVZZgJyYG1stXUDq3ymuEsCTPghaT9ee9BPSU+KrVIhQvcBJoJa0q22b5X/xHTmSVDxSCZ5AgERWRq/8gSlIjkNZQng
PO1BzGrOx26eaLSEcZL3hjoWCWI3EwrDecmB18IDADwS2fuMyj2kNtWRhA/zqL5GiAgnTZ3X8X86PyOQpOyAl49urCo36dN3Au8uPlCQukC3UJgXhY62g8c0RaY5N2RuEz59XBoN2lOYTfelCg34xyd8CxZgpvMTYgBwXfv0ZP/vh0UR000DG9dg/G+nj9hs268GnqfTtjoXl1RMuC+sYuDIn54KXL/sPrTLYgFeorXeUs8Q8PncfFmnImGEYKQkm5z7cAWLAJzHdvuVXHcGvPzunHdI3umHnDR2RLcD00tr8x7kS9V+ZvAqsjumsle640aa83lUPe1uMPpPCFscRVlaSpcOWglD46TA4bsvM9Q+O53A/cRJuABrbBLKdVgIwsjm3Xuswn1faldXqZux1t7h0wvVHQ1ZvrzA0NtbT5rH2SiWDZ0rpmoy78i09zn8LpVVTj8gfGdE24R4/7fxZxGHSj1XJxhobCsUPO8ErlCoHCEBoCDSrwUecoFS7OYdJ8Z1he6kjE05tdybJfskNkxxF3+tIt2DRiYE/s1MOGBjg9B8OvKNGesi9DJmVxpCWzjrdvqHk7hA/Lr+zX1mvOkz0egWEP+oRKw9DbaXVqqJBDOnDYCpZnauA1+pq4SEpWbxUtKBQrzYopxa2NKGMSEpsguyUUf2yg+I549HEbddiDOVLuv/MBcF1nw3o2BL/f84GKwHT2zObE4ooFJ42KlAgj+2iYKxhdc2JKZhIPfkW4JW6JR8UGl5R9EtGSuRwEcV6hsSACXazmjL9ptrTCfApjZOO7MFi6GHxAgYhkY7/E/n76hbYZvjXpJt01bQ9JST5GyyfRJ5Pyy/aI317VNkZJsVka3GxbM6q9t8B+cPQqqlqeneuNcmE/pUBScjUTYJWtlc22THRoDJH9ZGy7nPzNWHWG3ekV52WXeA3y4ghN49xESPo4zbzW+S7IdEkgNM9h3K0zvoymt5mPlmSoHrJ57rsjnKGFojjFIyC1rfNGR9IDm4wy8hik2ZN0dre3xN6GSy+PzwNVhClnVgKn68miv6ZbvKAWB87ZvbZ8F2Om5T3M0eH1EhQ9lOXAcM/WmAdKURsrEec9YeXOXHdMq8RdBxS1RRwPmwnbguUHZxHSdSTJU6AwYr2afP0eXERRVXOzxnYDDzCTw5gQAuW1LRwwSH5kvpQqjH4RJaE4uMZsQIxg7VhiBubJwPVML2nIp+poWtbHbTAr2s3dqvikYemn59xEQs/mmFXE43zDiZhtWjV72iIvQFwoluegVmRMgGH+hJ3dUJvv62VB3HFThVqGak/LjQx74pIhEry89hd0DueyOB5fEaJ9DfDi0gJtJpvMthXSKWdxYfFxmrlfMhXF51mFO9p1qIZiSdLxcdEVRmzGzenye9YqygqJHZ7EZsD6/LCl7e1KYSdPt1MjMm0B/bF1NzkaJHmTezzGOI9BMisBTRfGn/mhoN/VTaE+vDLc3F/pL4KfslIJrB1Cmqa1tDtLMaKRm7FcuUVzd6aQJDNiGHKEv3BrnbM1tMcvbBUtZClCuG40tpD0IY1WwfHYi10qGjJHl5BGi069WdID2jIq/NewB4q7lzMHKW4cqwjFYe18j5Vyhrt/U0XgG5gFOm0+Yuw9b+sTbNbUEkaY6i7BvOasrW1yy2hWsfbk7ra5B5KRglsEkGcGhZ3ezGugqO7i3blZonczOuXf4dgRh+mu+0PcWNBtzvOCcfG0COMFXnGYjbXK3Avl7pcbEH1w7gjToMtvbYm0HGUyfb4PwZDyFVFF93poCW+gmta5/GrxUWdOtKit7l15NulZcKHr9X7V2yjl3FOyyhvMLdr4qsz21GgPmAGkaHWs4lHK2G5qhhxG3jlXJcP/GDJNSQ0lFppbDqMYoBHTXci/D90+PlDrbdB44e7j8RITThyW4xN1yq/J+IiPb9C9yvXqGEzQURY7jVT6qP1rW29XhD9cWVBKQ8ZMdcTXAdmUOYHhOT8QadE/Sk3nukI6gDu1Lnzu7kxqqE/MtF8HSukV9TCY4wSvVCMhOxKXbXKTMQAyDRR2ECR/6arUwmykmmkFShrUs6WmRIShON56xeLJYccmkdjlZSMsmQnEAiqpNiWSDfRunInXpd1CDL06RFQ6yFYwkmaey5Q6U1rOxK1Rgrho9iex3PkdZ+jCfN34jwhVT51YQzc7biKa9zdCWKiVafKOannVxtEhD2fPWf+VjSSrMreq1Q9Z7um+Y4YM76kgYQMgcBhcyrh51gcC72CX5fYo7053vZxXYqUhuim68krXevoiSsJlnopcn7iIckSnTMNWUNyTOh9C0A1TNyHTx13ip3kSRtcL7pe2qYHLZwCU7UXVlqyyVM5F1Nxzkz1VdmFsdQrbWKxFtZCuBKc0ertJGoK1mBwNVXeQh2QgX8EY5NYz7EeufWkG73SuOvxoiDqkLYerIxkaW7kdj/j5v+cnLoz3SLAla+Mxu8FD66SOehTTyqFvWKgyJIZp8qkQpM6AVhj9MTtig4QQ7YGaN4Zc8dHvIT9QHECSOK/1tz24imDfELJ+JDJRSBJpJRHkp6Zmaujsa4Khz2SQ8XMgG94Npjqd2sngvO8ZVl/vS5orrQdEqee1QYw3ZyCHrvT7rHXOuGcuVpcSQw4JNMKaHUgbn1kUy1tZE2mXWy7sbx2/E4P8aSLvp/3JI8C31dMeK19W5FmmCBN9saXvxzlW/kUIGrUX1q7J4ejHx4Vi7yBfezOF0lr+Ju/2RCsYNSuN+PZbYLTuld1CRNq149H68Kvi1HZzbHVCyj8ELvG/D/CL0GQG5htP/UUsRZNt+//AZERAFPbfHuUJcvHDLwnaki1IQF985OmjKqIRsITzrKeSoAGHE3BQztA5GGkW/bYAUTlN8/yYoyzy/huI1kY5Nq1CMi/EZKtzaq5XXNlXIZMBsiUueM1VCVkOHoBXOl2twHaiVt2T2tE3WIwUvXkBnHZ+4glTs3lltoFe9BF1G+jwMhNdzbhjjzSZ9mXYi6/YmQ2m/Q06PjUBnTGAqdfD/lhYB/elN6lZagtXziUuA6PIY7S94OQi3kXWSMrWIWHReXjhBWSps5EtvqP42FZoG4WH4pGMOKRX1AZ7V+9q5icMVr51DKUcJZ9pZwL9DCJp/OToJ8JJGhle9D9CsgGdBtO56Bu05xDxHhCyufZmF0/qMi4+rv7x8fES1rqb5incHmWpSXktzAvpTBcLxYWlG7lUZg6DiUWEMB3LxYNvlmZS3vIKFOPcrvCPAECnVG8GprLCEeX0rlemTZpKP+l6QrmCNNcw3kOnhDYheCVtDz/WF1f2Z/qI48FEyi5Yqt4U5IsU+dEuenASDCRwPGc3uk5zui+qQ7Yyfr2hbCDzEAvXKshaSEgkPjdwOUj+3JwSyF6XbMSFO2iQAAn1h4w9I3DhXrFAfPXGn742ranTcJtCVsHDR1bzArJZHeTgSltIhmitb8BQmAvUpt4KmfhpB+5YP6CbN0OcbVaDV0BuvuuKs/gxEP3V8UV2bj+nrPeId7n383XpcBSsmyeE8Rlcv70DV4BvNe6qW
0H1LscS6qxUx/fb4N6yU02fl4yRHQth3ZtsrouZa+oKkjg4YvfDShSskYJCFDQH1+hoqOup8CibiabpYIfpXD0yVKn6IrSoXKmQNOk1NG9/HyVqxx97wDyYlGUPI4VQNHKWtAm/bKNsm/ajn4Gipw8hCW5UgPSHofrSqzBlg0T2+kSM0F4l3RGORm263Uae+j8FqWqzr92qGObS8RGZ3Hp1S8AY0ILLBTs+z3mNRX/zYq2CVicJXVjkuKHk7+6EOY9CpVTV5JgZApECLFby4fZS1JE0Xw6JM7hoWWRyEF8/4MgDsb3/OQ8JPtvloBNUGl02dOSRE3+1PtPAdt9ArR71wqEsjryYTQ7jptQu6bF56SOK3oYhdkNcTVgBc7CrVw2oI27Hnv04IpEMRtRJi429+vSx0Hbi7aHD2RtkO6FxfT8OU/pN5nLmNv5ltNy95ATadSKle0MMblZZxs8JanrSTeJ05+9e84qr/GNojXowTUd50Cb6WYggZ4JhLg5Wl1cnUsDgGSFFZgUoau89mvO8p9wBR0nL6ok0EBIT8W/8Y1Q6uIyAhqrvsfwpi5I5rrH6qqTEEqvwguUudjnIdHq/wgOgH5fqQ7JTGcZ7VWmqyYcOkav/1dRiV8ljrqNdpBBAa/Xz8QcHLQWtWA+0GYpjet8GMgf/0DHEqLqmQh4PVzCdVxagVAnncPIWEMelUNC+buzcWwjiNxGlh7ut2W8D/g4drqlWhBW5e+hDjKsjSmwbKO6K7KNsZUmxtVj/PW2uGiKdpAqWOtsOq+wRUNNwCjPMoRaFROtWbeLk81Kc9eEYo/29PJUPfRgBKO/g58arMsDKZZPT6+XdmQfwEfflKflUBAb4Usr0Ay9QCXu2E4We4+PJo9Id5/NQLTLJNwkgrcSAocZOGgLQbinNjdAhyC40qc6IcpTqlTt3jKW5R6gLsClLkgZR6Sp1TtNvyL2PEqB0XOPUdTc0TmCkG24F6Emlkt7KghauhNmjggGsAA7cvcv/Nmlg1o6Wb3Q1rChbNd3u85tRQpStel4aq1mgaaGhFCcm4F/rAZ6SKmkCgAck/FIEGbSTgytlNMDmLmvnFk4Gqgh6r/b3MKZnEzrYul+9EQ4mvvAeYYczpqeSHTakOPmZV4dJ2twSQSwLR3wwLcGsZGIIpegaRq1lvqAU9W60nBLBxNEmco2mJKOAAdc5yaixdQzqNT9iWGTtUQDFlNuxy6C+mzInAJICF74By2oMl200MCozNo1Fr5lQ9bjocFBkJ7l/KKMBAuWcnC02s2GDMPyKO6yk2yHq20ZQ/PputDAZJTH2fD+DnaUBiQgILNel1vaebJP7zBXvgBbjWYiluYzyqjaVCzoJaR3pnp2uRYHqRGya2gn6VtNQ9qwP23uOC6XvPTYJH10CnTZoGE0weUABlmLDma3rT1zrWR2NBaORLoYSpjSV9tG1N4OQJtutRU8ROvo0q/duY5KPgmcNYpppsguTNEpwazRrgUGMxFELWNjzM7VVszhTLatV7zzPVjd0e/sF5j8sSKy3tAIeLnEtBxDHLo0aphJUb+CN99QQB5wWuHr2qzWv8VgvdrnG7x4uZUnANSq4f7VMvHCGJZ5P5jE6kGaktlFJE3JvsupwlYzDwqlR1oXKLwCIg/Dt0grGJdLwuk7Tr9a+2XvpFlTzHtHr1RhjlrocxtaIl+v2Xzb+ETP/uxfrI0AO8lQ0fK8oL8Ppqpv6c8H8cIsXohNGmAabpuh4jmO2JZOEvkdyFSiaa2NtZDy4iwlEk2Gan7BFDaUiqlqixyH+KQdojdBPrtlbGensUsXFN7SYburb0FFuezPvBesiApxtFKtbH1isYkMAgvoCiPgOD7tmKnI1fYQfZa0ZdY4e0ZqzuEO+VsqIM3Bz2IciCn6oU5vpw3c5LaJPaHfkEiIwDukfdlcS2jTi2yN7gMgqyBkfLN1c0F+FSFemSkqLpAffIBYbS1jyIjgqAPAtZ/17/DuDJrkH6hW2GHrf2iiRXiLbAZREGD2lXeCmhl0xRWJrE80Dq1rA+7E+bjdjRDXC+FRR+/aaXWKWcOg8nCgKGcRnHr4gJT/16flmReFLJg/7k/va8ro/bdiUgjeaJrqQorpLpHOdpylGzWmlms9DmmNOwhm/I/WJM7S8cKhtKXJQ53qG3pf7M/84u5gRcpx86T2fvRPV7d8eYphhc4Kn43eAlgEu5pYoWzcX+mZGj815moGPndMGd8oPCzq9BLpzBpa00dVOq+OhEP8GcKTUybeyZt9bdFqTZn5nxE0NkPwND4pxoMzKUhoTulLhVBG3XZjioBWZFi7oAH1R0FtDno1UolMxIMwViolmpx0WDg5se62LWPdoaVa57aoThdyg9jmcSkyKgDOyFjib9MlYF8F45xDStwCYAskJPfi+5I6eay9b7DZl5D7DI55LxTGavXbYErf2e66lA5XzJf9YgoW1zieG1ZE5zbnG11h2Isn3ELqBxMxClT7R4aBdabUybciTca3R31aqbD7hPE6luwyLGO1Lrwh7ZKpOktvIX6j+D1SApKHP8+zgooSMKkF+ArMdYqfan38Q/v3s+a0JS5+Ta/ro4AXorh1YRGX0bVR7Dirge/m6r093rRtBwdwkrg+EmHv+24/UepDqqXsLLvCphvz7Fqgzc6H6jXGh7oxdvmvrdyRx6T5bPeiKBc4KiasnnAJ+B4Z4cTF7CKQfyjwuk1abcUR3d5wSBYbezqSyArKAJ2Q3VYDEi3GPEufNk9uur5wvGI+IQtqwUI0wR2uQ8uUA2/W4DOXIVCI+rb+F7p4uRElrmNdQfkLIHeEy2f8eoK+Q8mCf1ugqNujGn6895Pvqlq/K5olwIhYOkYENnNBGIJVn0mLvEZqknKmGcTsv/arGm6pG7P2TXx+dizZEJ4gE54RXpUer9qQBEGy3Ljm9LOSc7pYbu25O0r49A879dZnYpdsblx06+zczQY9QPgjfrFtes0lC5whUX36li6v+uqmTo1HbXpagoLBm53x2hXx0vJTA5ZN3Glg/79UkxqhMqOTXh8qxsl+ar46FJOWIH04llG88POS2gDjcxWK8bBUdXRg9avA0RMLhJSggAkO8B11aZv/5a4XzHXhlCJsnFs4/7UdJmmYB/RnusNK+d4xmFLzVmIMWtx5GU609fro+LCig82+DtPFAZ+2GFGLXLjzo0Uuz8d3hsVqRtTqHF7i9aVkZ5xxIvumAnZjTxEA6tx/R+1UAQw9S3T4t2puosORqeqvuvtqI1w7nVuprGBsnCpgjSC7zJKx8WP1y+SFAB6pJAswr1xpyh9gFR6JqNr6d59FOLHMNo/AI44ppqoX7zpmcbY9gMMKlilspWOSWxklrTSp5iE/Asfkz7eKbsS8S4adgjFQD/MnbhFrypn8RWPXXj0BVBBZNulC5/85rhT3iocoljHum6U4mPuCwrK5QwHlMzMzvJlIhqUDyou36aSq5ngjjGPfxK+XasD5hW6xp18OFv4V1sAAt3FuJLZRAYTHFak5aCWlniMUn7+EF2S4G5h/Po2xvPUAE23bLVh5Q
ggrnbayCjRo6E+EItaBiTShbDOd8D1P+5FPSZie0462W+CKImQ3JKq3/lYJCt7VQ0Yy9POuNJo1JyWzUHaEgT1Q64fj2QgR7hLiURN/+FXVKMGZRFkTK/1kr9GyBlUk3QpFxVdM29AANJMC6RlY1qjC062Sp0EnlFijB8oV4Llrn6Jg9Lnb8hQ6dLIkAfOs2ZqX22dNxGd5hC81JXTGQIif7nQJcN1d4s7kOO/l8GCRFfr4N87NB4RWj7H/qYeqrVJu1W1NvAlFBlBZlBp2Uilzgr84DlS3FFBlJcSe1VmWwOJYkTy64RLHtB0qOXEaPzx1DqKsN8pjYR8xsH/8xU5lXjuVm4w42ytFugyGQnnMCDIAAEd8TrkvnWgMTs2Kcnwk/ZNnRhiOGjHTuJflJj+QBJoKwwmOnXNmT68EioQ1TCSvC+/Gb9SssBrLL/NBQN36Vph/qQot9+mMsVPbAgMrZo0BUUpMoyV1Fz+foTmmdsw9FILTfNQPcuAoM7cCaSD1hmKgpI/FJDNiHzs2rUTA2CVUC5xe+Yy+WZZQzeYYOCPCfdH66Ch4EBcokfVHS3cpjzmjI5OkjIA+sO6qYwGaJ7EhSXlDavl+YnNsp4WxwxZ/Np1dUK43mnbBJ5BAO7qVvfA13ntTnBKVUt4p7lxqmQMXeGk9FL6oAhHbT0GYXrLbrCYeeydXAYl+vwpR35RxIwYM2OSU0BhFn9YnTUtAW0VhosCNzMHCaaMlB043m0oIzFxctgUshgkNy6XwNGJtu31k4pPgWVeLaIEDik+r2qhRhz+D4fHfGYEG1c4we3Fuy33DUubWU8svNw04MwgNwp09lIHD2LQGK3M8utpEw8bkIh2K7Q3ACq+cKBAXAqjiFpPM8UdDP8NHAX6EdJoSTnL1RICLfki//tyFzi5SKKCH2f6c07dqOhPeftwd7J6LlpJLxMmo/O5zhLFVwO9oqxAdrg5zc+j025xqbTOGREfPxmwo6zuqgPaNtChi8hqY2v67r9kPdbMV3HbZ9myETomyUz2aEBZM1pZUbFpxZjDS6IYjbBGpbZU/wLvVacNpcS4jlDEknQSF20jBqYU0Dx5kvJcRBj3i7vRpBACg6DG5FqZetqNm6qGdivqrWJfVeykOWrDGx/3MkVZZ+DCZ+694Zd00ijZfHB/FSTxzArA0+rGqYrYAEls1qaM4oWxQP01TFqTbnvbKPoxOIqy46NOKoVgzSKvEkfdAi49OTGeqbt8IvNW7g1dbfSGGK128ysC/FwEcm3/i1lFFX94ciZwIXHe1jy/qTT0/mA81nAL9ngJRWo7pZ4Vnwp77vge2KlXqP2NbBW8/E4GRSzJmkki3gJu6d1tllGa15z/Nl+RkPC4BZQ18UjznOY93eYN6Gk5HvP+5mndVLXS/hMZcTPuw5YZI/wve/BhDuT956d9LO5bDuV+YgXBPxuLQPzy2ahcasXoM3Klh9/6qCvWY2B5d90h3+iMN6cDqbcgWgdToZtcbVOWPO2knHdVS9Z3glJDWdnFn0UJvg7+ODX+WoPsn2ohqqTLD2DI2jb2OaLzP5+WlzupSZEQhPKKAnZvRkV0ZVjhTJeZm3T1q5ZHwS5R0CMlaMxPZzjAmLXFE/YezVgu2ADuhsUq4mmCs95uhKLtMvto9QdE0elvIhi3/edWHWPmnOlBqXlE2Ivz7aRHK/6vlQKbPIjjIlCNX2TPUagdL0wmMzQ1YAWavefpaJRsYnIZK5SahiAyCAXhFqnIDpAeXGd2KEdkHWMlISTcGmccre/BY39zZ+QZwv8qGjZaTZcwLi/mrp727eUaeQDbMCAVRULZEmZh26fW7WbBExGuIFtVKcC73KIFaH2fOvbikF7v0fVXQokSz7J6mOXj+IcV86PScN4drtCzSrinlI9vbGhwvKy3MWoXVqdHpGKeZJw8qBvnMqz2IYWcOec6oTPnv39iTTb3PJqwQCsKy1oYAnzgVs2KVDtvMTr2gRtj2jtVdtIBzqmuXUpQhlIr6ZOjqIzC9TYJHdXxSSdLwdth1S/qsNhABxsjBPxQWRsydMqmoYaguiq685UWaoLTtRlixb2py1S3p3FKJt19tcGVOo5QDNCP1mJJIZD44j6B/glH7/P8AOBbczNoYeb7MMv49CD3F8FLRkszgVzjxMwJv3KCTZrty0xUJ+zyJZY+/HVzrmYemgpIaByyM5IRN/fGL9xIsJW5uygLbZV1TCT9PYhQHzMOC62Qjpnd1R9RMM1q/OuQkygGqBDiYOWsESJErMZnLSjAvvvTDE5xhLTdC+dVpMAn6OpMkQbh4V63eszj26cjQd4yRixMMr2WAcXJwqbEi83hYg1UqxZaq91kOF+zd683tm6ZMmaoHfudzjG7dZKrsw8gUuopqRXzSNs6z5H18n+tULLWLlOigx1O0vZSPBiCLrJtydWSBA01H7Sl/UwZWaunhxY5sFwMchUOJLiNtz4b1NJVJ5FcH3UCIwl2gLw7Lefs98tjVts8jN4WJ4RccQE57OcvPT84EaX/xQK0kznXL47vzgMQf6ckUGR+pC8zmbQ3vYV9IMEuPa0fulqeMo2tkiq3RUk8osoe4dMauSuwGlpUkIC05SZoHofMFaoXMV+HJdEoFdwmxeO3TWQRFGVIXtbUWLGv3/An9fnsA17zTekayKsYoamEcuG2H73Z1J4p//X/xPyeDnqrK4oayx0f+zAHBCBNELvHhW/hArUWkIsqKarSNvWQmudGCk4INeCkWgXJnRLKLw8W/nd7j8NubK0aDzh6KkEMrVk+ZvV5d8GE0vAwWJ1uhjR+TxEJkfqwU3418h/v8IH+4vFU3dqfuLKUC0X79Vck/0yyajqevPqD63cd8y9HrryFjCkih2kdSovOtXxF9/KYFbyKdMGIOaB7swwUEcZlM1k91vJSrmcLPVm7rv6D1tZWyAzoBTMz9iqK4iR24IBjZrF7r1eCiF0j8/hJ+Bg/q5OOhzBTxeuKOP2wMH7k6zIuHZIURKFq8PeZkpG1379HfZk1oczQKaFdRwBfzGfe7Y4hx5QQe/RdetLOuB7W+tvp7eE9v4esnM3Yz1bbSqpxP1S4MGgYVV3q1OPafm4jbgJrBzL9Fn6EurkcamjMSTr4AOt/YSL0EAQGMzqeVLh2Cf1wsjdAlrYvsCGHnI4t2Y+0yva8z+dOhTsHlS943V7YCEuoUZTXGiaZ0uj+yxC3vAhGj7ae8vCe7E5+rANhmq3D5S2ol7vZkm18E2aXZYefZr+VLw6JRtZS9ixH6k+uuCrRtXunxhrtb3dX1nclJcNycuZXhPiMCYaWygc1Ox4bGQaotwjK9ED4FZ1iIflavCgbKECgxc7ZqGfIKfmN4eEOw5SCOY794ig6oUDK4EVeYENFXBQdnbOug5RCWqIl+S82It9UTbgEJdop5nIfeUkEZ6jqy7W0djRFqHjhkvJzG8hoWdaI2yGfcxD74sXP2qT6DiyDXHxcTyZ4VdUyj7l90YL3hMNJe7YcQwd4NgZviCWjBK3nHpivLZ82/xQtz5V5jgPimx7Baadig4DJsNaQI
k4kcgb1Fy7sJT/npjahH3h87MuAXjDmB1YscDFGKXq+enYb3Fq9vSnnwbFYWsR5274qy85Pq046FmF8coT6E/3xiGdlugO9DUXsHt2+SbNHlVRm7/H+0EOqWOBR3HE7Y+IVCy+pgCxlMUpsBO3zB93r1GYS+xPpTeVCKF6qbwEFDLfEekDNmcEFTw0WA6Pi0hpYEIHm0R3/crrD7Ulmxnm5fekx/f6MQ5sqKLi5KB3k9hXSwF8mSiBF/5dxZ8iSkcZB4NxeyAOk5bAYAqJoqqb57keNYNUJx7CgBA2/PioWCII+wiLlKZS+OleYQLk5rLWi3Gbp5+qYCfEwfOKcphiVzpgYmw6gL9BC37SiCLwz9su0V1Xky9k2xrp3YhZ69mz+iQG+khntXZPJWv4KbY6XpjrCSwvIsj5kFN5/WuWi+1OVioVzAo/Co5xqB2j2X2Y4eKG+FrlQXfJcbPJhcY4tF/It723wFaGTkPpf9tlPmhOwUDfqTpjA9fN3IuZLYMUaAejaqfJGBlJsQH7OLW0GNzw4fo8dw0QGGo1hzLUciwfccInkBy45I/jwyTbjh/JHnDUdGYHmxViij0pNCYexXuPgPYz7/q4w1EOQsNOMD3cRu3d+dtkRfqQC/SUjpfMIrSJji0gtg5brqh6RQGCv70ZuN354MlrLBVLRQFtQcalaPL8VojaJ4/VurvxLbXzxD48yT4IYqpvuBirrRU7DHiQSsjDC4afr2nX9o6Tqf7CmZMUJbKiKWuuIe4b3UPAsfmSDvTzskaEsi57PvWRHQ0d8lvL1VC3qxVsaa3nQ+T4j3R7MAzEAAAAGNu2beNj27Zt27Zt27Zt27atDtFBrr/8j4cXFDMwRwpy/chS54vtWtwuQiLf5Zmam4CYlRA2QRDlr5qenx1r4fuiyWPsp0CxOL/waefM6Gcz8jqAaxonElVUgIZMFuyPrZJrCzrosGTe+p+mQshcCfwpA02LGmuywZPLznBQqdUoassHQ1WPJOhYLYw3c1dANdvBe3wXbeaYMcJHlbFjmHnE33PRJON4PgNQoQGy5Hi/NflHHVUY42Q1DEeizvw6Oj1fG+MH0p/ByLo002+ecMRXQL3bD6SKHeQcw37ALKcU06vgS5AOvjf3/bZigEML8eAPYDbC4dvdbrkrMCbMR6GKv37uZHjpzqtJkBcNp44XDHmpRtRsjGhuXHMKgQDP2C6gAP883qyfOAzZZo7+8DTMSLuwnM6+J0mFPxHaF20wiSm36nUOlB8CqI5zwq8cDHKm9O97NbCawFoulyERoTO1JVWUw+zGxL7BNvWxDuh5tnpDIFIEdOvlB6sd8JHnT4pjgmQbVprnOysst1z+FxuELolEmj5+AkOI3wCrbrvjqy6ySWAsoXrVi4rYOfrJAoW86gnj3DXWP7SuU4/0LAFcOdakGMGgYgo7yCTytVsO2HJ7Ct73zL9StAJfLG90eadPlCjlWxLigGzp8GDyGWZKdImKHb6e9wz9/ITC0Li25kckJi0PFFNx5ZvfQW8cDnMX/DQzPWFsrt/lXZvg4ez5gVX6LThhrSityI3DARwXDMfYXOfLod33JWob2VHYH9VcIL3ZBTHTyG9bF7/ewZ3oQXrlHpHnPXWXjwb7dKYcrnFdEScI2ZehLF3GjvYdH/am2Ai65LGSc/Txv0ocnBcLjz8/0gAtlygbyCeStBeF0AI4sIYA9zpFu/CQSkROTvjQEgTLg0HlTqoKy6UHANOtuz6WfaJ+SSqpMbjhLVC75gCkRrxvA/Xp6PO2J2o8CsXDZkGvhfrOz23RAv8trRZmgL9sgetXjR2ipoh49Hqy1PA4skiyPFkOnewsxEdN7Luw+MNAPFtvW2wZoVqazZYmlvsuEY8gNtaoz+MT7P4PpkYkgMyrzXWUKmqKsnPyJjOUewIOX8hi7OdpG28/zP430Av2ZSZ7QT9Va5V84VYbCiHG4TLBck6mYC+YAXKQ1HeDK67dE/54o9rmYKShiO7wi+4GtXXWkDrYH3HzFKYjf+EORK/ImVkuw6lzVK0vAF7IlTEIRtOP3eT9PuEctA86CM03Ie1YPpr7K85KRlGP8uGQoVGxn8UU6S/Xf8ZXywuIZ34SglC126Qba4zd145+m3Wssg8Gt8w/aT37WQcILl4AZ09ITMso9Bl65kbiy+f0gQE3ZgBftTFAvKYzcMcTFXdL3m7zKsQS48AzgU5CsvDF5blGL7Tv/t1ENNXjzdMQ7WuVWKLeIiHMk+pBtgEDiaPtR6cjlyldkgzYHYEfltl96Wi2MuQ+OQVq7KJNJPBg2GLZhIUVj4bugkgfw3TaqO6HTVBKaH/1RCAXwSEP2vI21os53nyZGpdSn/Um7utWRo+6zkdwnpepy/L9sjzxy88n75uyhUeIKu/MyGgvh+Sw/n7kG+QMu72wB3DvJMW8uVbT4eZqlzOVXYaep5e2q5ZF+OY24Sz9sWJahCx2ej93HcFwpmTFWD1sNIkV0H+afdaRH+Y7IhZfJITurms64fRvodikRV2cirFA76GES2CjWsD1S0UrB2I5iQxf8LI6QOt54NnXPyJfuf26ySb/1uTX8m/3wuj34EeCbqF9+UZVeYXk2hBUOk6/pOBpc9OmGfk2zD/WKR08IrtnPYESxrsy+SfP76bV5/JZYTNfFVbd/xRKcDx5zYuz3fVNW43pEJEzi4D9gVHqLX9k0uCCUAp2c+yBtLy6njtGFpFwz4ocrepX4a/ukzNMKHFUXCRwbXTkK/fTCopM4PZ7EF9IoCGz9s90a7wQ3br+5ufbS9PbN9sr7P1J3djPXPvwx2N0zWLiOEbggDtJPVztAt0x49SkZjTBR4io0d7JRMkqs8Pd0B2mBpqmDBvBD2WvUBW1aqQM1nwDq7uGQm/OIqjyxrdfkj1SfV5UNOUdZc35pxbMkv6xuP8aWpYrg4i2aXglgmXSJRv1sBCuwaNYYVJJcQFIgIeog0Tan94iMlxEGZ016UfQkHbWGtvylNsI+S+I6sbGwT6E7tV1g8xt6EPYha70+KuaqaRlM60I5HQn2xgVgcBO29nYzIjDz4hLGnrSqwLx2Am+7HN8V0r2Nd3HURfR5aviTiNaoDkHUI9MMMuD7Sflqvc2p4CjyP3r8fPzWN+y+g6jalhL+COe5pvXOxxbp5zMu9moTnr+FRq2l8tMLf5EzvmmR0UHEpdlQJgQ0XY5lqG+qCUpIeYGR7+odIG5pQw2PiFs+kvFnM+vJD6CBb+m5qamhbuqsQfbTfw33jWNddwBZLHpGwk/YRNuOCGSiv4wF5voOwxiHJV5W+laOgUHTDbXbrusg3RL3CDbH0K/hct7zj7wblaZ0NcFsJc0jYvuPaAABE5ZZb1Ss5IyP2V8uTG06FI6y8GHmsT/+YIYO7iY1VFKW1XWcU28ztNKRbf7YHBzohWYF6941LGLlw4ZbEsO0360n6Di01YeDcesyaeYzQ8zuLsF4s8J4j+KMIlB4fXkbG0JB678HKXBO5fPVDncBhnoZzo9BTS3CVE/Nuw02HM9aT10JhO7cAyh8LL+jf+X4tP7T
u15Qg/y28TF+ll08eQQLZdB4Wlra+Xe9k2O9EtL1K9cU+Az6wP820N7ukbcinidFl94GsRX/E5iYe5CgSwMMbG646Z1U9S0Mtj+yS7pI+d1Bgy6glBxw5uwbqF10yghjF6FwY01v9yninnZ7XtLuiKC3AEhs130CIeRzEEUj6i58rjtOGjWGazFbbnL+fFg5/nm/w9BWQNyqRfVHr1iJ8lh3pJ4Fmi5VZ0rRbP/tVBq/p9t6WQ38DfloRcaGqWkayS9a7NRHdIICLQ6mTHH6R1Sq5mSHOSwAKbcjSmxMcB9B8cuxzHEgGiA1/7rHzHTpSTfyZyyNwxyhuuiXHpnBw6IpAci2vW6mM/83+U9mlD6xgmVUTzqZl4HIUzOqwq/1WdkZALP5hvtJytAxAhI0xgeMqpj6wQh/+Ktwfm/8/5cxjVLsRmU878o9JLG1dnsqt5caEGPtEEZ7Z5cmbb49KiUzPWJLIY+Qa33Q5rVm550Kp3ieUocDtW42qsxm6fH+pMOnPml1sUnAjewIW2htoq8NKqr+RQ72dlPDK/DmTHyVOp75ujTgSE2lIhoveUDN/swwE2o9eVwqwE+zkwqdRsAmKjCK27D1lwGhLYttKIzpwSrEVcpTPYsqsKRbjj7PTO4tX8/XWPE0WGIhNzc23tdvUCqfZXLDlJ88rfQKhnV6rGxwt8GphDCzkDgawv24f74u1jjqxYC/psmFNR6puQeibnMPAor3T9nXh9KXddlyFAC0xotvGIwMLNuGJYc1hSGckBrQlzBhQ87zBywcP68cMvXkGGQfiSsvjactXtt7zg37kd7iqEPMFlAi4S2A/ljv/zDUE8Keq/69y1cca29PBtI741jxU6LvBKLgLL3f0+nB1bbeYDJxcvJwZ7RfDyIY41IlDjEXyphU0DPaEEo9uPOz887STo+OvgrUL8nsVBmIWc8u2hQdTXyYpF6Ml6qzSMBsz7DCUFI7rnVDGcGuiCwlQsmBFJUeg3t3mavwm+28XrL5c+OsUZEfnAUjC99CS1B5XVkrAW+eg707cyrcabCOiD3mrttLaFz7rpwKOFvSwbpq5FTuvYO+idgYiG2PNhuh8QIwpMeVxOpdMVwHQARo9k18pF4drLUk/V+g/2tbY+a9EpzMY/Huy0svPWeJmZFvxSzHXr3HVJ2lRPEDD6WKusqNKdP2Uq4unU7SsjvgCQ0hHvgTaWqoxjWZAJJpa+/qlfXzNtWLD0JmBqX1n/WtUeYAfsAaWIQVoDftdZLwJbiVkuVlrO9iuE9vbZnvUoo3FlXbm5hRHkqxTkEbjbUtAS3lUAxfg+rJN054NIFaXjpnj6lK/bKSC05QX1Bu4lig9aTpTKW0+WZ2BZbWK8ft+/wRz6FhGwZwo57gBqT1UpD8I+/0CxcAh3UWboji1RjwIZuLDV1PVShoIhtw8KhGiXfFAei2kHQXWQ0bMW0uwR3kjstyxKzBtqwFms6FjkGxRJ3yff67Ci7VMngRLPOxCzjxGyADaxYEETE4lK5B9RkVIQFMWSBVYCDCbc4v1xSuXRJX2aVEEo6TbRZZ/yPOUAyGepp+SHygBRClngCvZxCIU924SeMg10vvZ3+IrBvCeMaMij8XP54fIEIm1tiQ0t/gJGeQaY2DV+ZKuM708AnFkId6eh/2ofBPZm13Jvv0je/do3SG5CqJQoQo/jru0Hp0gz2H2dli6+Lw+81OW4GuPjKqwV/2gvQX33jIl6xBTXmq4LCMljm3QS8DpFeiSXUkzSVwOzJoAOiOwLKHFF3xLjH4KiDK3hmS9Vog7D0zjLgb+CqV/s7MKBf+ZW2dbcXse/grqoOILS8IOUgAvBMVLgHcTmJ4a8+xseiJv2c6GG/0ChoVQji//RJ6xrhojG5uoqFKYxHwpYedX0z38ElmgwqvLSoBs0oxXwuwxcR9EFLQo5gZegFMiXvlakx1fdOqQGd2wFSBvo/rbfFJKLCIO7f73ny0r+4TcLg7aYUVd7UxMxGMn5fnjqrUVhY0OVirCdXJO1QnpFguLzHCE8yuTp8KdtUUdkpJkgIN/b0piTkcGt6ZKfYkD41HSWEfD4FYFX331qPPfDNnZ+YLYUkl1gJ+c8gUnI0oTmuiZpLfDPFnOV2SUfjvDTDhp11Uz3tOHCxp7O1O/i2hM4dIXoEj7AS0/b16w8EMoBN8K8G7w5wslgWys2LJDD8gFdeXik6CKHDI4cfWuhd+FGV8fI40OhScywznt/XU5TkaRyfhXuTmZ4dNO3Iwie1YR0xVlTek5EeMrp46zZY+NwNaP0A6/UN9wbJ/bSfbVma4RMUu87IsVjpiRmAmlawM+IluYHolN6G1bwJ0CSUhT9//NMURlAwxcB9ulLQBkbI5lpn1e482mDNaFJkCVaL7Rmc0RU+rDw+/aXKOrZQ/bxZgFE4wcjIRbIjk3evXbQBeyYqpsW0jkpwtNYE9iUxjC1uArvSO8VeDu20oQTVoBiyNACPZvLe9qkyTAw0soCc8KV0XnYv2hsVSfxXo9l47Ev2+FVVmY8zMBj/FCQguWXIMAXdDbnpP6xj6RHhc/e4kIcYMkC91B5ymhVGC6sBKurEFogNEfW7kRJFhyF1cqYXhShNr19UGWrnhvZ9abo3gsdd2hBr8AzF0MalYMIFXiAPfAFlaJIhy/VHI5uWEyMoZQs/KIH1yjpowRay6nPJzj4dQ1zgepCaTOkPHDAZ9qWQ1sW5cURalQm5r+yuuzLA5MEbXgCYWq6I5Kopz96gNXS+rNS+Cpp8y3Pm1v7RrtEppzsV58BV7vU284BxMxJL2sTBsEx035yap9IcbntEF64zMtXp3k3Er+uLE9gXEfD1GGjqDVCbQmpq2rBkpkMBc/3aDD+X6uQcPdWmplmGtO4NtyLvHKkN4rBilLxoexUuUrpq0LoycWmGiFoQZ9w7wMhJKAqES6b+1EsSxU5m5AYB1z75XcMU99SkPGx0uzTlD99pxIOnsSGtxhcloRAcpp59JW/jDfq5bBLoJ6D4vEjmsAtcywISxCK/9v6le1/U+ucg6e+Fb+EGsmjTS9Y66mgfIat7SRqBAekdBu2R2WCJ0jKMijKrSrkAyfB/V+w+vbD2xYfasC3t7rfqQEaLuJp/ILsf03T1PI2cgKLVsu2spM+8WfHM6jKMdp0jQ6Ns8LWmJR5LfUDOB/WBCCngG1lGvD11OVRbNAKWacpTsm+IMTdk0IqOBDAKui2l7xNF7jBcHBBcNhiz9TIMKteaAJo6Rl0ipu/iN/hFmeyxK8Z339xLYIJy22mp7Zl/CUxXTszPkhh3wLgnTpO/OEMdz5EQdX7sJgZigEvpo2ilzkDHf1v9s+ZEsYAz7kibi8rxa/S0O+EnV4kMkJ1JYaVAw6MX15Mjb5oxXAJS60vUICnrZzCey77RRIhlsO5GPLol2yxQTGSuatP4bdHE/avvNYy+R0mUU2wAYAjDGSefRsk1YKfJs1ylwqRutzuIHR1T0d6wccvgCbFvL9rlZemJEdbfpILj6IAN8iDq5Ds1Y8xyef1zl+OPlUMZXT1jyNjn+jW3W5vJ
TqTLgYlLeCnxLIY1hC4GX7ctMYsyGZctfxQ4rNyxeRRUNNIMRZseDKbkPhXoh4aSgPVQQ1mn0hVczUoiCO866oW4UOFzx2VuSrbnaOlXMmERvI4xamEyWiPCDU5zfDLNjlm18hgxMQzk2pF4LfnnOYQHkgm+WMsGmMaPVaYs14aXjJYOKqs03pwB+OcPB1VzJ4WPCbf9zG09tQH7S00GacBvQb+wY8srJyOQj4dhvQZZRphTUO4pv6n2naaUJMFwxhRKuo9+6umEvqyIlJ/TwdhPwzYD9aPJKJQpWMOIePnGdI45G7lRuNj9pTrYderSRIjyzZPprUH1eUBCJjWqnO5+TEd7wfevUid3W0mfcokps7/vt458eYf/Rf6E592jsGRgI2lTqocCFhd/4Mn/0u1CV75bktb9Hp8JelVE+BTN8uix5hTFREup/gTXOeIjSfisNZZfR7QPNRZab/3xBKzBLY9HCqgcCs4YN40D9ncQBE+WVuvK30zwEd/eUomAfyqaBk8faX6+BTPcXcIUhnRIcRfhHtTRtvNLxKGqnMBhNbC+YBhWANypAYBP3WJiG3Gsr98khLcZJYbByQMTvIJisn0cJQ58phU7sp8iyPhGjG5eH/inOMwm/PtnuUmWaQXbbhrxVa9gPP/mBd/YLPsh38K2gkE+wx2HDV0gh6xJMIhdRWdMDY98tUPF2j1L0n85bwldwK54X9K3kXurPTqb9J+w+MbBwuhMYJpmZ4qQYccfB0hiQq36DAckZ5PgQwVhI0Z2wvbdfufEnINPyEIRMlF80fqTmULH3BFPHG3bspJKjtaRqJcCLK5lf0XizVhSNlek2lbeIMnfjn/RturmdYGKLcPrP8Eb0w4f6wlPy/noaNwjpHfKRh7uAYPLCkehyYeAlM8sahs6jIla4xR7cLxCqNggJnMLcYfh8jvG1kvlrlRS/JpqaMuE4TGuDyFnI1Ssdks97CHjxRfKLE77PwuMqIdqdg+aCfikBmBeTPWKP5R6vIliF25AkfRlpJgRbV6gQDmpN/pESqp8fnVPGKUU6bCznW6dhQyo14RYm1Z6vAvlq+hh8LGlmhNIoHVwGWmpXRIiIfiknGEmD/FmmIkunJUc3IQHuNHATKiVrDNchGBZ1NgW/rbHGSNtz9QkbTyZcOMfZg0pQJ5w3EV1xF6sll/mbuR35uFvDorK4VHW8ZNMkcK0GSRYSNQmulQ86Tc7tofTwL+J6An4IFK8NlA3+wEVLJ/VYMPSQhJQHC2z+vIjIepyJhF/Eksu9uJqvnJUQ/ZNhmW+UMHEPTWcK5Rg3ATsicIW7GsRX7X3O4SP5DRV0NtUUyJ79xtsQMOICAcXeswfQqYSdTGWc6zGSP+RuT8g9T9r2LSMQBYxNy2XDoyAfduuNepiineSaO1HFNlczMnUohZ7qfs0sKhdY/4GfJ5wouYosW43MLsesLBHMUQP+xLpBEoHqZB0UEFSTfDSZzUvZi4W/685s3L9cNqV6ABbD2oORK2wk1I+BsQ/MfXjDaUwrursIWVyEMilTCXgNO41pQ/GurDy7DNnRS/TlEBsJf9rCNJzNnHPItOgT445TZohmKIiY16kVqaWQus19+CDuIuA3T29xjY/STU/G5Nq/0r9TCQ78SJpioUhiyRqOepL/C1SWvTH7UqSYsVTOUAm0+yl7mizdeVPf8jLK4LPAxks2wGeGQ4gceMrUDzUjovBOLc9cHX2/OkUkKqi4sw7svKYDGW+ssVyF0Polbc76xQQvs2xN5FZqNIYd5w1M6VwoL9dg3ge0w25EWKjr6oZJUORGL3k7zuQu0OW7gPCwQqA17a1FsVt5NlFWrk5En0tBvprw7lD5yDauy2SEaWgipK/Ug2qoA+41BrDsUEQ6iDOkyLJ5tYqMl9Vc7mxPm93OkbGSpSwSdQvzfHI4OSrfOeaUHZUzMUMrPGxgKbrTj08UaD5mrelGWyHPSt1vZrU0iI7UiA2NxxqNAu6zLQtp0FpmJmy9zt0/Cn4hqksxpPWMPtksnGgZP1iANBpH5bSEKBX4z+uTtOFa1RhBvBHoaHXD9cEfsZhPFgJA2Yn/qzl0oBHOGmPJLnBXYJ0RWQ2xvyUhyTvm1SD7+o0RHecA4vQe3jBqVcr+TCZtFgRz0+o6E0sJqv8o5TGjATIA9Fka6mhAuAutoca6ROTDoLK5o9SkTzdFpCuOEr+CbRp1lRAcQ3Xnuj+S2F7R3p5npUNytkApLYllPCtVYRjGHtAubXiwvt9W1Ql+r7QKoEbFbXf1ri86M6BWXHBmjcERFdlVylar9Dd575kf+8B9LLEzWhFmvF1/a9tbH2/ZNAzcaBMvENknz60Sc3lFSbd3XNVP7BGEtgDz+ECV+z2PaCiHTtD7Jg3B2PONVASPvkToGG9DaLk72HrJsTIUKE6gAUjqdXkvf7b9l7a5ZUgURt8WH2h/DX3i4PEg41EuH3x7eGkn/sNRBwrQAsxSFPhnqco8IstsHK0QxlS4YpPzzWGVWVrSP5FocSwDOUx3w5DMMLyZKXU9oeiyK7Spwc9hKa4EmOwDLZOKIT8Kqkwr33IvrPlfn1PDUi0ayTb3zBFDv9XfQ7iWDo/OYsg2S1M7Qwl98K/TTPhv0H5fUcYbdoNJWiI2qrcexj7u/29qdmh1No7555eqs7qZRC1RHDZgOPxFDu7ivhLC6DSe86q8LlYZO92Y6v83cQxWDdj71VEryD6dhdNI5XQeHel6GsxfhWhETpz7VxrlzQDu96bNtjnX252nbVFMUpqJAiFHV5e9bmP7MgCcB5gj86NdLupmG24kxkK/aPfFK30CuKyKO6qKyC+2HowPg/5mVN1zrRRoWbNJFeN+xSptCQApxCbQ3tABNpZlJWKqU/uMSoDFnvbWR8ib7mcHMqPED8EJr8w6yQTUpVP/3zl1CgxpCTYCQakW6uYASCjG66IMsPGZZDLJkV+svoMC1JdzLQOFyV5+98en9/svA2NDgJmQPplbEOAzTEpJCH+SDT7Y6T8YDbXsYK5EVDgIP3fzhl/fX5bDQTeIS5xFjaUomY6WWhnpahqDeE7Nqltl7BuRRdOBHDBD/bvQQOgmc7lecdiu0QlvMVyJLKQQLGy0MIT6ku1sfTx/TgkxpVdn2CcdQGVsbgc3BWbEF1Q+d4euPx0V1mNh6nXMI2uhwPbZBiAKpu70bEawUXREO6K8noECGuKhn4HP3SWAgKIeIK7xgqCK8EATr7twlIN2eV2LbHfDk/TJMR9ovX6LYdFE7QW9HdiwWk8BMNk499S+SWoQfOLdqKH3w1tk2Od6mq1Db55u1wzMoJCSlcAku0x0R8nesIsy8FrzaFfvzkzvQkSNoyHqomt6kK7eldq2qmetbHV1KXPGTqsYvaOx+g1ae/6zWafdE5FFjVltGeJbOmKPJQ5yfX13Ob6QuoSpKuaG7nKovHN/XXWi/OHlck29tFvIn/wRGKbMPWPNuFXaGbG2fiMjd3G6SUMxvMpAbrVp62b9M1BB1w9q+WuiYZM1qF4lV9guTFFOtwXfHrZqjSWRBZcROfrhMj
Ctr+UWkcUkOhAqu9QE8d7rXcoph8WA0SDGJDeAt0s9YAb5QA0i268mamTbHDV6AvsVQuDX5YnHW2P4bR1UmfIx7TDD8jYvsn0tkDZXHXlc0uwaQ4Pe73R0hRLTtOXABugrj05bgoIzivZGjtaSHzAQ/MWqpXUYJWD49ZmFil8Zh0D9yaHgCvjYpXf1GIkeyLNJNjpQAbVgGGlIOyqvHimkKZ/RyduoM9Orrpjb8Hdwu220iOSpGte8xdJArUBHGfvK1dNO+rM1uB9CeG3pT8je5N/4NJezNeWXDxJEFtbR4PrHF7y+IkfW67XZSnu+xymUaXORHbU7dqG6LPjbnOAxobxwqd0OQOKM33SNNcwFK+nGm0DgHsj01o71dOLRmpnTFpNsXzZ1WpP5OK5ECJuvGe/+8ERqzjOf3JzQ9gfhogtHcOEqLzI9kRq9Nz5sexTZ1yHq5XpbpiihtwdDh76hhY+nUV0+Lyp9cAUtZ/JS6ASnEcPcKFAaRYO90vXSYeIa6xfhZ6r/RbYlI7DflGj6dAeCEWVF3qhBtzEku34NWHjxoqgdg+cDLjhYgi1g4A2GlYi4SpA25PcBDY9mTbph9ZytvjryOR6vZ8aO+LQElDSa/Py2pdm7LP6sQU5mazgK7HsijLeQ8bw8cAg9cmfcmIsLUcl6DQ9izdR+u0Ys2mvOQB+rVgd7kNJKCbn4jB9Wj58cBH3NH+KYp2WWZ2O1w8KODQgz/UEaiYO3MhMpCBcahjQnBn9fXZgyG54HE2SiPVMQNCPJ+o9sbk6KG/VFp51R9R39u64LQo2l04eaIS2NneSTCAV8bFzkEGvXi9nH8JsOPjoIm/EdjJ135Xf6VlHOhYU7+MYnrXXOwLDS8CN31k55LiU1vj9J2RKHm8kNwqYNXALYaAGYxVpvD6f7Zjc/+QgFHIu1m5i7IiIG3CANjTAzphA9Ot7hVR9isSIqxhyylEDoGQOg4ghpKBsfPQudjw5GZHQxDfwI852Znbl8R6r+UYysGx4h+YSJXIUcbXVKVuCnQhMATz4Hwr/EMjYY79Z6ptP7K4kTZtj3E7570kyaVJa04wnysPA/Zd5Jv1SYSiCbhgxlh2K1hQBUWiyITUbbmtXQ0n7TjOBJXPTsStH2MzMkP6iFqVz3oHnqHmwPfcnvF9nzyKEkTW6Bl6xGsOvCU1jWK3Xgko8ZMztrk2sbesn4Mk52QanHx2pYQf0Q0GVDtTNEEfL+Ab57zL2seudZtJLPttnJO4jbbappFTlHX+6b4WEx5tKRZ3kiB31PoRKhodKPCMr+RH4iGB4xLwbSFu2Dcqg448B3R8qeVQ5G3ndZxhRNv6mYyj30eAFv4YfvmjIvAXrjt3MgMS4GlvUHryljrOH6IwaU/JQuxE/at68yFv2w5DJT+iLLQ6DPiwh78vjdj3hLAppRCVM5ShpHzLbyI9VYMR2Yw7mmI7HjTuBKSzvpmBq9XJuKHvYNfLaYcohOih6aeNBItoj2Mkk49UcdMFEQB2mRCD3ROe0YZ5IdPQ+3ZkF95UAtWnWnfbm0j+rjB5ieIi/SAzkw/7wFY4R/eMRt0KzNSMn+dbbblMLy85y1SEBvsxpiPSeSH4hnK76SaIOiETbF350A7X/CDYBcczTYoA6qaIknHESNn4PQxHyoLES4b5roUuvZsUKi1gCOpogpj7xUDaQwWUfyNTmVwRTy7cyGdqJzVCLQBWrOAHYBbi9Oc65QyCCeFY/lKKpCgQHdNtQGYyPf5C3gmDlLYgOS5jBdETnBPHE+up3UY1aE0hKHRY2AX+TkLAXr0rJedA0UzrqF+jCCBuSShAOqCgpCuq3440XZU3qRGe0CGWDkMH0bjEhWzDaW7JGvxEbGBAHoezIHywiEF018yHH5HHKra++xefpj59nc7ChFwTT9RkKyA8xC4LLzFftGnXvqhU7QCLNQFbZ7bYHqbHEM4w3esk5nkZ8LvJ6379I9iNSOFj5lArxvjPLyy2QXpUfE5L2OFHloaT6A6tZUxhxqGhBZVZM3D/EzPc+TGHkexyF4fMsM+wcRLIO8oGBoV7356oEyqDesIIGu/vAeTtmqKFhnkY2rTKqtYdWOiisgQEtrRU70nXH9Ao98hAzVIy8OIHmWug3nrKMkUUdagb8SUrYq5dAiQe1uRfFitYKQ6j4R/6erubJ+MipxSjYTaBlJGVnmPQpwSi+2mPi3mLtlbV4rKBYyjfbK3M1h01ltoUEnMLOKQ6nrx2qHc1mheUBRpxmefyl8zUW8AOhsoXcY9rCLTq4NnNScTbRqa2mxpTx6moEJYis8+fd2Fnr5Mz5Z3u+waG/UbzrRg97CapMaPwBx7U+CZjOe4A+u6Vk6owJqHQkHi6BIvUbB6tZzZGIhNyWKZmTRnPeCgcP/Q5lbqzffNkHN9sEHdT4EueL2MNHhU4A3o6C5iKsbpYR2yjoyHXKy6TtpcuTJDiCEPh9v0UzG6bxC8jbgo+A+p8FnRDIa3c2svB0AgGyny67MnaS/wQTg7NwQ18t4RQRNchd9jgcL6Lbe2w7clSxFe3evL/fXuigIXP0HEK9ld6yGVcImOSqeRCxW2FzRfYsmEOmPQOSwqL+tbkglWcv/8NiascNfl88oaso+9QvAu2vc271XwSPeM+gTulpPGeQFRSfTyq94Qj5uxQlmsdYG8igkCJJ1IrPzBOJUU1PycSa64EcpqFNR11mljkoWAqu3INUwaIWCnzdBjh6M5G61N0dT9HwiKca1nzVt7MGaGOr/HmmnZcN8KJ9n7KoWy+/pxKKVXscbfIWbVPMyjNzUapJ3jwLeLYTRcPvQwNA6zKdUFSpN87f1wHc4Vqp/5vgwjiMPZibWzGfZO/CstTUupTsHWEpnQ/dhlCQk4DCMMus6oYlIjtQKuujB7kixyZpI0t0/kZpcEKCv1ujSQ9pvtHZvDFUAI4KGyLZB//dTLdCf2NcRw9rs5TjXP+itp3rN/Ed4JIDtr2NMYbDgZQkoNHWYRNogsGmWHAtdLIbWlwJqi7ei7UqstQeFjGsgzHFykNCSfbgiH4Goe4WzbMbE4M1vmJOq8zGbsIOAdlf4dXPUCZpRyuganf06R6l1dZXyVd1RVQzIx6UOfg40narwTf7FnTbZA66fvpA+UgsB/Y3ZXswRBeTUUuMqmGlxZ37QJlCb0VIK0X57UUSpkcXmU4tQPfEFJ++Epn9NZpnXYD7egTCyMugPvd9gX/VUh56rqIwP0RR9igmvx8P/roajGg60QZhEkHYSVOpQiY7P0EbBbXPEnRMwjJZldKqlVhkzBDS7fPjnJQBeAeyxfhX3nFrfxj6ojo9lBuWqkdxBiDPMRT/NWrq2ptZidirj6Krgkv+pFjmitZOK6Gg0VI1CjzNnu3CknNXsyohUuhGD87QqFziEqcdwJ8WpRwlHUYHW2BOAdvonAHr7OIMU6kiLX3XYAQxxjrSPU0+LbO4Xrshb7wrTC+8grJqUiPU8YP3ce9AjCUEx+biyJUmOuskq07Q+qmeI/1WPA3GfV/r00YngkYw/urC5Lc0fWw5YgdJE5dxHt
Q22WHGe98dCWA3DYFt8aXG0UKeE8cbDQhgib/9DywUVo74T0NTN46h43zvUvrfR2NpLQpibegpyLQIxABwpk7eFOWmCCCHy9VWKkWBis7lLpV8ZcgHF8dszJm3snYSPfBrT8FGPeHQt6Jqmu49YTAQaP5ywOOiDg+oKsCRDDIJ3FJ9XlFRGoNDKhXyFsBcu3qR2Ch/EYm9OY4nbE4z5PeBrvh1UG1t6oWoryKMgzH2ZT6sD+xeDg0bI/IGoZIuB7cJhbgChTt0G0k0KqbjppkX+gADMeeLvKtVSnoqfJ1HcU1FuAuwOcEcr/anGP6yFwTcs7yuRC++Moy5sEXfNg+l57Pi9y4FGUmONY/xjEQ6gYz02DX05O5ddhRlKAldAY30Or24LjPUfJkj37zboRJS4AS22b5EIu357ZsmjJJZxYZLAYg6C2tTzLVv6CmIHO32xJecnmBsRuWRmkB0fzzX7QPAsBmqF4EbfY7aNbUFrJpg3UhEbn/lQ+5X6BiFOMKCrLzLubenjYICktjSmqW8sw3VFXmtcL8ciSlfc+e2Eq/+VciyLN+hkhFOpSc1HyK8n0Oov8IxhBtNeZRO7/wDq5gPNhjLWuNpRWhiwCiGpgHHInm7pQ6jRS5dMg8RohSAoMNINglf7hjT9OAeVAowD5dxYq3EIfri2fSX8N1u0OxMzYPwObkpn6LdptgJhs4kF141qOIXIC4cVwize16hPKOzrKxVhbI1/nDPL3AilXLmpsEnjv8NsnN7Scsq7S2XqM946nOhtXd36wRcFZkpXVJbVwsSUfT92V9Cr192pFfi7zxaKm+3PbLCcCj1b7IBRTN0THJqWP+4uEJGeFhDJhJOu4UeaZm+Lhw62P33w3gFocfa9kvphc2OWDIDLFJY5rmSeGcrBL+Ygn1Ntin8d22NetBlQAt5PF2NuZgXo86qj31pP2RbSv4nNZYvxZiK5LErzDWftiO/liOEypvsiMXGkj7F2+GreWsSA1QGAxXXEO9hNPfLasVkYHc7RKFWRjkxKroQHnbo5cBwymz/Ywl4duDiT5PReKGwBani+GOvqwLpXDcKKBA/fzP4XNCaOE+wDJxeoEANJE2+hkzfbMXjM6SNcwGj401TY3L9aUS0UyHfQNEDK1E6jE/IQy4/LSCZmHCqykjsX1JUBVtqFWFPOkIncDTI8EPsP9ILrHL8GsBJfnoYRoq6Co8SI7EQ/1Vbq6KLrZuwZphi/rB5/zY6UZQpbvaX3gLEIJZGQ8BZsfmX/SWXp3JCOrBdmiPE7+G3pwPCHgPxWVafFolAL9TR9K8MI1iJlQjkvJOOGnjKUw36ezWLPaHpTL272vk0FUmY3NO8VYBrE+C4enq8UY/Ry65hv9y2NfEGU5Iscw+xJQDPFvTUo6y/6KfZq/pVq09QeTm2unHZjPp7vAw1ZLiGzrZZvzPURmwTyI3WWQvp3SkA7bkC1sHoy3OsBGfSHiogKa5lM7+t4h4CHOanVRPF1lCATasiwvBLsB47G52CAzXeZXV3k14vzYNiMSRZ1Rbx5ErrEVVcM+BM81XP6qCh+qGpbltiOaL6NpMUYK8ItplLBS8d4ilZpWtAiyY2hil+EzM5eTAjsalrM1bz61I/IUZl2ZreCtacaBG2aSnVFzpv4hlpJnO8o/OoutOAGlX0wjxecpvh4eZNLKhUckhY+MuwN/B6i6hg/3yrK2k8+HODTwb39fgl3z3JK7f4JGC9MEf6mcwwGt1iTFlHiuFPEkxdCdCsavWfvR6Jb8QkAq9L9KVJRpHtOJMeDTTt94hIkvZPbLdFyBbFI/uAtxvUsS4hEx+efbKaeaDxpSWLYjlw21KTeJTU2APg9Lv+jChTh55uYaGuJJ/5pdv+aVcryft+wtdXuN4nF2M1MWwaaw4RU35H/wK+7wmoOnyGYZpv+G70aieeD9eIcVUQbdVpmIpKvj9Tv4zvzaRLHeN9Uo7PkXQz74c1bM8FRAHsC4Wo+2h4WBRBY//9ArLkD884CCoY1FJf9ysIypqkhlh1B1l45xHa8LKUuvzzP0KujQwT0gyR6SQYF4WOOdzJ5iaPu3gCNIbo8FMaxJLKtZdjllmg55jp0Tu8EqllVyelj1ujdlA0GwXtj2CNuwjdUzvP1UoUEZuwx4ddSSz5KWWzXh3is95ykNidKfu9QZg8b03s/vD9CLgAwcmeWlKabeL+X7oSxXvy5DRamlbUYKGeyCvbFQavG3u7CUsgNLcdN0s3mQXxYwCIC4jPrU0265Xw0N3bDnlOQxuDkanwW0E4v5CDJJOadYyqRMX8uERqkIgF24xMXX/FVliZ7/ioHvfIUc2jmvgWIEGSkfyOPKwbcpHf1YgqaR98IvR5tH0hfknVJ38pK9DXrQI71ivQPJqACyJYugQrTBLfTlqM1upqQrCKlK9PbhNvPSvrOT3J1ANX7GU9lZi1GalXxEZFpH/3T+AMmRLQB2wQkCD8x/J4LDTwzpse7vG68sO+XrZIaIzjwvoySs54E6M8HQEiQCBorSfRPTmE+X/ZGMpePl171ljauhp4EKKeciLQX9w/rSPorYGIDSIkKdKTrh1ykKVlm31hOOhgdgZYWXju/jZSwBM/EKRSwG/oiDOoncbLP2LZJNoqod0FRA/0I+GslmMC8emwATOuffzdqj/LYJAMFnMtZMl1d4+Ta8Xc6QI/Cci1a2Q4xU45hw9slsaTSi51TcrsZYHkkxYbtYmwljW5rmLbOJpb9ojWZ+5HpOvRyQxSDIbq8CvgrE2s4A2REs6U6lax+jFB9IGEwPs0yHsOlkbMwvWFfJVp17QLjdWsIGH7we4hdxCAhOO/G/+CbB3LaFjeMRcmKfyCz6GCyLjqt3T5FLl57WLOc3RZ2Lcf/zpGd3wt5tXcZ+xPRSo2mrvoJ9Tcu0AAEkpGwXqTvPsXWM2z+345m/TcYweuQbh6AcLBGqn+kiw/VjysAXPGUrWoZuiBOWtRpP/loqsfKbgkdIpO3SJvB1KADSyjp2W60fAM9pJrxAlCsrSgOwdGFC/dqAg40S2LG5ZVPKDyIdGmL79i6CPiRaz5IgBcq69HBE6rSf7xH99BHRNDKUh7vgg2hqmpOwZ2Woo7pG3WNyH8Gsjw+cbEpAxhaaqA2mE7IG70KPjOxgfKojgug1HnCYzkLXIn2X0x+MVamYNMF15mYnA4ZZTIZiaoVwD0jLy3PXZVAPQLOHYIfaygC8Vi3h5/tzn3BnyZ2PK4ZUT21FUOd2aCJfb+aq/OhZqf39H/lm7AG4VZ+N40CdqTnhOuj62pSmCfaHUqmucXlzXV7rDyQHweR2+kcNMrMPoQ/MTLQCXVmKNArJw7GimYtNSFJ9SsHIMHmA02h+JTjhGHwIQHvmMoQ0nY6tyuAhTruTCSz8vZlyQdl7yA+eKnp4zgHRiluNPk5dd8NWHNN9FgyJp+hAyJzY9L8TXUUC8v8HFE+EvU0hID72TKcG7p5B3dx2ikwiHsb8b3XEV2KNLaiPq1gjKkrIvf5KSO+KgZMaHnSBr2j/OjR3UwgIO1wJgH0eseWrxy2e0l3
NbsUEEMPzJNExLCMXBO6g6MTgGr/tQ3hoGh7mlmWbmHc19XSmLMhCv54ZmoOBfEu7zYIdOmEApHsvyjvV3om4cLrdp5Q93z6J2Ooqih62bbl9AVIqYlwqqpmau6csYnhi95neOg8yGAj5XRrqsSO5ectwM/gqC+zUZtFTpeSNnSNIdLwirLe+2LHJt8K/hJm/T8Wtf+9uRiwBy4Tp3kxuQK2seRx96U+NvCpHbmkhsCq2vYgXkgU/uDZ6bdtb5re4lV/whO2Ac0X7itWCw67J/EL0I5264lRB4P+lwHwkSMyoiizu5o/VWDp9645JpTKFUo3Qh5mBixAVYCXYR04V8eRREpg2zCrnNfi30qaHsJ7ppyH2STtoxz62uL++Jj2GRU6MrexmYrHbawbDZ34AlIa/GSc5fiEQ0mBlRjTN42H86ljkZNLU7Cfu+RjBV8kq0/JPBLERp7BQu8REv/RW6KO0P1Ee8FeEDluhfPJghaaAo/ReS/urUUurVsYt55QQ2tQMYXflZcLZSMexAN3tjVSc4R4bOCY7WZy04r46Z3GlFDh8KYY6J9SpL/hdBwNxYbYL1W8MVvfLV2a3WNruQLvaiCKQ+zGG50J1vTDN2Ka2NZ8xTZC3qeDLo/Fz+zLbn2huCptWdAoRrDYG+NvIxreAyjpZxLhcHm+0RW3N86RuBETSMkcPenH8QW7MMrJR3tZt5+ympMoRNpl81YKEh7IG5Est/JOHxsSjWwW1zngYFZb2+vx25AQ+Pg1A4NDCOiQb++P5zWrzFsC/TWOmXrxTckmywIcw3nVPR+R5ohzOLWukhSjeGtO1k2REOKB5QxwNE2/Hv3AMmB3pbm0su0n6iY8DP/jDWZbhcQOuK+1ls2/XUiSOp7B+qoVySMhomWtFoq70aF9l/x5uXkCXKHFrZQvGlsW9CjWm9logKSa9dt9SHgn030V1v+J/MGGsKzMy+oz0DIeVjreX/JldY3Q9hJTShjA/7lBCFNtzHC03aJL88rIKtf2Z9JFPAQKFpi2aKmJqbxBHRTDIvZd6zwikJRupVVgACowvBAPcotHN3awJru+LvFPLriavW6bgsycT3lrd1rVYqKrWptRk0HxIyVmsxlu+mlvIBMMI5JPUImRNlQzZdYLEuQ7Mx5NUMPpDmuqU6BkWm9PL4YnF9JbMpshNwDg9Oqp87LGh6SD7cuvfO3WHFj3EHjkcZ2Rk2KFgbQ42OudLUJqcrBlsHXFa8EYJuDq8bNywNenIK494nTY0aKg96o1/OZIePF1ik2Hzn/7cMvJitBQJaaqt8JbGMhX/RBNqoTr4NAhij5iCa6TSBsDqfjyMaOxVlCQCD9alkmGlW59Cxhj/ldpcld/RJhUwlw+ALVBbxfSIMj07KLREwinm2SMg9b1IJKV96znRriL/ajFxn9AsRg3MHllGCRVBmNy+lh3ey9IezY155HxWlccD4ZKCeAmOkl3P0WZLpJlm76FR1lCydKj4x2gheXg9CJfICgK/87IAOD4G6wgldehQrwNBc/+FoamCay84DrtygoTwKQbxVkVY6IsQ1/LFcwvVWhXz+/tCtJNMgrl4mBUnHjzGzkb7bWzDd6WQ0CCNA88dsoMXAfOfC0pZ69rmBdOiK2x/QIra4ptGWt84QGUhP/WbfiM7iqA0RxGVzzxvGp/7FXtRVlL1JP8MZWnsZ6KCudQF6WUP46ACuG1a7WZQiBF0EsHHlaj5pcah9U4TcmQ3BLqIr0wb7mnKqueYn/rPsdf4dvfAKzpmQmWouFlHVAG3SxzbiOCAQyG9FhqchszQ9A9W963bjnJrajjsZHWQ6jdUap/VxQpg3ohdoj0y9lcBuVoZW11wI6yBIOd7un37NHOhAk6DVKe5xmaRyP6QloeQ1EfXZE0K1g00kl+SrU96hP9y4/zr10SzB9G7athe3cljqG8OlJy4FMntGIZEVUO4nGbWhHUDeuZlpkYWVC0h9NrTuhzAUFCZf4OsLiJYkNtZvmUe4ixJUhJwLY9meXC7UZI+9dNsdGV/O5jbVIokfsq+bt3VhH1OcSAzL0tYmrADCAtnduUolR46E9fD7W6vmv50yntE3KreXcOvaHTS9xHVkS+EKiqn98NxHmRpjEAjlm/qVPgrmHzCcKXS1cHcPHJPu8UCLeTpTrLHvdLm7Ngc5GRI0OQ18t4xfaYWW1NbhRfgd713ptccZYZsK5JNZ3dzBm+U0TzSy6RdTNvyHxivasOlCjemE8Ipcx1BQWIAITIAk7BFSkuwZ9MifrGZ3Sq7okStz/or/eK7lN8DpbrH8PylQRBKXT7Pz0OxtnjmW7RPZip5ndhUrNiCCtbnrXt8jtTgFckMvUhmj8kcYuWei0vKCEqO+I66MkB+3G3HUUyr+lZ/yNHIpIHzkvN53N/J92jx6iEK5Hd4HPhe3js4jgMaB3hZaejlsiOfmvHV885RNZtrr/v/60+DzFqTmhhHysFyEZ6TkvnyyeDSRDQp2NPCtB+M34Bb6u8kfcAd0MF20CIGUD1MhSXaZSlkbKEWFwcr9y7EvNtxJjhaqjCaTFaRuQYA1sDROGbMGmSMdbwV/o2exJNhVmRpgPldrV1czM+vHd0i/8aY+wDP2TlkL1cgQJg5TQJpt0/jb4LHFvcqkDJcY6JEs5ZEwUbYE1e7IQN52H6GzLK7Z4Dk0wOuXX+tc3BNdhol9fY856wEMe248RU9e1mUuDMoIiEjbrfKv7xTdUjzeKmBUNXtZ9kq2XPgYDFg7CtKsgyraFNpcCUh1vvEaFeOasCFKOc9iu7kCztf0p8anvhyd4CYO/r54pBPtL/ucC3BpVRiniQmuvWMroljCkN9oIxpeEgjf6poudhlIS59jHxu/fJ4RtVcUrdtmYyfRc3Q7SQJsEHRyqK+gilRR60kXKljF8twWUHJUXRP4soptlGH1/PHosrWIkScrjUF5qnlvI/F1sjofZtpNHko0lq4gAhURzyPCKJFn8p+KXsn9hdQbPiJWpu1sOGG5CvGGf40bS1hdvkbqPpCJwekkSkaTdx2NOQ0e4kZnDZ9B21yW/qDtBwhcrnzb/6lWg2mFUUyTWPxfCoze876MOGM3HkcvMHNdMM1+TOKNSUbhtsEsimA/eVcAGYRVDCey8H5ktJfWnCn4/WYzDOikm8B485rHTiP6v/z8B9tmvG1hf5gkSF5DqEwAG4R1/cenbFqJUmo4hYP+kFXFQkqpMXmr5+IiB/MqA3unR0wK+FsdH2D/ZDsZ10dos2YctNvtRTn6Gtn5bXMXmHaoi9Pagot2KNhofy+VhuYOMO3k50Q9REUQn+83oE1rU0t/+9YAEqG5XGZml99ySzomw/PeXyjx7EiLUOaeYUNtyoJIUfpPyyhEZaYvu6+ayk1BxzCfRB518YXzjKJ/oc54x43uYQcp1N2fzrxPUNKEiGfXl7h81PyvmcxdO1tewjeQPnbCbGtWpk4P+WgeSYuD8qXRtwLtUx+EDPMnra2tEJlqNBrzMjlORW7XUzaBxEHn7rQMae+L8FZQaU+DzjGp4u+0FsJi13i7ILjcV
UxJIhi+Z7X5FLEYXb6mhh31wat1m8y2TpYDVuPJBLOxHD8BcNC1tO8beh1+sMSM5cfAelbN6vLpaMGqKH150vOxSZQ0AXFnAsgDmjjb2CvY6IlEGrscSvTl5PWQql4ed24DmMVUllU2DedqqKW5cZuwqJC0xsVTMFxYJYaO1iGG/YMbGP2/QP1C8Km3cbX2OHMnk/a0UBO/1rQ/Q6b04D7LsFjvZvFy5hW+f2J399A/Wd2kXWHs9GGTq9WGTVktleHSm8xMWyuly1aVtxJp7dVpcwmVKo6npcmXRwu9aIAAE2sTBf8dkBeNXGrWtca+D2PCysWtEO5Jnpk3iARS6IXUGAtLvcYAq2M2Qj+grYxwrHrTENYGtFHY3so5sFkOW1JD/OhUUJl/FuRbOjkfvv7cYxCfaIn6sK3kpoIoDNU4WmqLxHIn7HhXm442v/cXWaCYOgo00FndtT8u1zGIEnwDCfk/0o13r9hDC55/RLd/3BGmrbsY+MC7ZZqkeGsdarso5wDUSWH7V6xpQgD4SwwZkj/z/BPvilT34FHTYYqCztAH8s/zwz6KxlzW/wt+ydiKE6KxQ24o1T7GpNcKj3+d+3tH3ifoEyo2AwsfZdhV/JpCK+D83qPOSNnBFCmJanugFT4SvEk/NTgkKkJtHqSy/C+OeqiheeBQDjjqluI0mn/Pelfc/cEYd3vCJYNAFdN2dSuC8rO5GHjA8ryFTsynHmXZOd5YUl8sE9twlxL4xqWqoS/Lh7kd5rA3idd9ucj8EFry2WQjMug/Zij9k1AeeNa6x2bPTnxuxuoLNgfHbHTxvglpNlGiOgjJsF4vt4X7vCyAfOviZgvwLe9J14SELXtqV9TIZ77x9Nn+gGoBpY9ej6Bng+SSVQXSid0NCfJz2jRzrnwvBa14u43Xj0B2az863FkZLR9pIQy2hjr/PSD3jI5y7D6J/kQq9Gms0xaPXxCXdwhDE7RvtWT5kbxl3SERhbOqWxVEx9Qu1hmSJ5r4qLO1C0T5pdYWlm2/rJtjrkD5F1OEvWCRDJzqWWLuTyr5UEsJxlitGHyphOP9xa8gy2UwcMkJOOIiuqpEHKNNY2FUukJOdzuhJFRhz63gj7KNPadleXteoubCezEIC062qTJQhvVjYpJBeHxp37nTZ8lXF5uZOIDXI0l4OCaPyuG3sGBHpKSFDADhkxcZp5pbJGIyIZrPov3m/PE3Aypcii6XkpslpoPJNPR7R2zTK/xAMobtxr9A4Wb2yOthhaIAZ9RSWLVX0TCQk0O4jA7dvhqD0w1VMNUi7Yt0ewRJLYqhXhzQimMfrtB26/ya+lX7L/KIJ9acZlKkA2KhW4Ci1RmPpHzGOZohFEB9ZxrrNTD2gLDZ6OwNe/Ee6PRgGYgAAAIxt287Htm3btm3btm3btm3bdofoIHfl5AKsrWpi92ZUHihF1eh8d0DNyMWZZDgPuKhfx9P7tNTDQXkIxk/X0MuYeqGFeXi4+EwVFjMnFSSQG8A/TGt1iDYUxHsLPrHJSQPIRDbNcHEBBcsvoVVUBAqSzTA5vQ/22Vngjv/VWQIYCvo1fpSQL+mcfQ3LF+3zD66lxjBr0s8IhCzyyZlxSMP/qpiwg4u1nfhbECOWTsS6cPjd32pWug6JUdgKnBl0u8qLe7ufxGfeo2aslOx9zqGs2FheWYYQv9WYP08JSDqmZ8C2tuJyth6ghDyFkQpCow9hDjQhQh9CVz66uqYyCjZ7OvP3Tt8HYVsE0JfSKS1p6iBa7s3dfaeHr7bJKdYYnHKmTkQr6tA43ydY92TccdEhIWsYbO0Hpla1n4vkYr76zBWZ4auSjd1OqVF+WaiskwwBnt0JBuZvb5/GnGIrct1aPuRtK1aVhoh0HFIcNfq7SficyeHcN7lPa5rbU1jrBr0Q5AcutvZYgLYvj6nJYzZ9B+8q8zdhSfdYDuK9gLL9+LOHG1E6G+3N/TYQOEOK60OwXvhsvHaoif+x4aufuaywncYGFSqF043X2tMxfR6EqBKikfZMQ7yMI8wuryNDmKGMyk9rzqSqgQ/tXUX1wRNh7JgCA/k7GuIzwnl2jcPSaRwMO+vpOR66bqI4QzxWYvdA+ZlfuL3EerZ2RXMn+rDG4DpVPIO9NC8M4V2FVx+zWBbuqF98By+YA0SrXq5i2kKQYokY9v1mTxNd3PLM+WEqXqnQGLyo+YkCRa3XjlPtu5ClKvTTXiNi7gfbC4xYCSpFktqwpPX75Bcv88uWZHuhuFek7W/dssl7Qbwj2QJyqBhrygu3rNwXGEetvxbCd5r9Vnh0zTtA5O4HWQeuGlm6EMQqLjumh16uho8bRLdvS3b0hvsXsvaYd/GPV8IItteDJUtQpA+B67EBPaMa8zW17RHZ9cARjMCHEDJ8FxI83JnZxsXdWAcox5xRrXyRMWOsl9U15xYXO3NUjQaqKAW0E5+5ldN9jXV+iCdXXp11kEPeXWCtST2n0vUpbDSdXptkwIrx4cu22aDGMjydL/0b7aJMVFt4XMKsLHPGP7rQjKTmR2Y6I8KD1W15GC0Qa9QRhmWb98w8z9APkmuzRqZQPFOMDL+iSsRBBUH2425rJynuCa3GkoWEPgQc5wXwabX0hrsyxW9E3heqy0E0qPOLvE0c0PBPd/qKNNFz1+01KSvMUoQju9BeuIZInlNWYLPaUbiWqoom59KhG35Mn73m+HthKxeZ1gZMNpk+OtoaDC+N5I+AoOUlrXHc69wkmKzd5Wgv6aiygwD9ZgDlTfJqqu31eoAaZxqMbt/vOntwSnVj3mQiJ4ZnNnW0e02xV4wbdpiaKJpdclUnuCfAQAFJkpt1yIwPr4QkDoVe/UN2r2YbaSOowuRoSi/XoHATPhB1k4q8tO/2yeJKwe1VrOtS1TaSkGob5WTgCBHWCm3jGY29YrleiKy5Md1gVmjxNIxC2bdn9f7W41d2uVHatMt8jA59kL6veKjwkUdlD0gmopjzCIbFw1W1ii5tvLw7mW4U/vEABT4zjccXOsrNIlBZDdIf9APDNi1XCDRpxef9frPtJ50LWD16Nlq6CpiK6VihNQS8TJy6xv7Lrpvt4YRDmM1ybKjvZ/FblaHvcQtXbC+LONmfIp4be0bKRku/EUHRZt4VB9TMkPHwHDfU5opu9Pd5kgS2TqBBbIYF2xFCSXYPiCisKTKWrFPaDbwmq6RaP3plK4AiGJT2S8Oo0qxmk/7nSzUDJuxrvKhOaCFxeA9avOhO30D7mVmcLdV3CW3Sarxiri/K3lbR1deg7bmhpQluysmgboUkcUoOMhMB8CrWvfJgAQW30/tgq9PvfHQOlDZo36bF3jtwyJSbuKvJkoiN2HRnPT4ItXIOkjoMlpZtGA7wuev9vczlsAzUsFzcW/WyZzUYn3zM5COvAQHT7nZVRTou69qGEo/D4xS9IriHP4PgMkEnGy4EO3mlX6V/xwwSYCW55+0PYKlYuyTyjY5lV1i99df3U96WcwiSBi3qRc9CMOyW2Pa81qkNWzgTGpplE7sc052lFu8Paf+YC1d7RkRTcWQ16U8LKNGFT00Q+SK
ie5LAVRs1+K7DxfqLTPBy4vGneXqNB5d8VB2adeiFtKulwHqDORgW4CONaHHLnGLbNckw8220UhrPEs4y45g0DuymFQivD1m7vUiUVYfi+PAZOLQsFrVw1Khfq5RYFqBINCIhpD6urFHGjTOlZiBqqzIgfJMxxpKJ4SJGzxGHfSnzZ45cDRl9J0ThUoH6Y5q4vsuyd6K9X7ag0GaW1YJzlAodNCenGBWerAbECJQbHQWcwj5tEahPsyRMu7jbKN+P/PKVV4lmWO2DaB1A5hCiX+t86O7fAoh2aBpkhp4Lv9CC0mXmn0o6zLfjtZglQMfdDEQ/tj2wIyz9b0beNQzZ3w+uZmlkIXDVxCi46smQPivS10+FJCl7KrNjWdx5eJdLGRPpfi0GLVfyFRQWRj/zMK4bGlMxj9U4p2bYy/hg2Sg5qJXRZ3bQ7omAKbhmuJ2M+0PKe+E817TYyRgXwf6/XEVAbFXZu8CiQ4+Pf0rbmGjdOV0VHL6D9HBUZ/6jIdBqfNzZLpr8vncX7TD1EldiIm21mJ3rRgPROaAxJ42l4V5GQqDPzXfz7tEA/5pA8NLW3zPp6lX8ilfRuwc1Q+4Cq8KHZ2fkSS7Dk+aGCu4GDZJIerHCujeCzlNSdHz53chiOaPSEL1RCaqw/8hcihMfmsby2iBqhs4iTNq7j5eQlcx7hMNghRwQJ7r8n/FQj4niClc3Hs0d7SGfhb8NTMj36n3zUtcfTF1BeLjRz80j7Gs/ltDgOMfZgn4vTemrU9m6wynsXoxnWn0RNQravoqVZ8nEDHEy6QlKNNixnrGT5Q47OzYMr72V7hq1j/EfilWVK/JEN7wPYVA4IOMjFaN3mq5POXrSWM3//np7blkypco4IYwFnrUM9yV6xsUvEpRNd2uLQBkvb+EYOs7nZ2VlejKntMx6/7Huh0gQy7DHrKVom2DmYx6XviyP9ZW8Lm5UTN7Qu55EBhjeQwVJrH1c1Zrd9DtSWqFwLpOQ+GEs3wv3YiidwjeY+qgA8uODGMNGWSZqH+v9HSOFx4nWmVeiTaf4i+JEqmMo3X2Cc40y66Zpxvf0Yk362Xmu2rtCZQfl1p9ilGaYL2WeCINJazjDsZ7DbpOAi9SDU2+3imgPENWpqMIYVllhsSXiMYaiEH0dJEbeL2i90rK0QpF0pX0dQI3z9YcAH1ZhsX8qg/jIxltQiPr0yRffhT3i8Cjpec33A8W3L6WN0Sqzd+LcmXoGzSW/Hs4nRJ7ruTYTzZQlcQNQHg6cICh6L5HkCW8BY73WCCgHcYjCp9hkxYPTYhnqMiY5ZwFbRwvFkkXvOLbtzcl+gR/XEy8/glNEB4fXYK+hWLsOrOXsJaXDw101hYUbZotMMXYEnM4jk0ZojxS15YqPLcqnaN9yRyx1tV+5Z+Ly8/BWUxkGVfS6rxCMP6lZZy/n13SDBWrM8IMzkZ8tjN6Yt4rfryoMZAPdvlFHR4Zrit5A29EbRK66h8Q74V9iBiUi5nhuNeO/JEe6g4xroYuZgR3v9RVvU1yWj+fIGw0nw72MLuIgVxqCPQzvTOR7T3rNWPVwKYO5SaD7QOtqwZZXUcKy3R/EJ1kZp2Z+JjPxByEtTSdfkMDfSltL2w/JLJPBavS1g3OjREXpmtsZPFuwK+E+8reXJheePg9JW7FQ+EwHLOhVTPUULpcQT17xDQD3EgXQGa6NC+PGoqiv86YucHbv+G2pr8TQFKKxL4SWm9uYJGMfVs0jQnDrIDywjqEBX5fyDQQOcnch/GPogo9NPuKoxn+su9GRqiwBEJ74nNAcvR09BOm+q7+vNKDCmTCqwHlGN+t8BagmBx2SMH/OkSlZA5ws5ISPXGPk7HtG5qGZvLLsJIT+6FoIBNIL9wYyn0xYd0HRoxPgxFIEfbXqCzFuniyN3OZ9TRAp+niIcME/DMZQkkM8us5Tkj5bG3nk8xa60IQEFgy7GfH63W9wy3dg/ZmnQ6LCxrWRcKaliaJjodbXwXn1fzHUyGRSQIqOvVAwKAq7h0KPyUIPPBedC9AFhm5q0lkLZeVQxA6Zs7Z/HYM2lkUMWUijFxiS/rioqIFiQuH+3KdXYR/gnKOJ0CjKdbX8ThnROv0qXUZ33j2cFwMoZO6/1Yvi6fLA26bXSQ/jWZC29U/rIdoZVWEUdQh/uTIt4+1IjZyox58Ppx3VjMvT1Kz/uqY53O4Ln860bwv3Lsk8acSba+kAZk8/5akipUJ3MEx5GzUxWVPyPz/RsNiCA5rj+J42RCmTCquWdx+YPkrdgo73/bP2U+u9OGnTiRSQos9DtihY9z1yLoHzdAr9M4E4cL9rCXQTUCrmDECJJ5kuvitFsT1kSHQEUgxiU3J1NI0DrWkZnpuVMKBD9pHpKApW/b3m9YmBNB0AZS6dLI5MsU49ZY1zcobO7UROU9F+ExKcPBPxTGEHr1EuLpKRzzh05aCMIu+rm7MrXdutM3E9vV+aqeYtbkJesL8zEwzjvDtt6pvXk2tox1uwfkuSzwq6+aLGgwKnesmGQe4FpcUV/Z2Su0pPlgczaEhOu+OSwifbascEj2v/zA5YwcHi5qhbOCikPG5zDvIgruRzhOvP48xHXzmd8KUAPkwyGK6lDZ2bbuZa3cFzaN7vP+td8CE8dRbGuydzL9/wG/w6w+mEpPPLi7MUu74RK7pFoMJvgr5O2HPjry4+JjBj95udb+5P//xHgcIExneq61pDhV/rx9Rw7f/8eo1cpPmpHrxnf7w1VEo2KoxECVfM/esRyubAJbQanSfUn2YBdxC9Uu0W5oMHT9yj4XlbZeUwM+cu/sVT0eMee0sydwfwFKOYZ3C0ZhVH5hFeoVawifbzwdy69E0WuNduKZJ9Rb97CX984Ta7R7vfqIDSnTAuzpG6Szd8v5dRrAeF8Rolprz8FrApq9GcvufZhxYtQSSwrGgXfR/dC884hnRsQhoAgAh9l/YNEte2XjMERv43KzGeZyTBlqxYgEqO1sxGpYL6OI9ysGaXd4YE4JlIHewIVtWSEqM5G4CHUm77XTattJOYX3sszT7pls1s1Q5A9B1ELsr7zdJG3Xmh465Wi60Tupg+4pEpqqGDtxCOGxAZ5hdKn8va/1I8PTooe7PugfLQqhY5uz4jCT6d5khe0mOFxAFLR8ENurAwgLOPkbgsRUbcBHsBjcFsqLZmBCucVUBRZ4ZpLgNI2RiBZDr9Q01pgBZFQjdHCTSvQRnkYfigcvRz2X4X6/Fg42Vxmb5+5D+rAspQ4pEUKEFPFcedA0QNeqipmJPr7YsuUymz9pP26KQuAgXjvjL6NSoy4CNx1q0VUDTpb421tx00supBxNZ08HpF9aGGH3Wu4jpPjuO8C4yW7GNHIfAYmOu71xlhEnDdWiQgzu8qECpkCtl/FeL+zlgzf3lR4JpyIlOg+wjWzM1335LWlEqIiGC7aCiUPZqnKaHrsv85ofciHRvuKggXvRMSTykgrBc+89iJDpTuVFg8W7vvfs8QFq0y/Ox+AdDID6y9H6+9EXiqsYQC256jlUSoZ81HyeWSh0ol3UHLkHMyTx0Irz
PXTXtWwvPzdmd9qKHNKBGsLPZ4eJoNFLW0U0IljL2OFZggihcsDHa4tK9Vyan39acEw9Dpl98iqSs8B2vT0//su3jtIBpMadrRPXrWPOUhdtU8qA6wPyjt9pn0CZkjY7zvDdpiZoaSwMP9ZFJimq9d1TzFQgeVVYAUkDaZhVAzk367kfTYB7NecwbCvo4H0aNotmbwFe+zmc1WH3sP0UW+zEdy+u9TVebvyTlQcwhpQ6QZsHpcdJCtLBUIl4MpeFoBkslYTUL3bnLJkik4wTPOToCkzH92tvrisg5AATG9KJlOceRoQGIl6fzWiUS9uXtEsUX3a58QpEcUoHYhXQHixpjCIDFWn4jLAb+DdB8ZWoMn7qVfB12k3sVrZEK5E7nWD9RW2nKE+Aedb+ig410HoLxqO1Hm9z1d1ubmrULgZvBvWwhoN3SWTVOlYwLlghjsJCX6queZIRuaqdMyo/BOsbLj0k2vlgp/PzxtWf0a6ptdyE2xJ8MEdk4OiAmLEHMMs6vdTME+1oZPzB41zJGq58fT8HN1QMGYQQZn39mpAOHBmlIkfAnb4pBkhHLDeQVAi7Hl3t6avaTF2m+rtl5i9aO8Ib8Rs81NtYRSkcM9kMFaqBMyIt2UHPE7SW9TbNaZryqxBt/ae3BRBv9PmpJqG4epLsgD5vOPKdkjJa3iKzEIehB0x2ZI08QQha8+39EUe3a/VezpITUSZvj6dZ3XlMZJN0cmMqeV+8hyHoe3opOYQ+3p7YfzRitsk3scdeZaOKDPepIrAYowG11xsipKFa4vJIDYF3nOQn6fSt20EWkjs5Itze2+VPYVi986wEnoWE9PozF/DIpikakPOyvsCd+muOG5le7fAk40Nwd9KGdyCBtQp6o+w7tW6RNa47NLvud5Xy2HlD+DnL0QX6kKtVmmkYyOEcGgQl0CDXlnq0hF9fEaCGdeRX9p/FzxAlAIjfFE1ZdDpitz4TwEotMcmebWv+f1C2v89Ga3R3R3CB2bGDM2yX8xzBBRbMxECGKAT4VLR8GxLH8zlXykP4rxu0fQbejTtCru4dsdW3KqG/iMrZeaUoY6M3rAH6J3cR2MlUawl/8C7HHuM4zqAqLbJxxlvyFEGfJog3dY39LItkz8UQ0hD6tWYTFNv6Q/DCkDSJ5DoLl90+KQhgBKwV1qtXqzFYC1cOEiypcfSJ3umjV7AJIzo4BjA4bTZmQUZbZpJMceBWfAiVqGxj+hYGWpFI7aGRAfOLYxZzr+aECjVNVTo6PtAhtjcOWnO4Ma7RxUPLWhn+Yw+61OAxk6XkW/IG4oSzZnuVSqMgWlQI3bws6PZ66nYK5zqZBb8kTJsb3ibotuJB/mssB/m1nuXHEhsHEfAFzT3uJONiLALa+lVB5dEIRiyf6NoZmdxlXB73HqEIRYfsmyBEl8ZfQCLzKIqIG/uk6XnUBlnhlndmWD3PdN7V5p93Ru4nmTP8w6OqX3Iw56w/pQmA2siZG91N+9N78QTkMVuRKqkjBISq4re59EYYR88VdrM5cxS+fwvOoEOciNPTstNjeKRVibE1nU9MAhQnyDNQlq+kOpiKfveRocHqS4Kso4XjK4CeIm+nMYZnGno/MDlBxDPZs8mr75UVYXfk2JzU0Ynx5gYUzuvAsihQGm//3b8fWUfybPPFAJAIuzKQYTefPJw2EosavZFYqID0vksC2FLSY1Q8TpBGf8B5/8e9MutoS1O3swxBEayC+5GGN8rkPq0IYY8yWZr6On3QHm1FNHDVWus2tutynuknJG7b9yG58Lr5qtJzFt6bdz4U2wobqTwt4FOGB6HwZByNhGpLT2NrUSTuPIiL8sgUusHvzGahZ+vUTzeoeDkaUyllL7l9UXmZGZ+FGltbv/Irhv3xI60Sb2YYk+cLdrS2msJHacngg5pmCDKeYGO7POnfH4Z0Z08TX1CNd6aQEMtKRgXZ2j8UX+GHD1Q376Sd/pJcG+QikxYRMHZFfVMU8Lo4Zmd0j1MMLZK3YgY2N+DJRGrJlaEsFNHWQ84Ckl9NvYT+wziX+dyVe1kH3q/pWuF3SubS2+AHyadsnBok2jUqWhPHng/oapj1+belDQ4pcHVAV31O6zu20d9uipCBJIgrjCWSYAFLQ/Xl0JxrvS+BRffdD72tArufW8tYm9SXi7HRrbHOxY5FrmKn2XNjBur3C+Fnf7TEamBhPBneQ31QNYVAmwIig8NNfLuXjnf1eNxlCea5bJFsuEa+waxp7GYEdNp8/0JuDV8YkoSvNKnU4IscIk5pJFxztPUot9eWsCTVE/aWjls8kdjk/swxVtF9BxRcqpzMnSccT49Ghu0Cf/419D+9Te94FpfFU28xVcJNSuFk6KIU0hNxv/AiL3XpbQ6M6+k1643g49M+Yy0Fv08kdK4t4u6wBYrj2tA14KhAwE7yzLxQqpOW2m/QYS6yHE6Mv4eBxfn90ht4bBz1jN8uBDE8kq7OuupucTLNPQJRJd1IZWf/xmwCQKr2+kUJbas8t/DXOTPrd0any5V0ZOKJWGIhNrt8onSZG7DZ1JkhAPgLgu4WDGUcBh2ot+37hzmcTZBpDXTXGApf1YZpnKB6TsKk/3Yp3evQDM8g6g3lHlIuzM3CpITpRvvZ+egWUB/eoWfVyZ9s998eMfXUMbISFBgCOweuun1Vhd9NTJkxqVNTjTB4WEKBIAAImTmH0jYjtHpVHDaxZVmWyPb5QVTQrtX92Jsm7HRVaqe6GCanqaolj1ON99wAHvkkBauAowTDVk70kBgT62EJ/+99UOAV2E9YL9I4z99CTo+V3Rx3OlPg1QHdkfoac543XuZaaZQ382JHzgRLgLjqn1MYKac438XvR2kAjnjO85TuhTo0fYBMeNJEX611FVS/bBMAK+FmQxPtDYu7Guiu5oANCTxNkHnJVmBtIh+9RA5WbD4/MmGN25LNYPIjHQZR+vqjN28VGy2SMxJ/rGrNJGi8SB/yVHTp6xoihPuimzK63WDd37dT97CRVPAvaRegdB2iVqNAOzFUjS1GS8du6oZIvY+RAveSL3jkqRboLyv0ZQjdTGxSVxsgYwxyjz9hj9qXmSKupj0uKeMDoHNBfd0i/NTGavugFPn1l+D6D1NFL1m9PvXKUlU8U5wfRwl7l9k8MJJHx1d7VzKjbhOqlUq9VVIDqszfHgYUyNlSyjKSuvLJv+Q6dgElRBuGB77G2MadfyCE4iQujiX3DMimX8iBFdxCwzz8WpcZK9ODmzrVrJsoqnP6odnxWLIyASBAgyISP+9ol9F9OMujqlCCcMqGF7HR5lCBtcDpl+st8/hgSgGUH53n8AbST1RIYboFLUjriWkMCJTt9032mH0BKu4ICj7SMQccUv0iNzLeOIS2s3IkdX1QDZgLlwYhb031Mb+fxpkn2E8EhtuFJjDaUEKhn1WI4uX/zHcR9NfkC1yR1igJeb9N9bYXo27pDwWcjH1SNYcztVerEXlrLyMaGoX6c8wUtpQG8ITcpLyrnxnumuGpHf2ru0xtUMI9ZGAmP0sGL+Xc3/zgIRF87q2PssXg8ucKVue
ATsr5T6FKff886Vezq6iLFfr0LF2gbCFYoud+fzeepGeGpSJwGRSo7x6oiHaq2dacL2nPfy3y2cJn4DzROyskmFazr3oNpUXxyKr3uhwIoJ66sao7ggJIZKIxPDrdrM4SfO38OAn4J66btaSR6ZUwjiqbhW0PiWwtIXFLfexq5sDXzp1hjOnz+XDGTOaXcSZtv6/CYZ7Laf/MLy6UeMM1cUJd1ylhbslp0rE+RAlUzYDDtWTnByT5npxvtheGnMZUCXUo0W4K/Bk7TC3UgobLYBR6n9yk/F3hNK/0B6Z6Vqg3qSu3VKogpLiKpx7yePULdTd4PBHpdlZxbbulzZTaxO2aeoMs63EB6vCZ8aMM9Re9RsijhPWP6sqaahz2zt4HBbiRbHSIn3gJZG8S2im5tdtjuqIWfaFhadQJwfB4TV4uauMC+tf4T7Kji3oQ10cnDLZv1jwudLTgATZaxdB73VetQ3kcpUvKMQxUV48t+1EMWOr0aA7A3gGwt9lNfdXqC/4W6brnI1z2depShcmTOrht60s3NSilgYUeZnYIWQuIFM6mM8PW4RIUNmRINd7OwyRtHM9AqN+Z2Yg44NCsRvei6E2k38AnkFeMosj+DW9fGZJVQAg0xZunyrwQx/f8lRBm1sCsq0iNXRgrte0tTfZwv10qDqeBvTmC3O9tcwql1dhBh/TfeXKTauT67BHvQLxN6D/8QWzD2q5GLUwsj0VRmamu+lapt3UaWEhoeu1TPvMLFQR7jNsKwes300gWpEqkOZGmSZ3vN2gKo4iEZL7VkFmQluunDTIq0N8v41HE1FB5oEagnp5J6iaU9l4F38KJqoJU5N8hsN5+mEUqNSTZS62HfPBZESum3ToPuN/r3S8W1jDAgmmdL0ReRdD9elNeBW/wtthDy0AathavM0mJdegviA7mpODawr0OxXuC0cb5ZAZI8pLljAZnydQGWGkEu01RKhGmKL5FdeKgXaTSoquo1Pn6j/nX5uHyoEtEgYdzrG23HQ64CfgEtbZ+qjahgimtor27/8yNppXOyLBojGFhHKW9mbzkGz+bRHbfEadYUIwT2fsgmBBAsXzQ2ACLt9SjxBc1iQ88WYcvRMGKnzu1xFmpmU8ERW6cPKc9ZGhQKRBehFlJp0xF87ZwtTlvOXIdCpEkulA30g9VP2gVefX7b6Oxe6jRDPxh9DFwRH1fgXZxDlXWgW32w+wZzjN4C/mwm3ewqfVIQYuDgExGC0ie1OzEd6H3Vl2/K16bxWrSloLD9qySjRejb2hIQI3LNmq1ClG2lQj8OeydLFZ0Fxxh9uZ4ZMiJ8Hrzx6BSipk39xa+AHZzw31jbE6ATWnHblySa+YP2VywWlvO4C8xlQWbCI3DHgpy7TyFrewx1NiDs/0Xwmg6JZzudcP6uqRYfL78tUF4abOw3AAa0txacTM2TeUcwDDQewNIg0/OxcB7ToE2QHJfeE0o3s8yeh9dFRkz/GmY0hD/cMlkOeqDxeBfjKN7naR0O2aSH+4LAgEh5Ft43bKBa4BAMxnyn68G4O9VX5+2KyXg7GH4Op+8QSemIkk6wOjkCsx49MMo2z4GRlmmRDR2EAQl6UJ80xbVfzT+Gnp1GQ6wxOck/1/MPoJs6IQN7mSt3EmozvYvYtV7aWQqetN94c+99bvNWBz+ao61ClEJB0yz/vffKas+keF1u3bBCGMHxh+IVq2yTW4DpQUqiEB2zEXOCbyvgboJfPBg6MF4cEzwa8CoUN+zbsNOEAb4cDjMGSsrgFKxusOOU/FHVFTqiu5Xc7zugA+fT5zfZ9KEnBbxLn1GsywPVS+B/B5Wf17ofJzfIGDwcATuNVFXiEyQNgjtiqh6CQdhSZfme6yO85L1zBtRVv1T9hbXHfixS9jEOMs829m+TNNVBZXui0ziRi9i2LG1Aru8g9lZouPLOalruCH0nCW18KfrQXqhS9G5dBdychTt6f4k6Tp38rbfhPu1qeomHmVu20T+eAM2HyWUS6h3xcos4xjcMJ/WNmeBuR0MvlVaAnyMcc9IeRVVhAJtu3UEM8VDlsZr8qZysSyADxZBmT0QxJnfxDNhJRPNXYtt1McBrwbo5IwiJU2L9Vp/xkM82mbiUg/I6XqUE4IhTYTMz1wW67sNT0wksVsIpjdrwWWZjFXxhXlnp2I6J3603csxs0AxDhr8Udv2OdCQJZ0tQZeVG9S9QYcInH1SU9jNZGNEqvBsna2aYkyozg62u6EmkpdAK52Ud+bS6caWInxxrfa08CcxyLHR68v8U7GVa43qG2EsKuBtprhehe3EdKFJId7Kn2W9Lzy+Gg3KvPq7eiNFfqeucuaEFbSbDtQ03ADcHuUpK73dIOE1sa+ufH6N3SstXw+AjqDIv5C87AyzbDg7nrfkgnASr+1z24Z/VOGdvg79yzLkF2H/BspaUUXN5jl2xOAF0X7LwE/DSFXKrRBK6OqJ8Y8TXm+NhgoEdv45VgkBTmVF2fjA87uTXmjZPxN6mlS5w7Jn4aDOjb0GWuQbwpDSlaMJH5KQXKodeiKFD46LZdGG9ZyUCFnAASbBoJmrxuUjIpVNlFjHgWtVVwBo1Ls46dpPfflbur9cFfkC0yMNcIDe4YkqY75LkigrVjYS1Tsp2ShXvjcsqbsUdWyF3zFox2U1miynoo4I8kllEXn1P4eUDT8KznvGdeTXgn65EjqK4y1hL4Pbx8leveV6PlzAQgtyQPe8n3+lbImK/XVYwKrt47ybD6dO8wDfhebx440SHhJM4vnlYZYIpfDGYvhnpqVtbvM2IhcUzHaK9oTWTtCn+Y4g8iXFQJeGDjr5MsroE9AaoJttSvF9Z9L+9hwHjbI8UR84AJxBrizVWso8lEtEjsKZaIqBtL6GPwoP1mzyfB+dDKVqZ0RGwKFb1Psa+8NYrjSNx5mcJaRXwVr97O9P3mSminEUlO7i8Ta/imGO0VS6AICSxa7LYEI/0J/Ac2YWRBshJoGmcWSietf+M+VSjXnJZEtVv6FqOAVSbbNHJtqyfwsXl37oaTXs5kqL6SvJCXMlUOtIaDO1TxrLMg6I8O9Y5PEFj2ZFRMh0phnNOnzw78C3LNB1F/k74a4bia/CbYLq0CYIhrbi1Kw8EJXo1vEmihew0QS36P7r3tyrrK6cXkqynfy+0K56FJzWOSEMKVAGweldbrJ3qmXhELAHdJoqolECtuTpMz2XXoQiL96eq9RS1hYZvetfx2dlmtzS34U00X+rYMa8YayjJJR9EBNTF765OygE/+7pe/U/CxW6WxTCcm9JmcG/dV+1ev8Ep48PkvnrptHl1uTOgnS1AF0qr7vC+YYvNteSRo6IDp5Zba8CTwHcT8k5TmSBnydCjjRRS9SQLPQ0/J8L0pXwBXKvsh+MAlPmUZteXXL0DRHzYmR/W+uLtxmp1bXs0mAqPC84eMzexQ3BY+vWY6pPb5IuqAefvV+wravBM7rq36FnIwp6kQIRXqR4JFb0hmojEklJj+tjStpf1RuV+sBpmkK6vO0R1CKDb1XcsgiOd9UsfF2ugnx5dnwJZy2L1qZwlF
YwBkpr3uWT8AorKesJQX0FHfOY5TvhXBGw5lHB2rKPPfH4uAOVUcxxutiKRNGzfY+C1E/7Kijo7vUxhxkFjwz+Rqx2m0S6p38ekGdxZfc1HiZhIg1i2Kjw4CMM/sLhH+OGZHK5gis3ilIkaXUi+R3028oDXRM1uGND2RJGRpvEFIksRSK5wmStHxP9GJMtVR5o4udk6us8TBHW1fLLPxyu1gvG44V9fEtBEY8Bhzk6a8p6TDMbG6I701S+hWKJiZwUQ0IGYjdfY5remFqikNel/GZ66RJURtnDNkt8Icpf/RvQEw3rw0IvRhX810TV1DxqOxs+rHPQvxHQjq2UnL8BI4fETgOeUyL7r7wNIm/KOwfbx+UHSOjdKnmjFIS+9/0PLEs2230YqpDnpon2u0SjzvGtm9DYn/hRuvje/kP6x0KaZd2qGdkJDLduo9PE7VmmcIP/TMHLw8oRBeIis+hol2GrHkQ2MSyD3kaOD75PPt6dm7foQ1Uiif0Mura9zqtPxgdJQzpdw3FvRqMqPufItzOJmfujtKhqLmy+DiAC3Y4+3kJW4AigA4f5wmKNs+Hwm5DumKhr+8XkBxrjQexSACNjNQP+oIKk4QIsOibN1HxN+s8p68S7h57qBZQw1vFOTNJKEz/XIesvdnizD/NM1nJpNWPze3CuPhVYk7BBjX2GGJfuV4UN+UC5ysj/R/MvS/Ir3jwo3i9c42z3wsjMudZ6IK/wy3Y6TjPwmUujri1qfiY/ctOcKARA+CEZA/MZnQG5bNplAFdcl6yjKJfeSiWKZXFNGCaM4u6/gKfBncTDNo8XRVHbCrla87S3ZMB8zajlO0oLXFS1Jywoi3kOlOSdYuEUilV9ASscCWLjPaS4x3DcI1Zf0lIfD9iJR8RxU9D5/BNyFnY+xZKuMfWYVxJ7De/eZ+rSgyvB2mvfFBJtYVk0uTVt2azgZqXMyohOk64z++AhoxBx91cpt9ffemGZDz6PnhkRqE0EgeVDJ4hDy8w5/0Z22un7dsM1swjEG+cZf2fNW0zt9TXjQFX3xXDpqDNRHlZurk1EsIaxG1X+wobUMcj2HhutV3LHjZmHWhiJG1TkD03xEg88OYXKW5Rd2iFqKHFSUcmOWYC8AdItsIkPkXAx8GTml3sFyHADeY1YeqRgdWuG5AxdzgAysGiEqARJc+3uOlJQd7ivqWJXFFuWtV3Zr6dsynV2ZJjE/683ECutimS/gL0+bu0vTbU8ADpOPNsExBF9bUmoDCdZfU1Vpo2qfXLUcGtETBt/+zqkCP6ucje7U6A6s8A8sZg08W0rsgjGgOnhSIfFXob113iblVnd2d4q6kH6ZZgCLlxTIQY+kIwzAZqGaduCf7AvektN8zzhqntJ17hrKsfxIY+ctO748K7sfR1gN9A9WJd6CKlAsXB8Fu46ISFL7HRNSS404NUQ94/HrGtMYKgqMtHqRM0rqrGxLAhgLVzojlsLPc8sfnEwFrtsGe2ge+FK0RoMthrQGIqRw5qa8c+20RCaGJmEqDj2DojaattgKEV/SSzpEEww55ee+1TVWO083OCDW1O2MrHLA9dK5WzED+5U7NPbGjTNKxLLxM5VXlKloM515BR9BoqO0kgV8a4UT57UpvIAfsYyBuUmR7ZCg02V9rBrO2ThIx73qkNwaUeWo2CdPVgOm9+ogNnTGdlonY7Cj0sr6Lk2zVxTsZ3To5E+SUR3nRuUDy6kXJ1XSN4GJtnQpk2RusVswC/lGjbLb0u+0vqTHq0rT6h8vxBSkpEnGNZ03fAQftHUczIc23hoVCAz7dDH3ev01FwKqkkaXJBwCdwYm/TRCpnSX5eRZqeyMagiOeemYc2GekxM+W4m/yySMPG84dvOfCLijotsrS8/ekTUQPGUqgTt6UY4PvyY/NC2VfL126vmhK9wn4dESn8UA4hoylOI6hsxUby0dEDL2+rU8KwyI1Q80PMAwB5wZr8Qc1s0oToMrUqwsaWRDjEVY3nMrRhGUCpPdT0CvJYk7UNWuXMW4NG1YikewbAJUJHXk2TCA6wGVtrgvre/QaburPwnCv7gXSNroebBJzXr+jZHcn4XDKT+KWXMaxZZDNSfTnFNRr2GPOyOETxImV7Es7ZKBlO4uOrKFeR+lj//ZSqXfIggm7MoCuH79xloocYferzLJkxlv/RDw/2AT2mjyPk84IkLLwnNZD4+naVZjmzD9oEU3Djp1ifmRf+wQaqGNDbHUR2iuYkwkh5BMUwzH5vgcWEYCAFGrdh2u3zZgKiwd20kv8C2YYU4ffOZ3qTs2QAiH6mJ3jLhgaMvLKfL1azTZYn2I66QA4+7zH2iLmWwu1H53n2yVmeb8eYoX+mw+7FZOJJ+lIeKMi+czCYRU/ZahIP/yKuyJaqCG7Ux3yQKzZlV0aIbm7cc45Vh3oXprkd4KSvvKuWcGdE3Yky9Wk4ZPkf192MlLPO4jJTzqeK4hkrESCveKdETmCnAsBf4hFQHggJepKbyk4IaSRSHwPgKiZUR/Usn9X4OOV9uXae1iSyLoGGZSSrsoiS9ZCHDqq+HE+CqG4R6Cz02lz5XGl0aEwZOHm7bv18QfahwpMZv/9BSMzMP686a3pEDvY351/z0BmCmApKhivxR5lr/aYBJK7ekScKyh4eZnMwj3c9OL+ULtSpTsI0NJC5/jM2AndJFM+KyRcckYi4vh6s0niYGe0sXHaElc1+o4wEdxKXG7lfNjMr102j0c9fsM/oPTPdtfHsZLsTF/HX+aL2eM2M3GjHR/IvexvcCLxX7WzHj0NkDt0n+MMfHpfAka9O4oB7fiATVBLVorLBuXHoz1O0usxw55G6FNKC03r+wPpDCAuTK+0bDe9AEsqHZFwAWCvb+JEQDKFGZ7IInOjrPPL4D8zVUxiAPT3zcz61xCQg8GqEPo3rO+Px2DYhikrWVTLbGiAwOravwmfoFxobgP4Ds403Iw8Qzxn/pjdjxNaGZnm+d8tf3Z6ms+JCOggLVgzkCXRHSFv21iLwQvlQZCbiIIqC8DIakBFILbbF15t5SXUBd2ASZMNXwXEdeCW8mcQ7qovJ9Hkvy0r0Q917xYXmWFTQqK80MFRpeTuJwZzAcygvuyfT9ujL00aM7E57Pj2xGAXz9JEeYWw7QDQgrQ4xFUrP5nsJzwiifStpP6BiaJNjOF10Y8d5LWYi83ydzD3E/Sonr8z4g8dFSXdN1YD89mAIUoTyaM/71VGChVDg60tCm8N2IHwsNrGqajh2X5nmHOQTuy6MkpUqTLGwxdiTha4kKyB8RrrAsizRdxeMLTzOLejZBeuXCRAUyK394qwDaeUpqNZ8dkVYDr38pFftU+mdW8kXH1H8I/9Tu0QBCqqcFmY4Z9MAMQpwIeNV1xUD74Uz0cnBV2izHVMeBU7cwJuAi1i0M5JRzOHLdiK05W8/WxdOQiP/gH5klGaa7TWPOjcL2ND3l49pP3suIemE2cyCMTmc7P+YmcyYU3XHLUizx//uwQ7AG0R2mxl5k6MZ1vEzX+YyBVfwCSoSy3gBVyrd9h2oqCcTAPBYXoy86u
djEdlGIGS1HIJY9Do/It6U2WNybNfZQJ6Pa3nW0Dpcpbmsb/wEl23LTm2dQga7VmZse3ZC6KhGTKFCpW/jJfMzPxZA4zgvOqR82fe6X7ql1SSgImCMTsQwo01BrM88eZy3kAWRWK7k8J2cC6EYZU04F7UfIJouKj/AGfBxuCz4ss+K5vZyqS325TdT50+Xf0/MVCNm/eMDgGm0BJVjlq9+5BBnDROwBEWiUcie/VhcwpsXTXI3N4tlD22G1HraLomwAjPnqbKTUCNfrGpwkyshRG8xnux8aSNH1bKxmczPFsOZ5E+Vja5cPAAhXzQehBVwImum+yknEs6SC09WsgCH+Nart3BgidLuRLrmjFsMHLZMgbdAFQV4H40sYrMQJwHohCi6BT9UG+v8yGjEphaAmFGs2PLMlYjMEiSKG7nFemF/9jG1pc8H47YJU33qnhxf4Xi9cNJRC07B0XPMwcJ4CySXVRQnmS4ynDJeNVqZbdUTG1OhurlYeQA6SCQ//IaWwuK9B7yPGs1Fe4QbrKzRZJ9yKi/zWNo6/xM2tDLA+5Ie3cKvgFifcH6CH6FdgfK+79/610WxBy1hm+HhDeCzNZ/a1usLcxXNWxeeQgqYb0e+rM/UfxT6DnlQUjg26ioN6SkwLjLjpR6iMZJ+hd7D/4w3xwcg8VNTNAh2gr21tkJdCsaAO0hYNdldS1o2Gd7YowflZwIhg3c555He9rI0DC2D0V2yz2CtIbJ3qfM/Y5SATr1WKnnzU/zNNylaEoPS9vxVNtXZZVBzJTwcPbwe5AgGp2RhD23rHHU38BeGPnkPduBIC0LMAQNLGesyL8NFUMOLAmTq+o1RDhcilaJ3NgB8BiE153kcSWSGazRk8jPqe7lDa1bO6tmQ3pZdaK3ZoyY41t1yNHDJcoPGUFiB7RDHOtlZ6S8vjrvDnAKyiROHXC3jLc8qSU421lbQjX612rMTZou4YyL1Gd3RGhskmQAU8l0zHFHn79of+2ezYhn0hRx2q8Qvz9A9DSJq/UohjySBXhewQkdmAKzyj9cZvZcTRZkTKBv0ksz+FfI3l/rNT3zni4aF7zOGvEmijKZpcO47NRhPuLTD0cg1jUikCyiUMzZQGribFNG+Gzr/G4QXXGYgCdJO1MdmzKKVBtf2+jQ7n/pyWzUpO2kH414XPo2p/Kd/Mu0lzHs42r/kDN4iS046PCwhxRy01uYH6xMbLbyXTk/ew4VP5MGfcS5oSIoJPm0grzeGvniDYqgHRzj3J250wyzFK7lvQ0Nk9jLCdvVKcrMnDO1LM46Z1DlvsXagJmCZwzt13IXHwzDosK8LEXDp7AaFhM5KYBbL0X/OcOHdSXuq5CdYDG62SpeO/cLVF46YZTdVuALkUamJRaWJksD9rW5m6FdaGs7aVJq9WNCH72QaqyzrbaJyH4FYctpEMHNzI8JFVYxb2R3GI4FTKqwvHmBRZV/qMBVW/3A9nZemdtL5OPA8wzrc4KobeU7zrKP5f5eZFPYVkBSWt7AVEqfSHlpOQvFavLuqfDdDC4aEDXTBwZTtCz9FcaFGheVO+/8jAYdpAP6JAzuIS8ce06kWZDHUYASaXRpr6dTu20CulI2zAHqqsNNb3vugiSifpeDnfnrwrGk4D7eaLf7sMtTZZh1QSqA1DgyaK12OzzRPjXvbsNvdb1Yvk2aa+xgwAanK6gwVHfEBMhyS9oShaEVNeyT3e+F/ANfF5ffBpeVOWnVzKIJ5HS64akL6rWTdVhvj1B39J+X0R+X6/WWLUngsmAuJvrgO2G5fbUyiluf0NXScEQIUgrHbvv3re7iYVvrOM2KRXUCZCWswqjcci2E6tVJTRQraYokv89qDesZyDMouax7Ipi2z5vOUMe2UCoeVaFhx0JU/EnxzPX4JJA+gDK3BsVZzmQhPmfpbLk10W1+A7rUlzu65n+cBOGxVQBGslFW3Z68m+GmHj0tMV055tRWs+zQtj0KCt8L/2W2CmQ7PGH+2e8or2zE+inuKkrsHZfNWtuGOKZi5lTdc/lhh8zxjWAiR7GK5i3DROs2/EnhUqiJxtrYFzMSnBIbOP5xrDo5SZlJYLWH3kga12JPG0H/FCy/TsgxueLv4ZX094jsxtzJQU6cNbCvsuoRoGqQ8zD4B1JCOtoTCQ0lt9bvbrMbZgmE9k4btz2QfOwCEHBV4BZzTyFxKkItRPWJI6ZKukRecwO23uLsTEMHf9D1478AVSqC4TDNJDGaUrv0ZIKDMgZTMM+JCMHF+RPiD2/nNEbJEHDm4DKI20BWr/mjPsu+S5QrVLNbUWwwRwRnMUvtLoDjQ98OrhacaNJ9/7dfcsJmRKi1wk7VURxy/BVGhv1L8TCD+8TrmX2VYZjYNu/ZMkoD3pGd+pwVPlISI8MIkUsPX52iWjHCWHlcvQexSMtxdAfH1OH5zZZ/ahEQWRuTFHuGdrPUiRKz2ilBtMMeyXS0iRYHtsb6QZPObkfFB3hv9no4VVqX4/HYybU3SgRyB9v4UWU4OHKp/VETzeizBgSLbRlnXIJDluSKOmDsrlchoMIGXflUxd4Z0CwWe6ltxZlFaW+CNmh/CGsIXc268Vh8x6uUspCPwllczufHL8XGgp/GQ1qWGR5m7YV3NrmSpwqjmWT/LoMxqK5CnW7aUabodjHDRstsMXOfgH9oavVAnYl+vJFXnWZuC54tTRdLLYaX7Q6OZtAy7iJEAIF/ZOBOIlW9/KYWme5EkGMnYpxcvvdpDk07pEpqDgP2kj7n3xX5OERlvZskovmsNLlohYUI309RYEmv27GvOWvtvYcHSra1qo7YpLrwhaVN3+E6W4Sgm5EPQhFZfZNABideWJ5BvcYxKgNAHvF9xf/+hsQh/PrgDZ6bdMoZGiD+1Sj+wem/iobD4Fm66QPEF/Xy9MrP7sNpWE7n5vtxXE4UOjAiWN5SEBZ/Rk2Vehev6xPuL1NLi4jhjmmUtafH7N8R1w4n/iy7ZfETBRh7nUIC8qUhMl6k3ceKdj4mWjtD3XenvpezeJUCNWtwmY8NUHostgr1lhS8z4uUPMMkELkKnd9pn7AngqC20KIkdUHdT5irc9WQ52hLd3lDbt9CS0tyGGG+Jof/cwmqCvzqxOF8NhxubJVtGBZGtEsD8G+kj3IqByryCe7ZvmFAypfYB4nk/Gan+/xjtneMlt/alNmlv54hbcUntPwcGdC1UIhZ0Bpl/pL0S+L8+dxqhxt8+7phZpLT7PX2NNYZ54UgH7xk5ox/SRgYGvsjv5y6Ti8zZXw2O8d3N6FRejmWYUaYMavlBqa1g5nDL0ccLbIqq1mlLGcN/vC8klp3I48eWbz99m+6S2xPnpQiNUglMOGK7MrZOIXswff37YjWeHQ7zv3HsHPLWO7AIn9faD8HWH5yQlBVLoDmZiU2SpCCwP79/V7p+yJgWR8w3apoN+WfQTOZyz7mxd9GlGRiJHuNrgOAT8yS6GByAOyz6C2BuFRsvJdR5gD4iPI7vq4LrlKvnJtaMw538JwZGvHNN0D9l3BCJWjlN6/0+DDgxUJhGU/g3xiasNFXbJ
678xwk0QblscukDryOh8PUNkufssiJyI2AQ5EZJezyTLmS2sn13ASBtmjSmNdZb6z8ccgoLtBccuCPkWGfxi7xlT19QGF/KAjM//X/kOJ9/lmY4cVWYBMPCcnIDfCJZKPwpuGMfxsadfrjVFBKJYYhNlOmottWziMLQwLRNjJ/EBPei73LU37h6+MnEWQp//zYaDVYYbWPRw0T7lvv0AaY8AJ1FThs60V8owCgXpAvpdUBSsMNuyN4aMyDiXZ7OeJMeMwYonn48UHv7DUzRBq/jgKpu1uGOGwIVN8ljJEEb00OKxUdA3/12ser/XjvbgKl4Clo9H2a5b4o7l4o6DyJbzdnPlM3hAosaovqR2nVT7+JR3p6H3ujkPEfhV4IMU0cV+svHoDUI1yR5VKF0gJ9RBoKzKJ5zMyUUPBHur9CWIyccHgDyX2XF6Q/fKkYpK0I1O1PifUCiRzAV34R8oUhFLsOXm9wPY5Sc1k4UYuroZ80yauxRHnFrCqx85EjlSu3yNT7BjXIDk6c9c1gfBC14ZqNfOrN3f65Tej0cS2675IGMMRyVUqkJ37PGUuWO3thktYLexaTtItFPoiIu+sEpWSexii9LjVRuP/ua2iUALQBf9NIRNnVcVpZJ2DLCYDt+PF9RBw+5160S5gC258642uXp30IIQXCjlPZJwBs90f/kpDxlxoRholxWiI46yvCXWs7owUxZjqYEtW34wATQIDvprGGvvtDY0R3VZ0s92cSakSzMFplZb9IIpLQwjrHzPKUAq61suKTE5LkXjFwsBHRnR+gwYjSa+nl+Kwbcvve8F1xwWQlUo82OB0QPuwjwq3EpFBV/XR1Yh3BlWH1zp/BSKVwGMKmEZfhKsJmZj1tVkxhLJxDF3px03kVZ8l0J3omi/hmTVAtzpGekD1nIZDj1Yrf1zo0d6aBUVdBmyD22Y8ZQFsHFYZg4HNMyx2PvWG8oF01svvbpoX7HMO7QG6kJKg5CvP5joM+XTeI/dMUfX9CxTNNmhPc9enqUGPjm0A6XkjIH+DkFl/SGx3AMzBPYSlKCS3HfmLHPtgCv/NU6KGN0ryqVo5RJUI4h1POLNjYysbspKln9ZyCA62BOB3ro4eFDQYErEYL+pD2dza93rYwT/zbNd5aXhxDpkhdXH0PS1qla5klUofTzfbbNgsh0kxI7nut0gx4TQ+7FmFkw5UNOCF+dSDfCtQsZjpZd1y9YGYgl2S6gawhW/MxF3G6y0DibAXvL1ov2Zkt+DanOsvELEquHaMoMUAT036hybF9/tLzfbPhpi6Lnm63tlxZMPzuJzDfwsxYlCep0JAB3vS0OoauQRh+xFd7T1U3dMERssi6U84OUy+RStyuDjTupXh7ILt0zPnNN9Sf9RPFABySA/Pev6dwKn+oP5b5tnChmr5ex3yKWz/vpimntitpcSi7j5dws8R7u15MJPZXGEgfKJdNaWyuRT96HE7581JDxJ/JVsxz0oV7XvXAaCUVFJs2gpIoiRjVnoQ9bk61H8csaUOY+n6wNiqZgu7wsbKJGxe4ScUSaF++Q/0e0g1ctZnYa3mbmBt8aWRmiA09K8j9KgjC72mKa8G4gsdD8N0F/jH/k/yW5cNpPyI+RIei9F9W8h1fr1ipUxfeDR/2x4Ixgq7tbd45BahkeXKi1oVx3z91gh4GID3KFog0J5HO/TfErPQENmqKP8LsSNJAGgjp6q31snEAfhrp0SG9/Ch9Ajy2gX4sO9T6Sp7T4GWf9+ie244aJTAlDmqPmS06yqIFrpH2mkTcF10c0FiNJbTzI75yW2U11XLsiMkPK/NEAUKA8uROnJLzISl9bMxqEFpTcSzJE7uvJjSvrpg+QRqeEJuFkbtK6+bTJ9Kj7NP9g6RB81wEms/n6Tc6+2P0npLGGQIakPdl2k0iTYW/lYOw+sdUmt4Ftzr9V8b5tZnkRPwf6faAUAmDAAA427ZtTrZt27bNyfbLrpdtu/5s28Zk7SH2IB9OCmZzxxj5zb0bY6vjYuO7Zc4JPussRUKmcPDFY8ZxNJuwhwQW5GGDXf7T5DycgHWbEMJNyix+RVtREZK9N7S9UGAhAAJaZI1AtcP3dzEO7sO5U/aTcvKMfCd2BUgmSBIuzKwbmHgfHIAfimSMuTnjmSHR3DxhOAfJmNMDprbo5AJ6w3yGstz+lccaLH+d9fIKO1BdIQFR+6o2W+byhztkjn748k/Dxd/Jp7CB+UHQ7zaLlG+xkli9pR6pH05UISV+iwsH2IVFcxdqMx73mlP2kYlJYxkEeVgh6xk/wncVXgmiH8ElxlavuG8CRiYVgfyPZSnghx/oMMwd53FqvIVF2pNlGINZPR82IADRxu1tYavub8sT/UOSJdlrn1Nrxpog5OEF+XkNWPp/zJnFVNNYFX9X/50Ve7g0JtI0bQl9pa6te8sg5DMHJY/n4SY1XHDVk0V2fuH4RcrOGCeYoxlV8MCH4+s7pZznNc7DnAt4wb14rt0H15XmgaJbUyI2FaZtaYm+q+tr/MZspowPdC1s+gCU9TgZ/9OPVeLmCyTRGRaQvm//hx+btivIml4QlMlCFK9mLP2dKKqStkR6Dtu2xuALHLJhNIQqjUN8MxE8L4pjk/yabcW0ZeXw1T5U4E+Pin2tpru8xd2y8RPDK/JTJ12i9XpOXuT1yRhe/jfSK90T112tKDGdPUUp2wx8XhSU04AD5PKK8DpHs4jq6VTddrRdC509ymXSgOlZGBymjrcQKZDFbnAE6RX4ViKVvozzPMQCLxlF8nJQtw8Ey1pZLyOXxx4SWjf/rm9KSdfiue1OCOecK+GhY53qq2RULQZCW7o7CUj5M2qnxapZ6lsmadupWbDejfekNRSSsI+slJP4hDiNL+w4U8hXyu3Fh/rsaM3nABzErWISkvb5iAap3a+BPDFWRmusuMUMVOsB/4VW3BB+XGlTjKaP5y6CdJSbDeRjam9j47d/4XIKvv0u1IanyRl6wIP2e4npWTV3mmQxfQz7HM3V1EfpZNVemSFU/R3F0LdK8yOPuk9fGD2EQnD0ICu3fn7tQtHoiVpSm/eDNKWvINMIvb+v6W1t7A7kburv2yQRClkxi/q055XTK8xPJGHUFQZBdv2pApcgaZEr1UgK9anclJju90qU1pHsVYyaPXg62sTKumtMqPgm8W3/8RY4I8w9qrz7MazFWMuxrjMPJCBUDEo/p/jFzpv30EOUzIS1TZllqAkCilQ9CbaY5yepaHhwgwOaZtUbl30L/9K0bK2jotnK4ZD5QFQI3Jrx/YZBrgkOYrDV13dny+yAh/0k5WT03tIKK5kZyEsjHruByrfCmyxAU2FTU+6IsPbbXnQrnjCNRz1e298Jke8qEcot6+m2q5hL7zzrynOliIWlnWgWj86HBqSiPQ4DLOkYEnriDDEc9NvsjL6zNTWEvbYBOhdLEjAvGy7CKNJ2FULPz5ZiE4x2+SyXpNlQfu1vk461sKgvkliU2zgvcu1j9JYa1HhO10uz4yGzjopEoeftAvUVOGdJzybuJ4aqnSzXWqjtXuJO/7Xm5MsLr
l3m2SRYPsjQ5In+sak2uB6qKYFOGE1TNNQWsPCg4Z7lJwWQzOxG0e/GGMCMvPGW7uxl7cXGHnbSssKB5eXsirxNaUy71ZVaKemM2BNv8jdgimYdLy4G1z/DDvl7aEECErYg71xo101tgZJLwnZNFMtzgIYGGi/P8CkMjq039JD8VZ54r5xSEN8Yq/wBK112BZt/2gTit5evFMuWSRpTp7+CWrjChLg7Ndkns4tj+ZITlIpMf71yEl9UzK//GRL7DInc9jJv4KrHmzc175GIwIHaTW6MTkJO2L3es7OdNfu4a6eEHKWwppxuK/yj++d8uUJYOFt0Z00cydafRYoTaGr42U0L5FEoruWQGR+V0H6NaCwJ1hvBYoHnpOfW3bygTiw6mtICUWoNluRG13jiAB09fCwK7sS3uWRhOVRUvgZEvs10tYGOXC3ILNgA/f/jLMQj25eSToUIEfOuajO1E6+YvNfqAJ04DtC3Yu5lykZLdxZAEPBaD2DDHzrzAOlyFsYAPaYZeW7iOJ77Kvp0ouvNgEThNt+n7v5z9mi8SkiP6hrgASXJ9kSnUXRekwrkbLgKGrH5qffrG5ykXBFoQv+QNXj8rmEljVygpoEI3RsWgIzmemTnE+AecZJGzOfV64YvtRBBtwRzmaWIhNMF302xpbb/tXJXi6wMd3da6vdXsuYXWa+3rXEKr3Vf8zIax9lqBiyqCHHEbZpMnto5F+ijESXfMcmU63jvvVjVRsxCZ5Mrv47KlRennE8sVyzw3A3u94wjcQb6zhvaLPlCBkKgbxh8504J9kkur6bA+qKnNRGfiamTHrHYVmRSEbW07m8/W7R8ZgbLLXoHSciM13i9l5PShntVyXLvZ6I87vjgsgHnS/erqw0zvXz6gu0FF3j12sghPoYo3OJsmcZ8NBtUTRmPMugOxtMpv7HsOlgoVMQNSyhM3sd0VzPFpPNVyAfXEjsELnXdyn5zfPNRYZfblJCjIniIWYWxFQzTP7KsiY2HrvhDZN6IHFIDETnkeZEpUqu8pNbR+6VaycYwefLuLkD0SO/8MzpJZpe0lNDTKXBpeDEVvFNNlzqsY7P3+7uGrZYUQV6TPxZEsWvAMZN4NvDqdn5jYt4VKSMMI2hWlh+CHVSZvNyZp5g4y4WjbSfyh3kRQiR3sxN6sgbpNnMKEnnatW3H8mPkSMw7IGxX+OD2fLXw0tQJUfaRZzhAaOT2HPr1WjXhxL/nUSzzoDrI7nn5JLBXHqtfyVzdNZ16LsPRPtF6H1Rae+kDU0xGKp8aFa788mZ5Ga0NrKIYjlb27b+46k9VYqg2Rt+lEmZL4nD87f4Q6JQc0BbHWhgQjnZr5fb/lT8M/ZVGmvB1W09L27q0cBzo31KWQcWivK33kv4YA1e5UsOjCqBORwtqvYaYk/bLwcXGelzvZ1FmqKBQBUQkgdscsYrKlsewxdRslUTeb7bzIE/lqpwzwQ5eUPMcq5yHAK8WvtuHXOXY6TOL43ddMGRhrHITyGvn9/qadA13Wtv9LMKKof1EvTDULhAOYlQxkdBUNoWpIoAM+FdqcirSGDl3TYuBYvunWmdKFk494XFJ26+3Xn2zTbdtOX/YxanJeARfTGCEprmimxbMy6OwTFyssu1vDJ0T50RDxn65PqCn/TYTM+Lbni7lafq+tCcn3CAHJ1IiRiQMmTlBFhd5Avk512cK9++FHy4hM2vi5FIh4diWLw83vkkMnBcw2mMi9piLJ3HmRw8sAvKm+ImpborkBrlsGGSZAo0kw2c/sTqN6Knj9s6YZS2CyA2jUt6Q5mkHCgOEp1ktcagKmF/JXI+TFu9MWJOBBNSbcSEOVP8JbPubNb4FXQuGeofPgaaufO/0iMg0U8+TrYy6nJ4y6K25j/bXrMEXrIeeE4VdGqx1Kz20d/wn8GHDZ2Rq2zAg8byvVKAvSuH0iB58M6RbVtcgLfHlwSYoMNHPuzb68Fg665BA52J5di2XEQeTcjEteHSrQBSIgrwtdS+tzMpoDx+8cbkFD6xj6NNhuFnMcbHNk1bNbItag+LIlhkjplttKlCuEDbz8yl5fulFAXEaQHGdyYMndjeatbVxwMxlynvX1Bh5axLk8b/ywga/VHsvL85f8H2N2TGPSDK6kAHPtTsEcJVT97XMyZoP5L0KwdPdgt1PsOJIQ4o5zUTnBYiH3vyB77EQovZD+DsmkE78Aa/pt1xAuMZ/930OMZDK3qG3pKtJq/UMOyy6z3odqsdVmYnzvOBOf6v77sbMAA/8ovmcaULrNm2uLn4zGmAgVZYHwQm4VclAMYLInU5az8A3emicTjXVPlYfh3Ia1voJCnIHkm/Wnn/hdlDne1KXVPyOeYZqvlsMKizXbJlO2xSNy86U3gQA6tnBi+z8GNpd+jZAE1CjeXfHPSN2aHYIKHPQnHKdVFVOJJhQEvALlwmNGlkWFn3yb3FUr8BTZ7CQ7G6hasFrGG5UDT156IjY9jjbryvHXIADX0Yt1o22SalR4awf+gcZprzrBSxGXsj8Nbk1WKhq0DDXQaoBNeurnUrmU/TAtUFQ5VryRo89PnrKnEsIbS+wGdM2R7bMQc7B0r1aV476DkqJjhQc7pUqIY4MUhs4XGO+Cixr/mUd9GUeSYKsZliFaVxMWV9X2rC1BzSk9T+viUgpAfpAotm9lf/qtSQFJjbj6d67Di6FUst2dHnFyPgUywpxuNFW3Ax1rwxDyGlKtTmteOe5R9n4wdE8GkstDVgqT6wVNWmoJf6lmkwHvZerh0b55TrsTs+/2TSfOGZ31LB32pDUGAjazTlgNimFRq7nYF+jCIyQ9CB0KrxsNvKug6kR+1zAOiqTdPytZoF2EKZ5spX2ss26d132UgVyHi/indMCmJZz7RcOmgNm+vnlGbKmpuzoA8EYwGSsr0zAIkfTfR31aJWAHcZElCzqqGEu4a5tLRFQQHgwbt98t6ro8kTAQ45I30kC/gCkikrOCcwQp/K08XMnt8ofVQldLQ1SbfsLC7TEaGLMHW8UZ3RLSZsOAG5XhzeOSa4TXEk5JebWeOXLROa71Zwuvurr1TNxsq2V6r1rfS+YMrfmFdxUojN3x9yV++Uu6E/mHdbip1cfTrFPBgX40QjSZs53Z0hz5wEq667PDnLgpGQO3tfoOuf541ndkTPWhGTyApT32WN7DK7YxJweuAHkiR1hq/TdxPjMf0n9lE/Vnm6NhkC6H1H2QYy+M54qSxXDC/oQ1eO6o3jN2KASM+/LlNwqf1NlSKr+MpQxg70NaWWsf07waYN4pqfC0hQWRZyfmgESKja+LH6RFq31g+g4y/wGl9mpPzU+SB9P12+GcgHfBgFChyYX7nZw02abrbJFQvriiGqaKI96JAZu0qNALMR1wfnbVj3oxRb+cp/xVuaTNW6M3jmnqkY5rgbfrdhNgc1vkpQ7AkQFGJfFczQxMm5r9hobC4KdnXE5zT6YCUUeUo76k+7Ww/fkprPkwrF87cDSq+6pRSFYUdLc29TfVNN0Houd9Zoh2Rk7hK2A+RjR9yqFBwHTDAIObhxR750NNQIP
00IuY7GyRUG13yL5wOVWtbtNbNRIEaSSghVwvODWOd3xDUcwV+pjDlZ7ovd/bXZ0KtWw87aOt3eDxjgOybiXCj4koZoQojoMXU4CpKSj45P5nkskdDwIOCiH48LTKrJDGIWeNjz2PhrJdKFZBp6RNZd8bofihwfBjFbb8sLk+lkXaxcc//D5RXhzr+P2OHBkKIY9g24YlxO+JLbzKx0lP5KXPpZzCQhzniJi+8Qb/aeCXyHr/9FaI9NCMcaCbKJkjQMHgtNXskbQN9P40+CbsdyGVijm5XpsfNSZj3lduIEL+lFCXz8r/Tp/1ZuMWMkToC1Iil1sBzlCmvu8AkSKS6M3/3RzgUJJ1mZ8IHFOjWVPJg9bgMkRe/mT3jKuuJFc9CQf9Amv1LKwlol4u7aHdRzm9xFkQdlqXKVppImI1cqLhRqpd0VXtGQ3jRgh+Bu/v2QZ084Acc721SQ7sd1ry7RKSum5PhPEDZMH1fn6vdzMrk6uTYExTt7l1S+3d68twSRv7HjZEjHC/imgnQ8wetHqknTtlXMyBM7Ejg725XLj+tfoilyazGD45M4BUohsKvrRr4+Vy5IOCjSw580NlTnmF4yeAzW2KlZj7sjsVW4YtsBZQPo+/xVJM96J3T/tpc6C74EWn2iGtUpwaPoFKaIapKm0MBkQ3EuJsDiFvIOEbXyWc76fsnICQ9WCh4CiPtlBv7u6q1KA3Ot5jY2o7povV5tyFUxPVjXITvKY6/6jhaAW28WrpPQMn/VoYp8rgCENlCyqk9RKZ5viwfJrwCssU3gFGcI5s8DJ1sCTttEjUZzYTv3onhDzj0pkp0AZVKPOelNgjuqoWgtYptNgs4X0Gfm6t1lstfE78dD9ldRNai/QAwwpkFGZNT9aUPQi/Puw+iqb4s01MQdJs4lFhmdE4HXQhd1NvQKL30vHwhE+2H3hTLfccmhm+d7OYpDIbTscDrKqpHcpu1L7jUauRgrLJUSp0soUc0Vq/j+93fOO/X1ZwtvqMo0S0dg3H8xMV/ART2ZziSksK++afy+RTCd/wQL2OmMmz7xKGXHmKN0Uk4Te1IvQEnGbxNIggxTcn4lA5+TFBwk73rKe9gK9BxePSNgAfA1wzlhdJq1P48XGlpeSCPdEuCDHu9m+NHvehIoDmsE6yJLaLvwxyT+PXGaRO207VrKcEw1n4joPMvnltujI2P8hNnonPVKnlfjolYHWOcHF7/sP3BlaOA3iY7+Sr06IGaCuTPhzPq0a95iUeg1cWGx9pjm30/pjMTU4lN8iamwaifBfZqbJ8Xo1Y9U17f/xsRzTLgv2r3Ixngvg7UCD9F6zKOfp9DrArviFA7HUJtIZ+9JfvGeMQ26qsGXvdpIw5ZAIfSilypYcikW6XMoQsBcn3yYPkltS6sKqY7APFBFUv9Nki9MJTa4nLYMoGH+jpxKkG3/nmY5I09dB1W8rzJLnwh7DWa+kjvkGaf0T0UpLRfhT0Oz3GINDfMsqMrqJA3Zy6KyfrRUkp1CuDPaeFTdrTe4BH6i3THKk2YIvtvvdKwnD3zrlfVWOXcJ9uHyjtnpY0mTTuQF4A3YkhHt7ctO6IDhLQefupnyvt9DitWgsu1ElErTmGPjrnQNPRHiv2IsAFyaSSg1aBTfD7XnJ3V9RlISTZF4eWxym2M/PrK6iXgiVLK0BiNwBBZXK4kfuloTJ/6YdtU7/kpMfqzWKlmLnEUI0nZ9CJ9Kqvp8E7uMssCmHXv9yAhhaBnU58QAIbodzKIaWqMbgBVUdeQbI+NA3DURUeHkT/lADDJzn3f5t1VoA+bj2AijPgQQHt7AR63nRdbpegd3ViaJE9DvmEReai0vmOf4Gxd1Fw20F71MF97zJV9x/6WtBRcgedSvoXCCXELWayW4ww33QsxFThNGFS0FIVO2XT1yuVA34gbHeIVWAZJP5OmNdthxVMMSIUGysLXQt40Fu0Xh9mFaMP3mkpNvnpDBeavWEMKHRZW0PqDZChYZzZ80tK71PrXKDivpqdJyPo1KsQcHwa/MNEavH8a9hf3ZgRN9/HYs8kbIynZ7oe+XeisSZD12nDmZOY3h7hXUxUOQ6inJGc2hBu/HxSkDF2SJWTB2y3O4OKl51Wgt7WVEKmSK+A8ObKqjlZUGmoaVGkSn3riwQ75YwSzLQo24jpZjz3DYOgGjFwRNWFwx7mpd5vxoK4PLQnmppUJ1tjfM9kWmSbFpFjL/3wjFhMxm6XDguuDACYABsACpJRXZsdHeMsM3nULP03jPKRUWcXvMEDocSP+zZcThjOEE3zrgKuBCX3aoqpxpa8UKOq+vOKntBtV0F6fIc3OIFOxsWKDrjlALwTA4nDLvOD0GJDUxnJAEx+lq0kzgE9i1nS/nKoFBhLZG32ohTQg0NTRypkohRgmUrPsx1eY54vcOZCGMPKvCFiuYE/uRUe2/lCuCTWRotajSEFXkLmzX5mCKXulQEcwGuWKksaateXA6LQS2HlRwqiOg8eTd5f9GSd00NI6GBtIu4jJJFQIVGX6o2zMghFsLtqZfV2quyI7GRlMH5qpq7BueYh1KMYZRDChvK6MvpjB1NMRaV4yVsNoQAdjwT9dNzrF/KAT+0qQ7TdP0O6RKQduqLOQitKzLyhwZxLZAY8Fo9HUzpH2jK6WxDzjXTnItHpSDKZ6wPNooQsCVi47Aq/hilzlg+BWU2gMTIYYdvSiQ0j8N1HLSHLU7Yc29fYXv0X5rs1lToazTyroAbb9wH67vP2bVQ8i7sRiWeMIx3kw//1UuNq5qW/xn0x9VMad2FAvRsfqM5gD77bBYNHQMCk4+p1+RQSmqZgTGAz+fBRGyLNNAz1JBZzoDGQ2xdEsxKb2yGLtZVdvANFDajyI7n9R1C5dtxg3sl643gL5nReIH/1JwydxYBROfef7HDURgNvbAaFosscvF4VRM05wIlL9WU7LTIXw/G398q3RsYN4HpVe9ACOdZWtK5f2AwwG1ssnaJ9VQ8f0nFKBDfISoiHTWbxrODJR4FolpYqpH0xeZqGuVc1hhsb9piDV5iUD7VaWV9O+VBmEnKZqxo/qnJ2q2JDf5Hbw60yC/OQurqagvujoxCxlYLF2SZfS6Hy3ULxgTKHKVKA4vtJIhQcLUpNLTGYKpWmZcwrSrKmCGmzgVFw4raj+eI02OhFzMUaR5fgE/LJAs0KF0WhGbC1rT7qGSM87zS/U6Ky7JO75tS+8q8vX4iSZZKlfhv2A+wmRSC/dDM2yHGP4GaE7HMVtIj+JzU/oBU/KfAgZ/c7R0lM5XkKdvHv+iiV+EXoLhcKSQOfyjmUS2eslkMbXqrf+zPL1QZjZBbcFM+1JrQbwIxB9e/6DfZBYLOCZtk/Gcf0YnFPY9Jt/8POrV2Ef4jCs8Eu/ayWc3mFLE5t3OKudB3sgZ5pFRCDAd1L6KSJLVgnb4nRhTipTmla2C8YU9/VM6erOjgbKW8pRJ+1Z/vt8RIw5btbnNs0MPa1lHYbvyEpaWq4XNpK/03ys/qrA85UC1ZiTx0L/S22vRgvQePNQsP3WGVXzW9DUrwtdWS/hq
JTPaZd+Q1AnYZ9+fRjQPr2rGhRu2TW2eYNmzvmiTwu2fZqjO+U9m578W/hKjegU0qB/kJoQsVUevsofqeFZ5+4mniDMsV+sBZI4MkcSPmSkeSIVmme5JBM5XCf/q4UpIEv6X8kkV1+9+KhS1HbWGj+iBg6AO0sprvIAhsJX1GInpkOKPgWQHOrNBVcoY/irYYo1xJVBdDuxusmXbsGTboW15UYl8Rg9AHr6H/Zd85VsIQ5x3hWopmVzUY4jwDdXemTyyrpN9Y3+4hHQ7Kjd4A7jYqE0z1Tfo2pzJwUI+grIKMPBWKRn9DmVl2zm9R2gH81uWeOePaov3PCdthltdridHKt7JN5+g6G8gA0g8BBgmTPrk37D/MZ2FdE9vHs69CBlIiRV6eft5utSsp8L8NG9ZGCdGOqr8SLbFIejVN8geqy9/AKmTnq9whAK7LZTb6BqG9To3bfN3M6SPi5rbqa+JeQ2tkfOvmm9Xc5yLj2/mJ+aJua3l66fe0aoPcLlNAD1YPy1XoWWIMC0m7PflL+0y6uP4PYbogdwF8b5m1ocjbHfFFPjp3DwtBaZ13P/8q76tSeArTUJbnF0nCbiC3PvOVTBs2s2HV1+a5eSSkTad5kIEL0L4W2+ZPXmaDzwBRwXqJeCTDXVwbOdFjVSLqI5rEX7ztg9zR9y7yyaSLBhk1pB/nqmP8rgRMyENh65njna+x2KStr7Nhzh3I76GZWGPqEU2sMxoT2Xj8IVPFI8l8fuZFCGhL1cEAi70boybaksS/wDjIO/5hoHH/hrdPaq1T5N5OiDf/UeQ8WmzsQkJCpai+TNCO7IKKY8ufk6T+qf/K6nnnObz6JTHf/mNsMheSqva/DxGPZpYoh+cek58WDwWAXazG99Y4Sjoh41eFZILyTir0Y7NyLtSIAPniTvtnekYqx67t/qLF6xvvro34feHm+kd0RKxsYsmI+f3gl8BcnrCNJwd72wekKrj6/Q7Xf1BV5Qns3QT/du4d0IKIzY2/o/l/WXxjsB4OwdPrW+aALjVZwJsiFWHZKWGT7CpcfBoTfUG2Jbqc/tH+3OesuwuUkmj4lTL57wK9lkir55OFT3Z9+YaSbkddIEUAtjFoHtsBSik2PpfbUmttjyRE+L6iqjPjHBYSQ0FOrxpA2OHmPV8k4E0KHliRFHqk+UFRSAdG1QHxxMi5khm5yIBxhOkfBZJo8AJQfDOH+VMX9W76OXpaTPzcL1cCJ70nyCss3GCnSRhSp1MgMaEdxYyYAeougcK5V7SpZSQOc9r/tA0fk2rswd7FXzh5pj3IMNyz9UxoOzFqRBq8J/bcQe5DXZL2OC4A6wzxs/iie2OUkDh0EM/bt4Aot7a1HeQgQqD8RZSzwShSKnbUWe1znRbtPNLuUCgOp5yCOGzTJiHQFqgEmhTuyAf1kuEchZyOvdwrOBiXOnFl/WJ0+2skdvgNp66RCR6n1Go06J+wDpeql64idsVOqrW+mGErV03wqguENIyoIoNIsW4hYlSBeDsSYLkbBP4j6bJsbfbFhJxBeFYv34SlLWq9+SsdgfxNEUMgmxluGN7et+ZjeA8/XZX1PGU4bb45O/+GYGKXqn0GaDiAWP1VTuHCizTiUHTh6Q6xiVFyh4I5+wVNDXQ9aio+PoC3YY0GHN5BSYN40Vs7GDBEvrf8808CbO9xBQktQAx5NNmTxGzK/1O0Ge374hlKKcZEBUHu/4upxR/6L5771QGVmOAtFFZmif9+5DwgacacHRKExSIv1FRumwCk8aid0w4yXi4JCTXT/pzqxCtjkSAFvx4hfdMcvbioXTaBUMzMCZfGTcGRJvQX+rtzQhIrbJ9W9eRBGFawKws4WGBLJpWFv4JSjC71XMXrQe353dtoG3/o5cwtwdC9raOE19MmYiKWRONJkZ6hMKe56frNVSAEHJnvaSsf9gsf74t/EDVP4KnYZyXBCPSEPKpqFD4d/K9GBByKA2GEGBupnnFhw40zCuGBw0y5i8jADIilZwXfZ4YXZ78X8I6nvha3GHhAoVGDjbPZsPjxwL0bkQ+Cds1g9jeJRO3zNc3mSQcwSoXvvsksO41cJ6u17OWhAZlsSptl0QQWe0wTLwtlcdwssu+I3pHQexRz/mUmvhLA32yn5/c7nUVD8TfYwYhmc5OtJcBXE17BlWZMVqVxX6UOJlrZuPL+CI/YPrLvE4Y8nm1d3KxAA029NJksk3TcM8ilRiAkZk7o3S3bU7dN88JzUJD8SCMoZu2ytPKfE64FkGZwbpv4dL5r+fyvHZ7FdYkfDPkHqHKPaZK+R+Gu2d8S5uJvxP8GLFrpXF1JEF6NHhFi2xb+gS7xkN41D2JcoK6/1VEJGsBYFD2Vjt9Qqr163h8Cg532sMfLSBaRWIs0JBysDr6FKtwzqDIPM9nmef9DY8NELJCclT13OQ7RPCiAYonELZ11Guwofn79l3VbonrP/h8zpj7RZ+gLMuwPBRXUhkrzJYecx7/xM2YnphzEfNkokHwDYfPM7/4McS1p+ebVuZy4snhypcqHMjmmh8JgwT0PELr39NkHVCacX1FBFxVltJw2whw0E5ZcsEFo5lSfZO8lPPhInf8KE4Pm2zy+AtFbCZmuxQcd4iR0mokn01DRr+4Ui5mImICUHV6brHnrtK/PXKxhY/9Bmrgf42a5JkT+iA1qw/70P551VDW96/bwCToKxImJ4zb4nL1MbduFh+SERdQhUElRhXj+fCJecYDRMLjRDSo5JFnw7WkeEPU9iICUR9prG1GavVLhh+LKdbTHpPuTboV5gTJsdfYoMUDykEsLShbXW7WEFRbOaJl3DTRg3w2D768w1WnkcuhQBH0pa8ptLFC9/VRnTuMom7ii1fe0jtOcQmfkMWirSHIwdILLgMuK67yhtoG6FJdYHu1rait30nJfBZT52TeVbmseVdCbC22jdrVq+UuEBVrbtv2pXnQUWXcQeZCe/NpcN6yrkgG2MnJl9wfdkPOw0GsXfQOMMhkQ3Y//sKYAsoPgM65O4TBf1ZoNP0TNaSSCQGzanXP9T0SfSzxk2RulrdlMLI++7tqb5F/rkeMPE9SG9ugX9poGaHJQWMnzCtMS0PJJ+mOV2p0HXnRi9Z7k94L/M1rbyF8o41Nx7R42ZjrrvnSoeVOHfCi5mhw1gCl3sY6lAeJN63T+WHb0UWqnLEPQWlk2h6a6/S6D5+V/Il5bghXT736DvGy9jjVIYUFmrLSAmgeqxtudASh/kAoizifSRewum+yMvhhMP5AdSrhV1xsRQLg8G7Rm2AvrwL0yUKem/fqLws7EMcBMDxicGKzy0/4Saz6l05lXUxk5uqtZcasnqeTFTTPknoxriTxSZHOkxy+yHNoa1yCK5Sq9DZ/kD/uunXxVdYEDBgJCRk6TFB/MXRZ932snIJ2hW+gcWNHYVUstK/jQYmQTV6v/695dMID9xKr2+wPyIr5e/0b0GO6jGk8wgKIeVZLivJA148JgW8FXCHJ9Xz3tuuXrvESPHT3rBy5qqYzxxMjRcWU/M3mDJPGhyBVOQRClUEgvsQuniQwP91
2/KXdrUVEI8mc7bqL27TjWOSgEP8eewhueW1m5G8sNXty/HwvvLrLmYV59qvMNSgS9gqXoyDewtQ8iPoQ2W//Kzfo6UN4GnWKjBIl87kmJj69S+xTXfs9VKTvGQ5rFgx3F3yuDTqd8SOjWNP926hNdUq6WBdRCdhVHo6vR4Ct7lUw1vau+zi7LBCJFRsdJLLKIkwlxPEt4X6LsP7sqxSTts54Ehi+ki8WG4jim2oBuJNP3U0zxsyGTY1CKjpKkX9lPhSS+LZXR1NZN2Io7qxRe5sHMsZH3nR5GTHMLtHk3YXNjkbjW71xLJQXqTPKwq6SQ58OEMG2xUhF7shUE0FPC3oFtuwtEH/NlwuE6kdLcOO37aH7JyNDTIxGr23dSjG/xL6KSwM2y1vsUVvlTu7/CeTSPBIKaht+B2M9B6r7QTZjd3u9AvunfsoKIsdhfelnLdL/mp1H+pEDEEeJcN1TtWtU1370ai4YHx0ZWnULTYlSoFSWstP/claBuqSJUUe/z87QjZA/1ir6W7SyhB/apGif/ueFRjEWtpNy3XOcRxjndTndNC6dHVyEWlaR7YgHLOSIVboJAEMGYKtN4glSSTa/hhDWv9Xto0F9Y7wON7JetNIZWjVjlN1NnDwu/S63iykZVz2CGzxw8e+Jh/TRLaVXvS7BIOdn+uowWvMWot+IlgfhpwpVKz76/BKE4msCXjh2mDXIwxRzXz0UEJsZP9Cr141qaj51AKmxOvZfyyA+snPdWky9wtcKMjQbfIQ2Louhal4EazJr55RtApmcz1yY43EKIePY/7PwA5tyHcbJPMc0YZhWG7wLx8XzOq1tQOFnW/fx5zbEeaUBLUkjSkoHLgJan/8IjfxW/rjA6voKeySuOemlB42gVawBCzwW50ezvinxQlvlkW+ZVdrzCqx0LX+tlFapGsBS1Gkg+vIp98v1ajE36ZMZF9rQESJAkULGUweV4BqWVwtseff/G462Tpxgncf9oiJQZSySw/Pd8/53ap5ZzB6JiGEvzrUuBia4JrGE7UftMU41sOjzea+D47KBnHyuW+fWRg76qU/PRPb4i6+h1fPaxEA+Tp3Muu331XSulAF3YGeryatv0mDPsEVaD39sPLzTvMpTGq/AjTEA0sNnx7/J8GTyJj61Wta4PkgPdck5IrSe8AUV94/k3o/3fYC0FezuCSUrrv2Hr/A0cBKfqJ2A0BDTNeiEFr2+h7r3o8ZicdyGAy+9GTLCsOvMLbrHZap4FLUj/y5sAohEw9n1IQbLwRjDxkyrA4osahxO0eppMJCLwtKrimKnfTv70lpJ9HmjePgReo87GNLMy1XZAUkBc656h+zOsnfsw9GBuGl5KtkyezrquYUX+SM9vusxcYB6P8TiWX86KS3uhhX+oVDdr2FA+kXZ3qOl5df/wo/tEZtZiSlKVjR4a85THon/K9E+BsFROGsjhKOv4FP1yAXjfRLKXcsrStgjIMQxFpKiLMLdF43p7wVzVFPCiosPDZtpZ7dSm1+YZsiHkqh7jxVAI+HZlpeyC4ZznTjntGVlrPNHwP9lP927+8RwSi+p5ys1n+f3S5FcTxvV+7z/gxdb4WQldij7/nHAsT0hcbmPIqVCxjPyhDC5DgaUgiQotLifUtxDP3n9NHoXk+2m7O2feTpGvK3JJnMIvI0LeM6IIqRijQuNrebhO2i1zvwrfw79qAePL/rPACgCLTOf49Eq0iYC4yOAm1pb+Hgt+brdO6KJ9x486zhJqbopFlC5qIAeyQ5BKyTBL2KM1WwjmbfRWTGRKEdQtJRzTrMMJu2LiSeMt4vk9tuw6ZF8WWY1n1m3usPYe/Serq/1Cv0ac70gxTiOHh4QOXnWsdO5F1KEWNIISQjwAMfMupKKkq9O1YyNveptDDBSlLQ5jsNnFhH8MlbJkLh32z1wN3TQkCHGaYuZbBFEV+rlsK7RI3fy0jiHZrp0jlupEI7f5twzlN3mARY76eCgHmvQxe0z0jpp7SfDt7VFLhGLYzvmi7kQhfWHDeU3n2l3IOwrQo4shNsmPg0K1crJ61Utqa5xOVu6bzZPQYVzUeGXIDLVxer6IR4EIb6KB0TZbASEyvBcz7Zyxt3NP8Q+4hd4z4rKpUqVJJ9KAuvg03TalsHrCzvRJs4SwwOckA25I4uTcE8uzoBJDjCgsvCRy2PAablpdFbBd4jX6u/DJnMhLxJidVn2wC16koJgd68Vg2sq23gpHGMEXIbUYZZB1X6LwV0y/g5B/cDIF1taI32tdcjjkjdKOo4qpjjBra3P3Imv9AX1OIqLRWtWxubSlx/83UTlTDO/c+KcBWbIV9Jec4suDyAf6oT5NoYw/vRypYKI6dPh9RYmSps3Quq1ROkfA50XjgYO6ZXXRggGm9KTvYAT0W+d28hcEnsu98ytWn2XvhFAY5yhFUMyWpqJ+DxJmL86KKIqGy4LtIhyC5SMwjQSNSJg1pYgMNl+zScphB0+YyoxFgEeQqxKGHtKDa6ivnOrsnhLCSNqt5K+khabKvxScF34kyhcOIHuhS11KbFAIYPFuQZSeloFGRZSJt/IkCdLm2jCGZzoHqDefHvgnkSQWwt7uZyGRKl45AHoXZ7CxRoUqhmUIoQeCzG+/qAt3EFW0LHNKsPRZ73epO/0VIh/t+mcrYyoLqoS1/yhJSTs5MQrZXeQO7rVLF3v9jp/sBaZJEO0S7u4Rq/yJIh2pM1EHFOXKwWCZgzrj4kgN8QnV/ENYDLFiuPEBnylH92cS5dD2GzavuOrQvbaBOYpuN0jxS8kMDisl7XHKKussmIS5JtqcYhvTL2CTxjmsc0lQKjMD7Wms2oyItp4o4+nZ92A3w9mVzOMgX8+1Vhz/LbdCepDZngDSo905iEuHf1ySoip04eOJ3Pl1hJFF2K1IRTYS1rF+3H41PKG+64fIQ10ltUkMf9dIiZ2qnZ0/6ZX3yO9Kbt9zq+MG6fUx8yhOxEM58EL8NLrY4INNYPEsYJR4jkd3O2ugOAMUX4WxkvO/kahIWGkWoF5YNzfpY2DabSb8lGAMWbkDrLq9aNJ+ipCxH4sidIw7Vac5//f1ls64cYEtdFw4j5QAVDj/eFkw3rtr0AspJmYS6KlZ76mv5qC4ct3rPJLhlRsZkakiVZXwOOz8vwCILrNraULlMCHTIzG0QwJFCYlYsxVmY3dkUO6S2p82g0RSI0KvFIswFnUFdDWh0wJcWKZHyYlWw9noKvcRYIQT08ss+z6zA95AVmjwlPwPRana6Gdl56kFu1P/obvH5+jz37qeytXPvxllnB9YfYOScEu1mgPL2pnGqmFw3ze01BgxlnhsU8J9Tg/HANXBO7QMIgKovdJUIv+cyQ2OKD7oQSMRnA2aR1h6wtCPnbHFPJ8saAfpLWlWH5VlseHJMk8rseFSQ/o8Se9wW5LJaI6IJKHy81GLtVoQ0jr2LnV7izY8Ppl+zXdY81by31Fuzrnnxp8kiFo1mF5ZexGILt8XavVBdne+NX5seWHOYNXWalH9/c8jhZCH+B/qRyjTzlaTMl7hOLMCmcbt3RK6Cuv7S
N+H1KVqnMhrrZnVkkFNmyEKH1Hy0rVGnigrDbLpG7Rg5VHFpGxT/hn9Go1BDy855aAqNVc8FwuZUWUnGBGfkhhqjTrNzci4Wc662UfJHWUPna22GbW0RdpghHpfmRfy6ZJdR50aFINp4X1NqixoAxR48vWciH00AgX4U1imkkNp9H66e10iESU+4Dvz/QjFYcOGjAr+230lKtSSNces7m/lBxSuHVcgkf2Vck6FMv8nGyxdUktqtcjXFtd8t6+m6YXHDJuS1wAjiywb+qWPPUZrWBTPTPxuaQDyt58WuXx6/l6xIlFO3QOaP2SVr00p4JJvNeod5EmV6++l3rVIj/pmCUJgZgpv9e9uvYHhDn/yVwc0ot3HCFhc5TR47S+i3Xxd3Y1i1iOmfz68og8DYFpl4l/d7jQQUEdZzh6U+UXla584DWVH6vy4yc2EoQumsdsGAnJ11VdKvPMH7Q/+0ABpjsFoO0hNEbI/GFuBlY2gn7bgo+92cLwnO5Qh+8Dj463oDGZe7G4heCNETSwbYjBWo0iPbbsN8GajvZbigllLrMpGHfVZQbEriIQO+ibAMhU1VNQLW3R3DocJGYqEsjWOjsuroEpJrYLx7iU0OGptk4+bU4m9/9KZF6qwWVEhHt1XSh4tgH68Wr7lnVBGz5q2pi8XJrtXgBEq+Mg+VYZEc6f4+3p+lEaxe7s1H1iUMkwS2IBpcrIEKpCfymU2D2WZnAooIO4qjSHBiYOh3+VsZJcvZTJrAW1WYcs3VJHK2CedNIlMV3jrt/HvaY68ACiahQER109woRCYdzSDpfYluIDpxs0YmRdz2rPfN5oLcG1FLczYUTU/WPcFX2emRGTcq5GIX5GbCsBYk9RpagIOYw8a7o6dV1/uH0978l9170RkMj0Hqk1UM3czc7cp13IwBuhJ+xckwiKm1vk4m7ypnZo47pi/DK+uEnfTgnQX9rXmQ2d+hQM9UOfPMk7FbCtio89Wgjh685qGIkE3x5U3Wwv/2fnRE3g2Fs5FBKgwKuVOwZKWRjYRhoT3nHBzXHB+kTj0fD1heA9aYVfgISGsfqN/ed/sBzOz9dxEUwRxs0FRL1tmuUCYXP22P2BlDTnAGkWk9QhOMBvXa+KU2tvMVUf0006iXkj7b6PuAEU+EyY092Vx0/J+bMjVbYE8ECQPxWZyP9I+RQag/5HyAUOgM9aelUCeUdd0UoUYz8WLT9cU6MAAZH9XxOjTWN2Whv/2DjSuXk2MvDJR2CKuoXEON7RpgZ2XIru0HmrmoKvgt2WAySuCh9Fpc9aHbqj81X473p1W7aPMprZdKcRWSssEUkE/FDGVaYAtvJ+II6UWTOXjevF47VJrBpxKfWw9U1Vskzo3FpOTfCaJ+I+Yuge4eCdPlR+TteWEsChoMaDtZxzyv5PENLG0C1hLQaD2C0gD9h8yPN5oj5hBZjDi+aHXJnbBx+Qx6yMMVK5PpywGrUjyJMMnVjpGcxFkkzc35W9hGG8Esyq0K4b0vHTAnFCYGbDQAiKoP3nNHvzT/foZzGDQ7zjOCUY7tRxb5T2nDryeOGqeOrFF8sDxL/ZaTjnuQKLLLp0rnyiefqJ95DheH/a3yP/Gpwc7phADrFx9PoZNXxbv1fpXD2cCKsxCTSbkj53Xim5d9Jk1O49dKMwbYVIooQpn9rpLnyU5HOq5CFEkNgXSmX4ziizuX346MaGSU/elBMmlIW0roaZptNNMZk+NFEbkiRpvvFfPAsPq7X35qACO6Hf6/Zedd3bzsNKM7czhBTxDN+NWMfIvE6nyFZxLJA+SMgsFmNrhVshvssKcf7I5DJDUAhFjka5BYLlft+CmfaA+SheuAjmPO04v6SepfZWk2qtIUWgbOdGwl9XvW+uaT763ObHd3bIOFS3DP6siJ40gzM0XS7M5cgqvT0Gj/K8ZrN4+cyuSern5wC5DsCK1uX+Q50f3fW9XTpoQieEilo8u2dLanPI0zZrOSsI4b0SPc3HWl5CUGtlp/BkT9LWuHzArVQqyAxfxIdDe6RzGNLvKeIffjCbypBJltCKwzo4kN+v2PBicxRk+u/5ZlPRcqAHNOAI1zzfcpk9KkBt//Oy5i/oBUsekXJg0SJfoDiWBzjDIlocEfE5s+UMNh9ieRw6nFJesFoakV6geF08DTFyCRbriZ828OQ05zQ/Jn2ZX6mfKvYds25NWcNaLWTeuuujwTZzN25jHFUIgubJsUQFhEeIEYiQyoPSjRM0KWYXERDd7/vST4vqvZ5pfp0N94WEB7u4E0mh/Ovb2pk5/lOSerc4lboXXA6ompIQgYnG7DMO2XmRDnWukXrsqUNTyT+a7I6Q5W4/MizJmQKs04D+FFsGXtQksUZ8AnORj+RaAO5O+3VP+Mip6bd7WDxVkOxgwVfSpHJW0KIIjd3FSIw7jpRYe2ksk0YJDFI2eAAtMhC8igPuNI4krTuE1rro0HfthTYtD8c1rfYxUXdM3PEZDvIgovXySq54RGdfojEVxyKwL4NGshwWjFh44Fa12FG3r9hBEl+xtiUrc6AEtv2MuSHTpXTCKuz5Q9leT1PHAZWPkYiLnuky177OZNSAvyqIy5XZXKAPcvILsyS4D1FoqqZGMxRwoR8HlsPztfUT7kCvEYKfWZNKXgdD+js1MFcr9hXWUVtd3ZIIHzn2ZsyaEAykHK3QtCHsD2XYL+d/XzAy47BhqeNYRuk0EVI/JOLHkmAUsgg+bgckXe6ACZteOEo2QnLsO/WUYrzSOtVxEKwy4yWsTNKRNhOXfYSsJfbJ9efD+hiKJqwThw95TZK+o0fSSSsponSy+cgVeVbh5m/+M2WGQkBzDN8zHRU1ta4q8EjFM5CfebUlDvmMVg5s77SG6mG5Ub5x/OZKdwHgYHzbH167YeKcl+EkLBihXX9Hlap17OupGM9LWfwDi/E867M4akmR9TXMB21RegLoR/y3S+5twG7V5JfBKWpOLSEfkua2Xycg7KRbM7h/rBXgOcmrCtKLE1pVJT6XJzdYk0Tw3/+0Wt1k4O7M2bsanUQWdYQq5GIrrWaEI+lYwmW7rFaYV0bhcM2/L+XmXoxOUg9Th5SwUGZYSn1Yc7gNNBHsHXgjtW+Fm0KVtqu6ildp0JQAnMnvYNFg4WSj3+Ct6otos3vRksZY7VZabu5SL5LH93GvbwmhIHgbymEIF6/6xSIbbEO2AoqVdOKhnif4ZhfKb/QGLwsnlb/6BKpR6pkLn2R9NAx82W87cJmWBye9sems5+F1zo/2lwV+hT3CZWgV6Pmgtr+qHPccpdsO55mmaUgd10shCqEi8ruvrIf6T1ZE6OZdCVA4K5age5CiGEyZun2MfmPXchXO9gNkkdbPzVkkQPBYp+PcQHcJ/22FFsrogLtJHV5qo88IYyMvbkhymmslmKaj8DPCyr6exkJ8b4OxR+NNtwsqPgAnvpvyAZjCYHjFOO4pEsc/wXINeD4X4G4IdKye0aH0zyU4ycyXo2EMvn3SuExY/kgq3+9qBXJUqJFiWaHGV162Te51N26+wSikIKJuNwxuRQ2DOH2AHqkGw
HTjEsHcFKn4Lltt9c4Ecr7ky+43l+eY30wW0bfAFtc65upm6xA9MUwJ8rnOiqB4Wlc/p1EBilU2XdLtQ8RCn3XssVsniMr58JEmwdQ8WWf/+KWq1rXJvecvjv+7WjTlo5wd9LiEbZzr9XnsLpfGaKDuOImPei/qN3bOoh0vf9BgE+Ul3hQGDGcTPqMwngXyDbCEnEZUuzcZurh5sMFPuq6liPbop7sYTvVeRlxjXJZm/uy0JKZGFQUabJFuA22MDmt54eb5aq2jpDaHNT8zyE8RUx2FW5ihP7DGQUpod5fub/NVunivD5uf0XCxvK/Xpu7hNv1Gqo96Q7lMzgNPUMFV8nc+W5SYtbzgMYdyVi/m6K8uGaakyaHYQKfW2zwkHWHcQ38QuDGp6c+N0jPwkivMYZdxX0N17FEPaOYsHPTQK+idZjSUTnv0aZ8eNKTvPcUBBWwwvKbEb3CfHhnb17frLAcwOB+sWpsCAM60vKFUnp4pxDlSnNelKkbIRl7sqUyG7p5t106YdkaCvmoysc8UknOe2SQLZHRSRXRBZotDyuTqu+QKEGN8ENSMcfu9deQMB+eY+1v/r/9nJlkEp+BUpQ4sx/FEUHP97dV8w20I2VENv3z8TnT/t1TyDzrywVHzub8DtUdh8b6eBfBEsjuR9oXgqvrCnaSqMpyIBU57trUNDesKHmEBYQZTfmbjcmqP9snFgG6g9Nr264Wtx0fvHtUTsxDh8+6eLUPZT0dUqlnd9v7ITmUoKwbur/glUH7LLgDSjeSwItAcenTbX0W2YpXpj6REyuwVQK7e9tQU1UF8/Fb5/EwrDlGzGdBuPpgKwJiTH+dnP/EbjHvB0rOsDzgpbFz2nzfMlLKciQjPmxiabmwtiYp2yx+Q1MHZTo+usTvocLgxvY8NKUjJOu4PRQM6004z7eUM6OT+iFskeLQ/Bh2fJ0FbjkTvn/HnjJZkSTz2w/4aqOfUhimFJA/Nzc0pff5LqLFD2YEu37NMdvYWIveFoA1vZxR5wXbHbQQ+tCJXGjyldJGdsT+PWdu6pHYZiZymbUfPZAO39RyNQaH7mjyZVTwiuFK1T40mlV4v4OrWv0ggzQ3mgk9eUZ1uK+QYr85AMPr0cdyOt53JhKrWdcHRrfIsv7MNuSk4Xslx4Vx8tW8U8Plf+h9e9hw2Qd7BKZbiWmFpOTagH7fxfLuZZ06VHjC1q58z2Gg090hAWbgOmqneCr6UBb6k2x9m/5GOteYBMTWTQYaHFkXJMqKJvvKv2Pn8mdTc8RCaEsI2mmIXpkoAEjs49UiL4JBrM4RvhB43MAIKgsNWxtlp9gZmFAXORb2OD6o8VhGKSwSScT3YWI2q6LmcN3X85pIBsZ3VomGpr9lDLuhfl23UCPZZNosj1SiB5u2e46F/UQVcFguwPxAeB/Gbjhvl+tEVfpeJtNNm1GZLybV0RDZF07/wc87w+aF2j/jGzVpVWQcOz/TO9XMgdtlPZp8pyoQKrCofZskPQfh1x5sfbJM/jnLngsy1fD1lAZxQ1AZgpasp7lx3lXW4TpWMNW9k78aIhUCt1AGeKMb5uDXdHTdnFs0qYIJb3pPq5Zhgk0mEcex7wKWqPLEjktYHpiOvME8mX18euXuYCqggoghKlbKU15sxf1quRED/7z24gviCRNSkAyKNFZxYmd56WsRJv2MIz46CWMlBLCceF2cGDaMHoxn2QsJ+cuX/g6f420vXB5Pc3Ua92MjeaAtnb/hEh4Scehrd3bXq8rADQDG5VrY/HzsnEnP92RHUvK2rTM/uftVio3ZRnMc0ZFrr0haldAW6/+tbnUO5oltrUDT0BXugAVjv5hDxwhLUU9ptHuOz9pLJkMz3hmbNjyeqvFUMA4377Jv2CF2K0pba1O75Q9qs5e6YroftiCEd1mL80fo6eWNOVmTtEzofp0MlZEPbXKG2i+MQRrJIPOHp5olYVT59XQC77rK+TrUIp5OTV4AlwiWYk3lMr69Dvz5aitADi3fs35HeSb14qRVQstAAnQRYtHneWGwNcMw39xI0psVdW7RpZw7ahaq7Ttd3eCwx1QyPDX5y1rG1PYhQPLevBCE1OJHCCi7oukldpirQEb901xfWb1DzxooanVm2o/k4VDRVQIcllwEIQ/shQcr0LganP1bmYm0e1rHeT8QWzEqTBgpvFJriDvdHE9WZTglDHylSUVC1TIOeNljDU5fIHMxwm1v4Gncd/vk+pSLaUbvITftMyRrRtjh1pUoUtKu773Ky/pLPU3fD0Tgydz4uVEhtMm1C6xn96fLDYQsKuKaoCwPzfXO2Rn579zHm1/DvrLPPcsHLI4x2ilogUwoK+p089rpYwj3v3M/87be/4bXKeIHFdrN8B+cdoHvBtlI5M0oYyYOeZllet+mUik7mo3geHzlIoI0RUTGgcZU714hcYMDIAxSQkzSyG9Qo8S+tCT9qj3DC/DPDzaLZQ5n8uAsz23iLUFge9G959BVqmzMstuCKCnwwkGl/8aGeyCXJuh0dO/I90e0CoRUEAAJpt27ZtGz/btm3btm3b7mXXzbaNWcQs5IyGJvMYWVgjBJaS+NpTAAg5oGl22Y+HuzHn2v2DGARkqKutQ5Az7cdqRAnCyB7IDr6qlD5/CE5Ay/Wtw8RruSFQO0n3PJFyN2xko+zSbq0Onwh/hDYVMkcXobHk02J8J6W6ur4iZDS+UW5lzstkTg8VZy7eOpvCgGx8/uURJEL7yF0oi3DZ6ESOK7hkX9w5uVcvjvRUobEQZ1dVv9goYWxr6gRhAGbqkK1kMXbj8toutSlz5+WhcfG9dx1R+ifM2kR7wFZ9Ey7QGsx5H7zE+u2yAyvjG0/bShgH0keHJmkVXAiPk4gYYY9reb5vhVOSzwTtG9OAgdb6w7iYAp3P5bFfcKiTGgNY/tC6yTqy0SvrtzMRNSRQcenqId1i7c7RLOIvfxncBOkgUkLr8IIF06iiprdxT7xsLb1tDLkvc3dCI6tVYQCQziCNqQUpzNVgoaCbi0D6C1GA4gBqxiXs/c3zD8q0wLgUzH0a/zX1ruKXS+1g2ZPAaGTjr7NQ0oUkKJ8JG2L6fS5KoQUNZ8XZDuaX8SuQgKX0EdkCCclsK2F8jZGJS+u9StYPwmak7B+miUS6I9J6WbR+uBRqKkIfGzZro0z+imPcTegm5O4rXwx1/Q3X96FoH1dbdZ6wOhKm4pcZuYEHbi9Cq55K0pvUCHqoveAeYWkK7ZYYCHMwtRv8Sxx4cH0/5blIOdSOCnoWfxY1rkaIsFsXYhW5eRtXxPxBjqFwFPTJckLeqEuJPQa79KyYxF5CzG/yVSfdxtwY+m0JmiMAy6qoGszRkscAgAqwjhdgP39u7z53UlgJAWD33+t8rWYCQ/lZYFhADJ33aiOgmcDXpPTLOsYjUrPHGIUF7xvRYUcUHFN+fsI9qXPDwe6CSaH2/u/DmSgmFnEzFN59r3z/yfbFX4mRSkeEEbYoBzkNyIYegAz+epr04s6c5uFziaqBZgd7ZSpn1nDe5u22JpSTLYvRmYYc15RxUT/8Q67mAdx
XEiR+KopMaMLCGPavR7dOBfsqF+ReJ70hokWQmupvdliofGW4BdLYflZKSuP4wkwPat2Y7So1RfFyf+ZjmM6Z36bsGots0mR0KhEXrBPT/AJBo3FAtzg5vuYmC3x+tYQa3YyI1jSFxRlfgCh+QEHjIX1YSHslnKE8EDm72QKEG7Xfi/4aryGlinrlpkRDRWbmfb0Gwa7wYUBayBCohE1Hh1LfmP5hNFXiuz+dsfKUbgobi+Gas8gAgNfQOhhmcG6rxgZcEbvTkiqZyYyL9+z3Zll7dMZU8qvuiJHAgD2tej2V0fRo1dqMqJAcUo/+8/dkNMvnPO+tX7eR7LnHu23KnvkzKqNNpaFjvM8PmVyuvoWZfwzuhAwXfj4/8BO1UGKdPdabrFHxDvhFLq4WsRF36iK/cxUUxZPhQO8ZvUaCHvIHirNm/44VIi7MuMHfCsszOX01CWf4jdGFWyraS4H7XZ6au7U1YSswF1rEYdlhHFEhadSioP6npzEJ43oiMbchC7ldOa+CbcyTR/fWHpuPX/iLsYs5PPwdwUJNp0cl+YWQfibX25Z1UmRGvUHP+EKhbev/ZLFO+rs1Hy6iafTqq1MMstsAZnWEHenNw4PCyK1h3eqP1fvwJyR6aMHGX2LBc/FXjRMDGqgP44VQbHJcGmQ/VBVZklwNRFQsZG/J5+qvWn59lV4Iru9jzzMu7m7wOUAMPvBjzjMIPsSeJrHGGnGBjatyihzcr17lYyW4r7niusRvF3LuNscdDvlOjs1SVxclJRenZFT+F48MoQVkixLcdFBL+0r/wuIqQHim71NJ1KEBI866gnY9SKvLPyycUBDHX0th7bPQABlreV4Uq1k7sm2ekUM1fEXucDt4dauITyeUjq+oN4IvV67Df6gQn3Wd+wM1b5norayW4yW/iy0ZtcAo/B5D6JvYiOS5rpXtWHSVS4qkda4uwtlYQocFDBPk5P+smEtV+gRo7Nfos1xKJMzmV0u9wAmF3wbCbd3slhPGxKK8F359Yrl8astn8HB4N8KRTWl/vrwZisQbFMP7EiptjvsqhOs6QR0d9XigEWEpPELlHzDfjlyJqi2Dcq3lcv34e9gYspRyh9QUQuMfjWDMtSQ/b/+5d+YJ6U4Eqja7IIUqGrTqNAacpyIHpEDps8MHmF98ypdMbDogzcPAxeUsxA4+CnekKjpeUdb3GOova5JBWreYrZeJBZv33Vi7oujo7f+KDamBC/jmk8DxYDFz1SMoKXGUykzrkgqZlDi3icO+2Gh2jBrH1Vb1op33msiv56HmxXcUr6BK6AooEe8QgdMeZRDZqBK9vuSszFEv+HBvtbS2/Lc2PZkbxU99fS3s+28YZ7nSm0H1aaYhxae4vpEhypw+IrDvTn/DbcUOwn8EPJy7hOeBzlRZOnJeAluXuOVCC/PecNcj6sLTLZtmlf3khNXQMy8H4pQc0+VDyu+WE6gIdv8e/dvOCwmuM2JUER3gMHGKr1PS5xhLpgcQVETdq0TgIsla0VqD0rflTD+yhVx7pNTR/UtNyyt2/sFDcb2jNIAAFavtG3eI4dwPUELVFtF++I6BP0QswkABYSTUI1JuN3lKGSicofovCjZrckdqtL3puF4nWHC6LjWtKxjzU6b1/cIQ0nfrI0VPPDauIMnPZNYajGSxNcYl8mbuwQucuGgpzgEHawTZ72eRYiBX9MzaZUdB/Y9p5Yg5fg3d5Rcox/K8fCdU2HQU8EXy31li47Rxaix9lPn4ia1mu3BcqpzwLIRdxBcaISSWVV2nv2EDpQmImDfC8/irzuYzqJVl/tKmjSGcR47g+ujtceBt8AUrHYIAGz7X3uJVDYXTAAkuUVQKGMHaIJ6EwORMf/e3TvjiGUpulcux7hQydLz1Ax3edGyCG6ZR+8KbeykqjAmvC/I3wBumHWEXOjpzCQt0r4ahHbWcW07wNgv4JwWlGC8iTwGXbLU4q5MIBt3e7EBShkhtspiDjfRY3nbE9Q4SU9UWFfkCMh9II9MfvVzSXirmnnTxPmV7+NWVcavJ7+vjb7ndbGsmy6LPFpNTKi+SOoPqX0nCXrOzaPxbA505pcxvst6y7yrr5/7MEu2NBDSwcmtePN7QQCjqIqrjqOB+kVd85GfA1OX2euf927016h8NKCsFJE/CuGjQLVf2oi2HVKSY7zgGOKXgbERAm4FVfsQwz2X5Ttr+BCwTV8TrliNdXc4tEmWm1+IvXW2mTfho+x418I1URH+j2nOVNnDQEBnuO2Rhao1RYBvQrQzUXsM7YTGb4qSzxB6MMMQa4ch4c5HC/NUl/7w/F0ROQvWYOwxsNK5vvI8+iaVBqs9jDtm5BTOu7Yzi7IPzYFs4eVXxU/48GaKw6kpNF0gZdf+MCd1koDGO038VqlpYnaTZGNzKTck9Wco2WsTLWi7wCcr/WdwSmR6FtXjDIkeViG6TfGGVtQf5lo9RVeET6t5GdDxBEaARYb1X33iBwEtef00uTWBCk573n7e7RrMoHO7FH2g7RU0Ctpab34HUC4xjm8mB+DGqhSHEhkKLZZYi+alSeKW+vfcNsmM8KKQH0cWKsxunsDLwILqlJ6RhATvyFupXUOJqflrNDM6uxF365z7Cba0nPxhWu3QjRVt89jYJdDTxnThYbo9dBLlL4cJSxmsaxjaSpajrAIvhN7tyX9pFEVF8SqLbVH4YJhdPZclZchwsDDaIdNOMnTgXr45/Q2fg6V1uciaHtTMrl0OdctWNQkB0LTZpRzwkKSxcaNk6h+Ru35f1C0Tw7VFp/7VAO81d+ve9vtLXhKo5Nru5YN1BFrkKiebFQHjqECF8MN+BJbNounilpKpVCoMZRj+37v0ofSm/UwOYh4fRPs+ln4YwkRGtr2ZrPLjhEokFsbEM0CimMVL0NJgiJYzUVoCcwA8S+pg1ys01Qm03YEqcud7vIwq95NZR7wlZOVkOpKEy5jvJ4CCbZgNV3kDmy+/nl9yKJxqU2vjhUnvSlUxGWZ8f5xf5swLhS+OGdXQIRAuXkxeclXnLzSy/Qo5tX+SCU5chtb2YH6AIFU7ZUmFNKFYkRhVzPFRAa5sg1tsJmysLkU6ZYehwa7J2yVIMkQnhe/rpqmMAY5UsFO6sDXkkfNQFeEy8I6KHI7n+Xz7WcJG8wwNpnOLKapFpxP5NP2Db1MqBt4lnA6zfwWtF4WSaHLSF4ldvbpFh16CHn2s5c13ilrxb/KZmyWtOn03bB7agIDld4SbeXAaFRHyD+B0TaRA4RbRuCmTljnlSJUez1oneRQYT3QXSw/uKhIY0pFoiUMMa8CJ5uNCM5Z4mtYxAmGA/Suj3N0w3LyusY5d0B0ohvEW6+J1lx0vPMRngl8iqVbUcEH6y/xm1mC/0vA1qqg+ZbQElYEvGLBzya4Am4IUMWMVBsPB2+E7dBNOsDRCX8xDdeZH0+hIFEGuJ8TqQembSpAH2ov/aR6Aam40N/1mlT92uUHxaIorshSj3RxVNs2G6lQoC0806YAZaAtE53GL3u3/uou93TG0CgT2SRNJHnwaH4lAcjKkK1UnvuIWwI2
lrMO1zJHjQ/gmBESAS45yUlvN84Hfj9/A/fhhJNTWHRxQvJFvWvYDL6Lp8hM8un5ZRlVZps7kVLGpRqaBGv/e7jCHaoS5yZ3/AkczNNLzFGr0eyeWlWIw+48zmslNfbKR6RVW64jv2zk/oBioWUnwnRS0uPAFDgooIttlpt0gt4ou/7Q63VsAo8Pt8VfSmpz+/8IXjjp1iONZ97waYDpDbTKMzCATZOgbXX/WgMKpFUkSHq8uhuAfUboHCW7AXsvXxOYxbapqso2vA655VWzqiZi8KuV2UnFFfIwiGeRbDQ+EYOpjfQGyvlNerSIsggLyCt9tbIC/g9rSHlNiNS8DeaJv1z/URrKQpgLMxi0NsWX0dRRXKx6phOEhBgT5eHTrXO3TH5zkWjrD7/YSC8UqpiiH7dM7qpSrHwz6oUAJe53pv4Pe1t/RcUvE57zCcVl8sdC4WVmcIYWrs/gC0yyJjY+20F5GrhdUdMzTIGhDKTs/arK4LMoCCWbzKTJrSNC36L0J8XdcuEajsyPOsc4+dlW2ngbDUb0bxbvG6dXvxSlkkKs8tQPOPLqmhwYxpWaXsL2CAfS7AtFQ7yu/d3gU/GXiYdEgV3H9yG/omXkB3+AFf25/wMUPbknaRP/LQr3beCpGSSovT9AXX1DuOviq2RNgpGziNXwGooQAnXFoXyPSAdaogaZU+ACiscpx0Q/UDQ8UigKsSZm38guGX7vO7fiEjNDaZAng82xbOXPKfPGVXCYpaz0PVAnlEQB0Ycf2AlMkjm9UQMLuT34zi5VZiI3e1F9I8hMzkVGbbdw+oL8lAfvJVmeGGNJBtPIUrzD/IpUCybSDw9FVgRexy8XnygpVZsx8xjOgiMWrKXVWa1JzSvyyjhoSyDKpxGjy4ut6IHRLDPnfYu0gsgM8IPl1bfoQtwfzahjiz+8GnNNMQe3zQ128OnP0locbD0ZVkdUJh/Qd+9ena5MNo2TcLkHQ/3Gt47JiC4L+lF9YGcxHBnL7WQNoOOl3paIoz4TclO2QZpkZ9LH6btLTkOt9I9iSwV3UQIDUvhR36n/meYx7/ISbhynPxKgdUhTbNy77Esixsv9rjH/ra0SLf88DqR17pvxAreab1JIsqfvD6T+bBIdVBRsuk0KE/NjkL2AOBFjD6E8xHQiOym+fodp6uevCCfNi7oQ26C22y2oMhqvG0LryCmX2YIeoeIXifGCc8vN1C2CwCK/rISTjbWdVQgJl80NSj5A2PkHx7OjGfBwMKrPR9nLqRicCjhem7ZQO/k5pHs2WKSdToYZ4TZJeuu+6XN4KOneA4YfiUlxFG2lad8wRiuXtzzmGGeJXlNZtRABgt9F7MgggIlm0FVa1QJfVDPY0HPnQbtYphmp9LZuvfub28rJPi6kjp8kQBLcfkoFwBSYH5PzNwSTx0lHdw+YA0XQO0pASgSUwymZBTNkHBi5YymnlBblIqUigq/DOjVxDzr2Aq8dvJmCPGBIAMRGcX/+fmiNuaTLzt4EA8Lry6sjWsnjnsZoKFjXQ+MUdOL8/9NzQrnLCrSC7DcXidVkgI3FYmOy67PVRtUtL0NCAziT/O3H3uFctTwaRxhgcC/n0s+QWxVRFFPSRoCl6gVY3AcCBPwUA/cS8+Cld3MaGQRrO/Dk5K6mhkpBNHlx6MOqpezwQWs1uI4WQZAcE6f9wSlmvhGOyxBKLBDI6MWKPiNh7X2Gv0c3N/PyHMOmyNo/9ECDbAQiEBXB6xiKlnqXU0q4eoll0RVfUC1m3YglibPonpIWuz5GdCmgl7/sNUumHabhqLYCLfXiU3a9059tUYdZ9ssACF4mVaD3reonuIXjJIlrbpbA3ZE4mh6aECDKjGXshbDaRD+SPjfpKQYh3xGiwFJbHft2uyUH1rtz8fDOP7SScwzu2GsMUhZeDLROfxERaj3mAX+79PHnxQY+OgXduUKf8NiisL4N8JtzIUUSq6tafjsFeCKcrbNex8HMdZKETN3LMVdou3Uu5t7vsEszo2kKXkAvUhLsCm1zNQTCPfu/5CpCMS8UuFxtPtZAGoY9K1ZGAa+0sQmvNCD1RP11onPdFA9Jrvm6uYWOXjcNDAZw5vKzy0JFOWn2L4Br5bsfYDttwSd12ON8tkmI40OmVAYMFRCeNbMkS7J+o/04JUE1TeWplpTuMM/TQFhJdK9454WW09soJZWmTbJOs0pdtg9gIvtP2cLNUzmu+G4dbf64upui6N2MtIGF2OQ73c/CRdtpAHpPEyYczV+O9U1+jWmhr3gxBfK0vTMK6xh/eFPVdSWi70UlR4lcanZ/gzvMp/GoQBbMgjujDUY8VRpKzoQMs1fIuScbn5T3Swc1ZPseOGdiLH9Lpi1H2nuaLrTsOZgtpzYyI/OVEurYPIrfGZBK5HtQfhSRSqYJbNZi2NBD4HsD8XshUWRccAR/FwG5gnQZ/WkVlOkFEV0mxBFgW+VSvC6q1/PCObO1rxOhQk1dvEtKqNV2CWcQZycQxy/5Sy+YF4mWLVuJsBGDDiM2cEvxw/ieCTlB8Oe3kihBKJnOJhgdH7NvlQMfrUcJju5tyRL6tooxtCYs5qeHTfh4faD0EMWOA96I0uxDNY6kQn7DX/bLJDHNVk+WoZn78BzZiRcY8HYtRl9kHDBQmCGPhS4S6HZ5UiKcNxtZZlRXxSYVpdqLuYKxB82PWF99db48JmWxb4OhD4ys+P1IWNIVMc+3Kf4d4F8X99XQ1bB5QtoXECBoYE+pouYd/dvl5Jj91pRovVURt+gWCULK/RJ/gc8YcfGmIV5FaW5T4p4LW52WkJuBjpE/HXzDkIO3w2NiIKAUTj3mawQUE388e2t4UCU++32o/DkadYfibSRrZpagrkaS5szFePpE+LK+/aQVUbeeRDMuwnbNp/XUOWQFDouNUSFQ7d/stSa6FTdXbWvOggvaCC9hlPVgu26UNAl9vQ93lc9ENcwg72hoPoVjoSBavDh4AfJluqWQb8puA31FJQF6g7t7Z3RyqKmPZ3X5L8fQ77l61blsKlFG4qUTnCZGh6UoT/fOeIjyrCSE3WI8RVAZ/xIBaljeDI71+gvn1fq+iOndVaE+lzL2p4X+Ig3luHrUuvxjzFcvtWwOEhG7c9/Uw8bkJVnsfUJ1TcIxVEYwYJ4M63HcJyXnAk/JrrI4hz9LClIF6TUjtgtCW3ybnUSEcUNzeGynn5TaWZP+sH35HlffAceerw1o/LKkJMa9PidEYbr4JYU5PuGeBfFBWH/3EK/2i8KuOSht+87oWtvEQpqjZbHV5texjzoMZJT0v2w9G7wYgLf2iiHYJopPe/JJdVNwYg978JQutdtRoLsQTKXuGzRt6D+suVbd6GmNaDnbqiFPwdD7EMAJUMAPctxAwx6C1tWi6Urv4n0LPt6XaBWdxNmIVRcdXnC3J+wkMyaychqUbPwMBGjf1cgeqyzT2LRltQzFZBJx5D7JfZqOCa2+oqRXebFFu9Wq2NeNtKUlYXyTiym+Xm/lUtWsp/VqjpmZa/5ThTMwP+mVCf7kzZ3h9h1yVSzzVwWTWyrJXhj/ef1
+Ssle3E56ji417KJ3WYCETxNLb5KrWxs9vszvvktQphU7S2/7+9uMTgyrC9SPTrBMHg45a5T4cIz+5scsnvyIggxRHoO40x6nPeBbgRe9kzvl0mIKkhSeekk5gMlYeea/fAGe9JB2CFBCWFCNrXwVWLIJUi+6+CwkiGUJkNaK66kBiwGFCRHWuvrzjAyqWspSCaxC5oD6b/Vd3ys6vqDyCmW47vJ//t+gS6r4Zy/1lY836Ab/jrETn2iidKG+qL0h6l8rQUO0u7uQn3lmE7/KUn9MXyhfjS/J/zN1JQ6ZZyJtvgom2gt6dF5+9U3hQfb/4SVkTVNGaKqHMeb9VhqmQmTRDwEhXXW0Xu9+XCDwh0aFVHhGZc2OnjfGMORDQd+CX92pgRUw/cvt1kLON0gYKorjUOqVDQ23iQ6977byJL4HDymM+DVwEOR2E/BpPr1GkA9hPhQxExR1oZnTJLntMYqjFgfx/kyrRFUa+V//wEJhwhuKhFNpO+ud+UC6Ix1f+gAJbaF/NwrYs4eWaQcuNcCE12bQXhrx//kDKbVpft5f1mLylFEeITXQAE7cZ3tzvPnNFCsmXzQFRitumDh/VdpMhk9i+JAJgCCsBKPbVGX9jcKLf+3yErRbYKVkGQFtqrPdVQYYnks9EGfgCdWqg++xX/SJa4pqz+Z5xTMTrLKqf3YMZBs0fTvBPKiL63bNBS7iARUBKSNGlGczDuZiH1amgRnVcKFc/HTm7/IiFE6Bl/yFc/hc9bu+0BF0DAfB4+ueZR4mO0U/Fh3uMtbSRmeas9Txx82+QJLcZNvm1IN5Z0Ey6efagYDXC4wsUHyV4v3v9KiFSNg0euFTrvSlulukH1Hwn75I6D+KHBerfkgAe9UiN68Whp7Ab3bCMQpfPbGqaCp9bxvWcgnBJPxrTsNbMAd2WTZmjXFn1veIyd0ATRiFSKEH9OI30Jclm64Umv8u9C8jBwE24qx2Z0EbpzQdwhLjaBdEG0RDxP4rJqJqISFzO6zLu1Sj5mljRw9Dm9UFXVzVm3RAoC7ehRw6KYXaBUsgfNHO24QuyoZOrUIPdgcxBU1L/Oggpn2a0EGAcqsmL7jY+kqqJwuR3wthhkC5lBS1HqWdw1TAM5taU1tqR4oFbHAmdhxh49Y8Nad6vupfPXBIiGcFD2sGT+PO1TgyGb9/3FXyv+5oqAaL4b+QKM/4TheRi3idGX6UOVpk6gdauUHss+0GkQbXUjE0ncpbkt9xC1+HlwcNyPvkpxJ/5zq+FpVbeXgr2dscdvp/BH/ZuMTR5AnUHnrDdYwl3FiohmdHl/D7knfswfSwIJ/iVu91AwaGQXQcUUKqZxVTNXkktZ69kT5et3797pZTqwzf7IrEM5Zh9aF9KLOgq9nTHyRzF+cuRxQ5Sfdh0EJUNnJtDaEU9hyt+tvYKh8fl3p/4ZH65ZjrujSpw/IFcyLYdYW2mRgIOjza/qeT98r7llA4UkSY8tKb9Ilw5UYGyaA7ogxK/TTUQARBzUFFQopkYq2EiP5vUVgBzkdKy9rtVThIgwCkY6sJXwbiKuH8gT0/DcnUtLmJH2lYO7UD+ecOcB18jrgWEngJrgczpwr7rHxCfQoervnF/z3N3NZX+nuFX1uatb70FY0h+AGiI6tAz3KkzFJfESekiUPvSfoHPHUF7Yuujcf4Li/9VHdTDXu8rJmy86wRfKW46lxisrJyH888XcsjbdTGf3hiFNgKVsFuB4dgRejFJ73CGQSEwe3oCp1juvj7UKW2H8YUNbXl9feXN1I42b0uQl60QZU6e4MhTb6DYdPyK+mUiyuygUMezxd7yx5Oe2RCtSdm7krCusU+N2n2QEhLt9NBFVydrD50/u3k9934w07MTJmZNkeuJwN1YTuj9aIANvC+/YkHiQu1w/eGLTFSThl9BTJ+VM7dlilzc4vKmDAT6OtwW8qhOzHuJ3QPJ4kDbKBUI7hXDHTDq/RaXpna8Q8HiRsbXY5gju8mcit05Bb6g7/dc1Vnvu2lpEcrFQ8WyOrXhn8W2Zc3z3Qw85rn+iTd3YmYpq09yceKjComVgnA/OMPndsKQgFDjAWRgMB4P0kaT6XB/ZjGdYu/7qs9kMtzlAXi6gFstzpEXerU//Aotw16faQ+ppqsW784SEgOQSOZP+tEJuqX60bXFO814V+OhPPcW+4z8PbnaHG8s6ESiXtCaTA5BKaY8i5STbB6xHFGcRliZ2U7tzUC2Oky3OIlmX150RUkt7F6Tycv9ACZxyRWAgKFRAUgQSywg7+d9UBUASJWlx1QAIIIUTGnMEPLWvvEbRXYM75DmQ2eMu8mygwui7SCEzkdnW675yL+ezDnX+YD5v300VqNBxPzUMDGEq82nMpLPJl7w6teVpgiBZU5bpLy1fVEE35yrpAzwMGtAMZZYh4RrL00sT3eDiWHmz9u4trQXxZwIH5xMCIHYWUXtntJO1MfSJjbl4b8xRmJrNMt7ZU3m6CnntDvtj0ck8h/CkQKn/xQ3jR42bC3Ww8m4zKLVsnm6BBfJMMJIJNndtha/uEJ4Po0BthlV1kf0IQ2cmPy1AXmmOQshmiY6NXdTFaB8lZBZYTk4oSqYbCCIdd4gEAbwzHOmoLAKsmafCOhDs4URarPd+D8QLHR+0xlbdGbzhnLv+JHYfiztGUYw7zue8l1P216AEtA0wM3203U8aSIjEK7/Twe3SA7+ViLcozrrfsjS6PczRhGI9UKUnnMlVXUbbth74QUuNIxG1zGtHW7JPjgZbi1tlvyFuX6SIpt0JSeFdM/oZveEX+1gY8djiiOhhftZb9Nk2bSTPj7rx/Vgt8nuQR1H5o6k+iah9fkb0Fl9nE1ygl/r1tWw54mLUd9GKMJV9VPAjmh3G2VyqqMKKbcp+UwdFb06caKcotqAgW2G1PI1Ix0PfxpVaHkQTYFi9Q6YmE/cpLrmf8YVC9Yt+k5yRelje4fmQUN4uN4dTvg/NPrH0fF19zveai32di3Nd9wVjaws/yjXJW1dhoeWx34jwbrU/yBFte0lxZ/9rfBuphFmequ+VmFh7UMsrwBn/YFzSAdkCsmlchGF4+5djFZxUPMM7+RhLLkpngkJkPQSwkaM5H3rPCRhWRHamSEOfgy01Z+e17I9c8JR/CEhxH/KYuiBZEewdtL/h5DGr7N4A4qJHsMzdFB1aLyWPWAcauU4iP9aRLcBIMwqT1sPMSyaRUUHZpoEfV2y7Syt7rqLdt1cpHUCNB4y71ERY4HxBfZ/GwXd6LANRl8fF9/73H7k685haUzxZHI0qUNHwuZpAvMfcsmD/gyyMSeW90HP9XRnr9SycDn+64cf2bq+cP5k1TASa4nwEmYShIGOtlHePIo7EXJ0MBNPH/L6Iu/sRbYWakyoITEOxDzXAzn8EdER6ESHTs/Eg1Wy/hGBRrH9hypLufNgci8R5hBlqhho/BsU/MpAqdxvRWFDc/xAfewCBfsUNQYKCo6eTUq8ygczpOJbQwxgvqFj+NcimnRoNDsvDVn1oOyxfO26sKJSn6+c+MoMFfv4uA8kJ3TwclY1Kr91L
oi6mHCbit4a9Kraz44FJU1gQIUpkt0qRPaMaIfheGew89e6L6IdkV4rKMob0ZuaDXOKew+do72oCdl52hvXJ6zGkBa6GadhNfJ1wiakO19mP0t5aW408pvM/f/sT5CSaWoaBLlDt5T3lPBCIJu7BA/Qaux2dB1pr34Gx66leMwJfUKlk2tkBGlJAAmncQzo9xgjcqIjIKDLQ3POeEYg+HGxQjciHWqeJrOm1R9LFkAToYkRh1TWmrh1HktM1KF+iJhBY/VvnPUxc/e6Lxf0VcZ7ADootXpbYe540hMVtm99kD3q3FI+wPR2mVM/8e3kxJrpW/ECZ57ETR/jj0J1B5vmJz8PhvG/zcuYnmn4wAuou0kuN0++cNTk6FwQeiGoxlJ9Qt4zseXzW2O+ha3QCG6EmvC+5Zini+Vf2oqwjiTuZIeHWVl4LtGdUwBQ19p2XrDISpfwLENw0pmLvF8C7ASojWXS7UHzziHCjdKBUqBrAOn9OfQYDJUNvte8yPDi7j/h1ziAgXlfhaincz+vcGXkcoCZwqX4rpMGyllg2TgpjEjaNMeUfjLqX6Mqz2T14FH02hI3VchCHkCW7eoeo3Oz0MYr412syw9qJYzi2UJM9KkLja5LJhd1S3uhJuN8pXrILkTXC1rqYIi+8trQZYWq8uQ+SEb1P6aBH1memNHoZrQVULyyExWChdcP/knYGCwsKJAzP/OaC4y2mMJvhqHW91go9qjI0ZwxqLzrUtJ1r1z7Yj9/BN3o1fL1gyuqQkHiyI0wuEf1fiC7QGwkEu3qRRmykVU0Usw1Qy/GZPmOJCpx/F/2by7s9rVqyLvyorX7H5xtvt3d3fs8vByeH3tPl1K+Fb0nMo+StGpdC5IcKnU99dSkSzK4adr0tp5FWXLw6l8YMpsMNUGfP6cEKWLvaSMjBtOXsXWmCuecg94jeoug6A/wnX3t+pXqIoqnyxxINjjPJ5aX3fmnWL1/d6xSk5qobPTo3iEM6padj6cbr/a3IoIhLeApjUy2xzX/dnJd1XP7DMGeJBLfxbJLnEMi6jLLQWtBChB/hArDMASkrsFoaTyyDFS/J4gD88gQIkqug1SzPV11iWe5/lsodmGjq0yy41O6Nfr2NqbBg4ZBkjvanjXuWdoZc+NlclS1Bgq3FOxJtMoJm+ionYdMp8znhMjn9UVRHvdO0IpIcblwTWMl6os9jMfy7/viBL8PtoiAfgjvsbIsIMTJ5h0e+Eys/sg1yWCIUgP2a2/rp7LCezTU6YaxjTGg5LQlDDyhqoIS/qGNFrTvdkYZHhmSnx9JBhE4OMug1VjbDL/kjPOGE1OFda9EUJL5Dw1Eqw3xnJiPe8LD70N3ibsnuOZ/xcGsMYwSQF/a/L8HiESYnt9XSoO+Y9qykeqqBtU04quNa3YHr0LeuM5K8fy3GBJu9jhYiVQ8Oh3v/DZz0BYfaLLdfSkUW/QsUlSRy4X/xdBo6PkaeKtNPity+W+4TAXyNtI9oifa8jnTJxQVdEmmfwYppKjEntQtrQHZHYCisYvnJZEgv60JfAG0TuX4hQ7jLAcxf+KPUYdb03XY518CPjat/Aigj64YNo8E7hy/h99x2Ai1YaJJ7RTGuZ/nIkMnUFEmrMd+UqKzukymPlmz+2SbNZMx37GGxrtdis135EuHNmo7yzpyYbLX7sR0MS+MQB/izrfgIHhiIo7HvTjpCEUoPyyYdRHkTeZ6xFPKCVzOo36974VBC7wjHXz0ZjzrJEC+zdzBsjfuHHeQ0Wly/ty6UKL3HdqPG953TfsPndsJhouwxUnSaC4h5LCRlcHUVDu85fOswkpkcg6xcRRvr/wmRwVh9M0xyrGHmbgboz6Ldc+GlMLH3Vm5Z5kBsV7PPlgmKsJsOl3xx95DuDZhfl+eAWw+f4G2v5Ilt+D2xvENm1SiGcc8QNVj0dAhsFnjn3CG608idHLpOSm6zLBKL56hvt3pYpEj1nbTPelZiqygnWNBJbUEZ4VUrpotmty0BwWxYdFj7oFL22stU4GuvJqcKdFLRpXN5xgNRA2PalovJOSghyTj6zXfD4WSH71BXQjc4BQyonkkuokwV1KFeuJ8/HzDtCLjyaND1toF3oETk3vm93Qy1Qzqzj7YlRRzJ7WTYOS/Brs/9XFheHykkXt2LQTrzsMJ/U66gR6G6Hmi9XflMCzb0YMGN+WblFL6uHvzygWKtWIQwsixfx+YzCLBHRDlxfJe8m5Pe4t/ghOeDa7k0Hnqf6wFNDG7eBhzDCR20DjNQBULgAu960jOVi5BcjOF7KUINw6RIm3LOde8nOo12i+Im5ub+QR3c6DRux461qUXuJ97gGGBzhdM82ehu4jmcw8QU88AFKSiYqOcUpLZ7d7hJq4qcMCq5mejbdtqMibuSqlBMKrtWtJ4GXf9CxHevM+KnZh8vsrmZNRzgdSLY+gHw4r6urvSN1bARF/6Vnb2aEvjHtdf83YXuSAaCkPMVHf+AmTaOdBiRgd5Z28NtIv2bl+OJPq3niGQAX9iRme81DHD9FnBVfpgWPyMB/wlQIDGbb5wkZTyvQuPcwypvy+Fvao8UVL1Tg5/0K8luFjGU1DNMn7QsN3JH/AowWkzlXVlNFBpxluzr9nN/NKrEfGvps503eBQCOcQ3tHsYJl39Jz31H/zIO2FJqDeHKtYJU6lk1gJ2JfOKhboQ/Et4QXdyHgc0SPB/c5nOz7zRPsKL+L2GqCzvvXjjWTmSdJWPTDbM2QhwmPlrOT4tpwRrBCo28+cjeRO31IYM66ivvuLsOxBB3SmtZKuVw2jKjQC6rbNQkLmv//TJ+czJtgq9Jo+HEbNpG2ajUgLc+fJWNO0g/dop0R4EOMO1OF0Ib1ltno1QgzAnrILJ+vQ6Kw/lj1rzB3inlUK0/aQrXI2P4OooNvhKyHUS1bWAzhPcLpl3Xcy3OQlftDEb3ni9bUFib7iBZqBCzN1cjGwjY0OjkliCxVr9JHGZxEFc0DHhenC/nKRIdaSQdpKP9EFleGqkt+fGdjk8G8RcmpFY5rZHakeGC2DE+1xgkk4VBwLohQy4xVeHXY4ptAgWKopwp+IZmVT3OGZEfOIqCZLe5Q/jWCdSC4prdDQyTkm4CLhnhpACGkrZv7Ojebl88gEE6/QLdLbJWogLsmhYECznS+VA3xQkj/B782bzrAHwzyOa+TWNsVMJKVvv4lnfnpgvSDBy1ju5sn1cpUxz9IVrpz0fSNltfJDnYuz3JhPcUMEBEOtNXUy6m3uKcfoHTzxVehBfj1iSna/nmECC6k8c/+APoAur+bjIUcWJ7jmlUosU0T3HKllhz+OgPeZ0kwRlo6Wl1hnS5ZGTXqbjh5sKeZfEVCKSLpupwuAq0erJmuJRbvkPxztGuD5UDoPfTbBWqQbaSrSvTqnETuBlv1Frylb+rvfPNjXvRPuwzSTHWWzlfbuqc6Qq6wqTD/iG1uuNr2PU11xdYoLtiOERN0h28R4kOifWywstnws197t69AyiZL+24y6kGIuyWY817oYvy/NvOSoRu+9gVhRfukdCf4Z4xgc7BwqZy1d
5+iSp3eQuy/SXGtqDzf6ZYWXzk7Pq/rN7Phaliayk+cqYC/5WgTQa3Kp6sNuN1tbnXI9D4MNDJdFklMdS1LoVfEvDirZ7FUJTnCmNru9ip1qR44kvsIuoMWbqalz0v5OECq9eJnEpwRyEpPh5xtO+KTsFwvRYJKaWQZEXx7bg0KjWQf3Mja+X2k8GoUl1LM5qfEx2/Y2nRAwFaRogywOAZpqxfT0xGrD2ApKmRWztSZynLen3MkHrJH0+y8eZMzPiArZR85tGj9UTvlhln8ndqYTjfXgC6XvMxHPKG8GvaPmVvH29p1TgxsZwZtZZAmF0cKs9SxYR4qXpewq+KNxtjzuk0MNaDXru+HgkQSZ5VYPYGRaW/zSwEmzJfOPtMLai+6KMKErsSinqw7PPNjIm60+APG7Zq1gfR9gzwKdAq6rV82SADEZDJrfT9w/h3SUTevo71g7Pt/A9DhzIySzuvG7ppzblz0ux/3VO3nxOCUCjVepWm7o97xUmOzrcuFYQDLi5VDvD5QbarhUBmXJbfmeWH1iu3Q3N2452VnJQLMD0UpcamcWSwYMoK07Neub2LGzLalGhcGbS49SxTOGgmbn/RT6xAaekRhnUcQK2IXCaOGg6gAO8/IIkQ131E2dvPw0OD0SiFh5aGxOGJ9Du4jXdmNOg23r5tm35wHLgz9oiiT2253qnCVds6gmxf6HcxJsrH+OFcBKm0QOupE2n5GuBP03kF66UXcUFOOC8/wjGBrnyOM43nB9E4p4+LGpQqzgo7CzU1f1P5OILpjdz6Whlr48eL64EjX28ohK7tCM64xkZ2mYBOkCxwLv8PpYlsqnjqCqv6e8p/J9tn9CA2MqVbrarfaQsQPG3aM0+H7jcQifcL4qGWrKFbaJdHhHWI7qp2YhxlZaI16mmEiM1HRdzdP3WULCxPUJty4XhfIAziTkBqsb6JyGcIYLN9UipVI1QRIfSRW6rIO5fUmx6BCPf3dCXKZqSwNkmIewUr/mPxG8KF8YHbcMnm+iNAQPXq8efSeW/ydylte3Dq/jauRt9bZSWAQk2eeXVRm9Oor9vxUsbfLMW9wDK54Ugn3BBW6UO2RjknhNImEqL4udSI6xCtDiOFeH6hI/z5rVmUM4lwVnFlCiAexJqOqq6sVBHmjtJFPbSJIGvxQ9ogJCZ8g2PmLyenumN3ylaGS1kd0KlLb9I2u6MDjXn78T1kgSHzZePotT9U1EJibduMUg2tuJpn4ikIM82mRYu7pJs8GK8PVoOv7YTJqdScuMFrMl/Txdb4PQdC8aFvWTT6I3zaG9Aw4vDBHZV45cEAq/khx/9KRfNDW8vwJ3EM4lXnb5P1+mZmIbvQYTvhVbPgwF7VPL81/DeSMmzQ3U8TAGMGqCYk9k2aaasaptvXASl9BhCr0KrguBIzrDTdZhL8vH7AxwvyEhB9gj2NMyb8B/YvP6kMZ4iDMNspmsaoVqkjzmLWMzFY51b0KLlxDcizcmORjaJx98b4LS8xjE1j1COticKdfdMYdb7m0G37XA5N7gj+0PYBlfSOvj+sX4wj5P6Yn8q3snMZ3sFuMlGPvZaleVbYCMf03+Iijhvf5ab2SNPVQGS0MU5oSK5v/eYY3mF6XWJja62FltrGi/UcotbKKkaMXLH09raKigVRebfA56lKB5oZjSHTb0Ojil0y4VXDK3SvjfBdX8hukXOR3DHtZmJN1xKpCfRwNtBCa5R+wJq6W2LFfEfG7P3nbXpSMdij/5qVUa1jSYb17ThMwk7xtlD7eZnq64S8CrganYEd0Pb1kZ23P9oxphTIIGq5VZdxLLO9PwaDxd6dOO0Pj3PQQsQbbkysibm8LP+8u6FfmjV8w+pD6gPHlTaukzgcfFqw1aXO8qHMJvguA1fPALbAFYcCqpQGCEInOi5NNIqgQP4drfdhJSDpA0CaLOhA7Ic15oqEzM2NosRrGNxwuLszEi7ABucxIkwYt8jVze4mYd+MiJycvK1PHRuGKW6rfkPBDlI5/In6CLaCqI/tTkTDvIjG04p6vUlgtcIDDXDFdXPiySGs0PpCi2FMzVPCLx3qoTmwSNm6FsLMHOI7MMCXaAdJvmxXy69bBZjbd7tkJvSgqT/9GsJhQ1wwdqH129jiByq3ibBaLe3Pc3QNGLE52BVmZg09v7KSR+P1OaIYZ7twqdETXaxrDQGUBvGXCtfvKhHwjTLdCISHANWt3bg2dunbHxDmWRSrRBpSyEV6mWOn1WeCcKFXmbMy3EJKvBEyitHsjCrguv9SLcqniX4hLYpB1TC5y2gFHg6Dk6uD0iuEvom6QaIVhs2zn+Z5DN7Nvt0tWgnnIwlFFnUZOgx1KthAwoPUm+ByBw0upNyipA222r1Jzx7MrOEtU8aE95Xe4LMOyKupwEUzjbLDSP/yWPJJMq5xIeQQXJKok0lh9Sy42gQpcmnV/peOC/z0BOuBb3+gm7SisO+vGwqFY+MREuJcxKPkQETO2lvEwOXNbGeOeVWyw8kTn+orlsaTAwUtwOXWLFX/NYtC5wUcyxAKaVvPiYZa9dDK9IetGEG0j/VJlj5PvE19JIsBXpMPhSUy/dLv1ts46ntUvz7iLY7gRgHwVxsdRlFiTBrVXCqujR6xeQsa+mk3i8WM50f3+7wTzGFoM6/s2XnGeohrMYvFmSSPd6868Rsw75rkcRjt6PCyfEyHld/EXhM1H3X6nWTxAWKpos93sS3fUvG6UZJNuIbeioQ+iEv5Nyonl+FuP3sGYM+ML9OB1fdQHheTvhgHr1l+GQyS3RejAZCdo41KGWmpeSdmH/jpgZy4oGlQ60pK26Mixg6PlGMVFy/weiOXWxcfhyu9WqjPEcd0hKb4OhCunvK8egiRIz0Aynr2BQ7B57PJ02vuMAfCZFVdM9zEDX4Z1159a8IAUEeOoK+FBfgudIspzP+e4pNSMB+BuygnEkEclPBmk3CWrQjG502c8TobBzb/VpgLzpt6VVraCPPfPNJoUR9nBVasUkgWa2HegzQRtQCTbSWXMPLZxuXFWTtEOE+3mVR7JG4YEYy+y+AAy3GHcFuxamLnaYybfO+CFufPEO7yMlioNUDgu7u+kL5nd1bs1X3oQ3eEy4scxHe40nhsyVbbBWwQxCLsvh7s0FU86xgE5SPt2Q3MX1OQYbsG5aBhOD2C01AuUgW6RCYUAgFyWM5VJ1SHADijs8bADpwhummePexUDI+jnAYj0l3OkOjCVvc4CejGtPU+dLiQGTR6qZjYrHnMVaEYJmeBJG5b01N0ax3n+YlvlpexqBiyMNDDnNNnFQPF/ZQ/yPY2hKbkaz+BD60Ib5OTyg0lFe4CppM5xrMSj+0S+wZzjmX+zBHVVHl7E8mU8keES8e+VUZGNEPfvNbIftHFg29g4rm6QRjnE3XRIKq5IdLoiDYt4XSGKxnlnaFFClCr7alH5YodIk+12uSrYqHK1G4IK/oP/Od9zonF4jYuYXhDp/dISRqsxyIytxiLwvbLF6+siyOrAwREpn/UHbKks9dkgZTqj1Zl5dkDfkrUw08u0o+G//r+Aljjs
RPr5jg9lyHohcX5DWQYgO72IuCQFIRqppJSY17TBt9IiztqJ85siAk8LJNz5QwSWGNyYWUpt3Ly0wDuuZXSjDCmndM9oa/DPp+80YZjODZEWUG7m6TArczYXwOJp9deUpP1YeD8RnGQa+BUNnCIG9PPYy5FNjGxVpnF/FiNhwrvR0sb1dYu5eLSramtLAVaWbe5ZufNUnacPnvnWBrRWEA3OHm7L88y7bgLBXu7WXtsswhgzNdG/C6OHwc/7SaDIFVbyv3mZdnDLPG55bBOcQ127bul/DndLJ6EXMZYfR6KdlArtWch+6sMO4q64UOaTflhPlVkfCs9S2lkr9rmUV0tR82RfZftOPeksE1/79VQDTpOXqyGkMYDLNPg35H8kQ4NHRnwB2vqIoxJ6Qwsky+rp4PMQ1fks0mAWQAHrOwPdx9OTtk/QHzT2xCxP5IrRAoXrBJtmh+z/gLhd7Hy0ANGCwLa7Yk1fyniU84K97DbfPpe7WYEf1U3AH8LUwoSonLOp7r78uCJWQd4ueDRyoctgJ3yWGd05VPklvf1zqCOPriQt+K1JcTd1+9MH/sMciEh1SKpns1iE1Vx6PrFYZhrS7RIjlna6mhljYFd/vhGp8cFZr7y6F19qkAh0pWLIiS0GjsczIpDuI5gG6oDdbk7EidJQjVZJxaqw/hG0C94gB1e18fJ0k+YfZPBAfJJG9UQjnHHS2iBUoA/2lHp+QYPjHFsXAG//Hy3RtmXyJsKJGLkzwrK5mGSeSUD+TzcXedysDegZk6Jj6kjAFVJMP0tKIN9yimy6D0hnqYddjLlVQrTfK4xg4Oe30ydY5hhdVXmxjGIHPD/fXC8dGy6ZPqNhu3GeUKBLmN9l3Uljc6ThMKYUZUBinA8EJ+ZT9Yjt3yEM0UU5VtO22NtuNEvocwXLOetC1UFnOhb3QUIO2wndEfGMDXrkpSJda1zWWlZt9HIcLKzvgHgLJ9y2YapYiDqRLMsQTSNXWuBDceLASvbrrHmNL103C0T2qR1xvR+dCLoqHCdOnIjVH33JuykFhQ3JapJoez0k8mwTeEjkvho3q2vP1//T+X/g3b0L8/4tOFoPCoTtxdp4rnh5CMUyuYUAL0rFEuYyreq5HzS9IXC+Bd7IurZw0bJoVrovCd2EHxhBNVBlWV11cSKP4G1UAw9X5UeQOLRclJCpH+b7zDnZBiR8/HYF/cfwpBfmc8iA10GugtFnNnpxrTrlIsuK+LOkRtRNVFINglaE8ZE+Bn+iiXRtzzoerC28khuFxr7Gh9TNbth5sttrUbL1sTVABSabP3iBeqljIQfHwysH711f9uyWeqG1i/KE6wEtL4D/scZ/j4CYfV/iWNzPnFOnDkRbFAO2nvtD8dUAUhoZTIa55LVGAvYFphLM0wd3bUqEpvT/Nqdls+aQrdyKZVbpWo7WSamL46YxOxR4IVAe3c+oVFXKHcgwVawG3ZkMA+CEOmx10W6W1sdbuRqP5Xz+WMnrx62E8ynx/2twN/jhVwUQlxWFV9gve3UPcNii4Ey/nfGeemSRmJqeAtE5b3YWoc7InlC7iqo61FUPAULWYeWObEUnbwFlFwgQZwGZsV1hf+WCyhwpwKufgODh9FNv/kcejuOemLnMicT0JnIH6+xkCXjo0G9QAg6tZTphn7hDIcAU+a8cqaOCqJRAu5C5We6mW8153vsmMxlhMCIWZx/UZs283QM5ZgK0Y+03eqNoey051dq4njkyM9N/TnLh/ZRFeEJD4B4YHyFdmKVDv2G7gnAhjC/6OwtwWGQmcCi8U/FZUljUQC5wTloSoRxnrpUmrMiFIk/vw4E5doK5xbzLp0+mOCCYc+/Vbl1IWGAVrzv01428PVEp/3V9biewhlu77MtFPAci131/TlcTv148bolBPBAAHCHTxkhq35DsTXow6Hc47DrimkV5jt7YcwBe7jM/tzVRB55nSPSddT4uzhVPOiIkfXllFygtiJ/edd4rrBhraPbewWlKN07m1zlCzBxXFrG4ynWWLQiWQSdwzxZm0GXTVNiY+lObP0HkQu4Utpe2VsrlYxk0dyCTyt1fmnM/k5thHKfFIOaI01uj54irhbxWJOMafSEV6YeYE4o2PMYN5vfcBJ1VfrJPr23Cv/R+pah8Mbaw5a+dmuXS3Lp4C8MDLqmaMv8k+n1KLwJt1s3Gzllv5/bbD2hAg9gQWX7jcNTBgOAtrAqgCMyJXvbdSFISYv7PzdMx52Wbgcq4gwtkKBWhku6kV+kzm8IdDReMcDaoeeh1rIkkfOOP3lQeKn6T/hGutJKqEbT1qU21zT79tjt9aBzt3LFcNTpEi+SY4mgyOyq7wo0RjhtVBjGqCIXyULiI/NTtDrhyQTX5dSYf5PLETAu4yW5NfUOMRN+MjnSIVv4Hi8IZNWt1XnYQmC+OiKfymAUZcKCWTDMtGQJ5TYkE/RIALaw3ajALZf4J1O33fjle3Ay6pLT4uGbHUxgaIudqRPpMgTgb66UeGuLJ1wUcT5IwjsmWACcgunO4ddVXyySCrM/Ql6TRlD6NlAQzT2kXR3/5fZNg6eZNmWqiW6sNyjh7cVsFY/rl15zarBvCM0Bsg7lPuhDMn63b3cVsugA60seJvct0OoXhIAFvRSzast2XJfjdS2kKZU0WfZDCjyq+mQtdmZXEWiaZmyllL7b2T7m6xvb7Sh6NQXSe5lfizHIWyBzZXIg+w9SU2x3ni1w0wuKyIVbyC2hqMmDZwRV5T0Eb781fUcH4JuAjj9O//qWNlGrcd3gZS8cwpuR1XlNuSu9VAvwDioRCAu4Y6qfloiYcL4aL5Sg1yTGkMEZYCnd8wP1bc28F9h9Y4PcUXf2ktuqLJY9qicbI/bdc7qsAY9T76cpvUb8oeQDRRCHZhGhCjuLXfdCmm0B1jD8LqBovFE+u1MAL/PgXDVGhRdFdPOq6xKb9RK53l5NTSaEDhpA1+OZqh5BdaKQTKRjlKq13qi46pOI1hDH8n3hHrt2itrnamSfeVmPiVJN3YPaFvYgC6yPTKdxC9g3btor5h4TgxY7kJr98GGG+U84TlmReGtwOCGnr1vacdzN8EdnIXHAoSH7n+k2wNCLQoCANBs29bNL9vmz7Zt27Zt27Zt27Y9i5iFnM83ndQfU0L8YtvnI5ZUczF9WlRdUeQIFXS9mRvXD9XTwNLWcZqJBgzfYfakaORGh843XD4MNhPV81kXlZBTjOMKK5CelBMmc7K5XjVwb0uRSj1LB7QzNwbiE+vEfyE2ESr9w13V+NXeOOfgC4jYHhzyKnqeO5dN1R3pey6DifjIJFFtjCxuTTwUne8YVmBjX0e0T/wPq/kRILLsmRGBP0/FQZ7B3b8prwy2QaXMjsJGAeMNpVSd/s5S8Xl0pCXqsbxxxOiLahL3nTP10LOQds3n4WqI9K9vSTdBLq+eDE99S+1C9yyhrsQ6w/AJKpEYQlCnfIN4Kk02ryQh7FVw3nQQUzxkpB7uSXwkuNZzATbtg1XooeJTMqO3KGlsL11tnUDkyqIqgRU/kkOazETDoceFNDgN9aKIJGrNE
Cw+UHafxRudF8qnq0tYU9qzCDV96uMHMFA3p/uycW6V93fpO5llvsAozXaq8WFBjLdlKM7nRV8foIHTiCSORfvKjb//GXs76TvLDnWolqD5B16dDLK9GZBfHqUWE3Nfepf8jX0gEykk5noIRuRsjBTTP3li8FViLHGCmKmXEiGmJppTBs2i3IundNPJB6WuPtFTUmGXOyNJqzRr88DVoteG3l6S0wji4W2zDQZLADg3MUVPyo43MbVzSJyl/dbosC+SJuydQD4Mq2Bm1fPuL2ISkVXQHN+oSFmmPOVuOboX8y4/Mc+c8+/gBgdbcwVaqQsSkc+FNICXWypaJFxeyTn/SC0jcgGhRchCw6yugCoMH+7+CSHUw9sgAEaDZWPwQ70w3Ee3UvXGmtQQot5flfnI1sGe1FzUmLocsOkeZIsg/djPsJxfq/YVPW92J3+f8UFwCdvEEHfr4h9kSmHl4QHFHEtJfqnWP89nLBhLKJVRZvmyHHglRMk1XbLKvicbz/Kfhq2TTprwDjEV4G5xNRnZXZFWXe5BtQjkxN0UWJpH90+itaHLK9DKjpLPSTiRm/TzjgekzoQ/LWkGoVSjPruUg6xQ9hCQwqaV+Wfeb4HvgJ2wuUpqTWNnh2nHHQXdsNU1Njr3itUQEp7w7Sp+Kd9ZyKR8xRVRKf7BTX3o56e+nYT5DE/wyRPOiodzITp13faAHKGwj8PumWwg8HmwEpc0YjTUhldWHgY33ojxG0A+jaPTD77CpoEYjrY5O8HxzBukHgK7qu6Sagt7+ZPR+8DHB946O6wEkDz80sdOV5E35VSCLSxrCmpII3/a4dRhhZSbugNPxPwsBcOOP8pYaNZTOcci8lte40hDh1ftenTJdwsJEB2apb3sq2ckPMF6GdU4IxJGMifX05t/w2wtF8Qk1KX0tosy9bmo8YvQYFRwGgiGkznZDNoQGdWlb0KtiaZX2aYhR/0n1Q4JrP3hdztfjLPPpxGC/TFFdBUxn+y3cOd2aS2TF7qcUUkXcO69IUfXpWG6Vz/kiRxb8blDR/JvqHJaxD7ZGdKuFsp/i/remgqfMQNZXiTgzUBjYM6g0u8gMtcEZt48hs49jm7SNnx1DblvY+MD1zDxPbPOKP4TXDyIuQi+mEdBk0MeZRPBhdG3l1l65p9dgjCfQ8HWjpFgq4utulX2MpxyqFEuEwMDPrqu1fE0MiqN/YsxQT9uefvrZjQp9U8KeKDjo3DCoU/Uo1FdkSlnSJj/fn5V6SX/BTb87P31PcL7s5/lGdCfa+RRMOS7wLHSwJmxZ8MtBLe6Yy/UVNTcmg4fzuz/6nLBsRkgKgwt0P++UVLp1vC5ET6N9fCZkA+efXhY+n2T3+hkkQRiMXWgoLCymxy60m3uiZb0/yAFKdePa2sHWTWMHRXFjXQtaN696NBen5bvF+q50AS8QYkS8xv+TN1fh2/TbzPpXhPE4sDE9sQnRnX1XBfiz6BKwt+nQlGjJTiMe2f+9WsVHS5Ijr0NpNIssFw9gTkJ0C5RWsPzTp1mUtsbEx/4mgTPHCXmYRcn07W3oBbGSeZKpMDs59ayQW3vGM4JTwouGDaNyrezy4Zkl74xNJaRxwqZGa2Q3bicL+Idgm1fagYe2ZN+4pkgJmJtFRFmto418RUXm5+Qg8klScVZfbUuVo20bwMY3DsFU9yZSqoiq1yODiT434REwM19VnFb+V8w8SrJgTJFTDK+rrOSJEh4e5hexKdQlHMlWkCE4Ycuc5Jh7in6eq5f/516vdLkBV3Dknga+UZxfGhniwpn7tGSt5Zg58Y7riudZeAbsEaDca27NmvHW7ENCP9HFYRrzghy9SsUPqc71hgGcwr+n9bwDwraeKsTpC8qqNtmTbitIEF8AQZtQ15pfJB/39alNq/8L7Dj+hEW2EV+GKXs+SCVXBiu7UD3/QfBeV7EjkBZo64oIAH/a7x2KjogBSQrJQzYfGsbqWtpVYhE6L0cBh/Yr+1E5rWAkpr2YhwghybJUSG+a3y5pLwjhOehjjyZDr5IaO1Kg8z+0BlOtYat+cslB91L4HKcH4q0Kpa+6nMHdM3a5hkR6+DODCljz6UPPfefAOG2G1V3vEYj8t2FBKcELWFgd/BAhFK/SgbN03BCs2ll1EusZw8XyM8MEYgRVlJ8ooszY8ui7ciWjbADIKpaAoa5Snh7K5DogNpi5K1EaXO5DdZ37tRX9avpMbMXjYMsC2/opViN+pGqKR75Z0s9KtdPIy22jLuJxH/CbNFK3jC2t3UVG9/ANoyJuZZ9UmbosTqcQCObNpYXoD3Tf9sn5pYHqxScaSB/E08kYcEvqlGz88NMKFAlo+mRf4IWucB2aUOox+kfOEbCimlvPtJEP0XOr1DjDt1mB2OZ3+gP6kmMGIUPL3pmPAPnk/hcJvI+W7Kv1Mn7UEh1G1UiZ8pngOTveJ/NAA/PMwj4+5gLUlYkL85ldmQQCXcSKV+deqrEoWMPAT47M1nGAcPYDh9rJMhUvQ0eFYcGJT0MbPzlO45DpIiFuFB6j/dIAzzBKTdPpTQFdh8GYmkrhA1CBoOquZO6md/Uh4ROGkG8yEBbFmcG9/qWr6WVpcVhVUoyqSRECt2kbDVN8qD5GtwvitHDF1Fp3WobxUnMeQ/1MWJOAA0du8x9jMySGJ9OFkyqR5jjSGj7HtXwNye42bdFiM5BCXOBR15VCdnhn0IvVEK6lJIAcXwx2j5XXOdIQVjjPonLOUZsX+5l4QIsMqWNaBO9KqMxhlu6bowZsTCjEkdslwb/3f3+F5D6HtNt8/qibz2V26HHviLX+uxS1CZBuo0IqqoZd7ho8M8E27USDslWHWVWMB/4n8BTnrh3XrioLJnuUpbjiMRnTlm2s8jLn/qlOvPeBPO/SQC9G/rWn042M+DBCSu9+s3O6oCV4ABBnCMecq9ptQfGX7b5cUVFiERi4gO270/lFJ8hWnWbViIMq9dlju3x1hPNmcIbIzH+EzzZgo74dq0EokulmbUT68DIBYR1k+aQLScSJZyvUG26FDfTU0C4tQmuedu3lBzeSIajrzctCOPppaQImks+h+BxM0k3+OVblIoeIq7hyU83+TJG8K1sbE6NQy6yt/QhS4ZGgXKEe+q/k3H8LDrVsClrYg3Vt0mFWOUT+Zvs/PcWEzyls2gXr6397VeLQS1lrZUDUltGBgWBu4bxdb5lHtptt2TPn8S9n2q69E+1cw5+s3EJZfeL+3CPcmWQgaErPrkoLgzRnhpgDmISAQ/As6vi5hpetx+Q6IriVlXFwsm51uzKZ9XT/hCp8DAISn43s/EdhBYUqN/fff/y3Hl2Zm1azlG+s242HfNGU+qCbjI6kWnhQ8zmouRPSTjXC4AwwSM19QvlukFiY6S6wWQe8Rv/Z5sqUfIksmbwTsQpSR5dY4n5MXMM9GL4OMk5PVtk6zXQdzeMLjXQVW4uq7rd8csEXtTRmWdSGlvlI+gbuMPPdULuFtq1e4l+8sqGrYlUSLH30hGmcj8bEkjOi6WHZxLiyKAizQMrh6Ap76a6syLsVKh4kt6ASNrb
iRXsCmy/RsN/0PCZYjfnDA77548AG3BQ53iXH7mRU3nnAfZnzI1j47WAOH1j/GSSPEhfYvRbn72dtQDyntc9ibACFa6Q5a0N0mA83ZcPOv8r1NCAegz76pGfIj1+CLNuh/bmCB341jwchRBAxrcq/sNMjWuwquOC/ayWHek6FJCiLKlt4ARokooxATIyE3aloHYtyO56j8Mywe/qhWDvbCsIQppHv57Sxrc+WYVP8EwcqXmtuFjql4PLfm+P+eebmNMgOI6uENXdJbC8K1BS6RcSReysJXtkKMFmRChuc4fAPwscLp+Mp2jE1JpX36MlXzNYssfoYySFAqSbakB5rt7cIKxlytUlG9Ol/vlzeI9jHhgi9F2PzWTqhj39bDC+G+twov/D7bnhBN+NWfRSZdFi3mNV/wcNy1IipjYmsnbjdHvO7LLJl5/eYchYStGNKnKwubHNdh51uni2Bhf4E4nhXd6BH63ZWt+xg4FYWA+VeM4YeOxm+gxy1ELyhamuzhraJ5NnfGIN7OB27nmwfqit8U9VtMDhtpjXyBkWUljlHYeyEVxOed95eN5XrTrMc8D5BG2GbUhjOfZI8rYLywy7X4CADhLsuCHRW1IYLWNKfN5D73F8jk33LS5hn772XxRqkg8DyHbJmEzXK9v4KOovH32Ci/mNYjtQTinalgUqPMpKuIZCXo39wTFMG+nkbxfeDt7n49gavQ9/RfjRF5bWacBgcvil2ob/9f1wBt9kb9E12n02H8QdSQTNS6vpG6m75n82iXTyT++O/hrG3xe0V6d3m2Vt0Cdnhom8kfeNyzKHdivL3OPmXmw/b3toiExltd8+sJkBa4bO/IsQxulh7HVeYQY79gmBI6Vmn2RFsRSVVfLJIEq7fY/63lTk0WnM0lh2Dnw6lSTHnjrJ9oHoakblzvghM4/3z6KdWxkvxGGtgJABAtdacISb1D4p1UKCm6O3uUTtt5cVTsCTTFDJX0LOs7INFyTEWtOiUWFCioLvW3KUkzRmLg5C7AOd8lWpxt5fFFmkU5h070CmcovjABelFs9ibUSAEhwbj6Ibr/EROxitk2RQe47yok+z0q0UKT59o4cnIOgc1ATM8GBt05VrNZ55F240I7+X5lbJElXc7R2WQ+zTaLmJFTri6S3wJh5e8mNIrFiuCBIc8j367lEyzufDtsxK38DVI+FjZyYdCmOXedGY5C/NBj5zNgPgaTyo4tCYbvHT/FKhHbWzbAzZ7gk1GLXJ1MvAPsVJ82e1q5scPjdcJ2H5QBrT+iUPwJnKa4SMXPoKOOEdMFmkzzCusm4VbCwX2khonAOamUTdDHMTULwCMLzfWAvuu0KB5prJmQGjiv6twyCvzv8am9lhXfwYe5vb8CGHWLXCe7H8VyImXlSdvZqb2Mcg8CpcFUlZx0kFXd04eKs5X2ZUlgrCn40iNyNnfWD2Cj/HQZ5mx2w8zfgHQeizm2oXTSqDWrwRQjVMRvgfHi+7oe0kTadoU2heBtqvnXWrQQJfM1R9yhF4gtKQG6q6tbt4CWIxOIrlXDVr21oW5gxgcA75xubee9Ad9jQUw+bnHlntr402v8rh3wHAN8ag5Os6I9ea39LUxAer9TKPJq0X8FsUMXQ+/10JQOoE56vznzeAqKqu8aFSBTMrb8AzjhmxEX6/2nSBNUdoIIIJsqtCdNzDa/72P51TqIZpbtOz6zFEwVuHGC2AXP7A1e098yUfgRbAAsrCUrMbh5T/qqbmOJjeJEdUO/Bgvw9wqtg0lP0piANt6uLW/i8nRbqvHMbK9Cm/kTmfXs8t61uVsJJcAKSfPMMJSAjOtk2W+z5V/Owzae0MoHgUyDZ5SsxhWp/KxpjMsadC3E+ROk8vnRywNRV51f2KRA5sqv0lnEcYUQi1Y+csTHxqxTwVXGlT2mRFFbH3vBo/D49fFPyItdIJgFIERdPHHDDeuigeDDFfycjBuRf3KYSor+ehPAO5crzaWrdChp37G3CrsSlk6vXocsogrqO2GsdFa2ItwRuzuARlBYDX/dd90nCEIkpLEmcyM/IW8DwGuCzbPQZdQiCT8JGya66tO2y38lAcgJTSKo+SCA78u1m1F+0cLaHK5gycM10AMmZkrHrPyn78xy7pLpgR/0GCNrN6UXtvYMCvxSVMhJYWEiQNqoAu7uecPvPW/Dh/UOVvhIJQyN0SXC1i4M+KP6bmtlbJYLt5hSgf+374mDHWgNyhF6QwWLr2REUDDCOcCasJ/x2GzcfPihno/hIx1o7f+8IZr8/FLVXVcYNNbyQbc8JLcbof6HQ5646h8TZZfVTTA53HluLSBb40Ul85ZU7sayIQZNejRIM6P+/u/uWfngdn3pC7AbANDyEpAZLzzhg263sIw3hDs3xrUxdotB7GGTDWEjsdgUq1NHeXnIvbNqkbb6fnxmZ8j5OqbrRt9DB9/YUVfOkR6D0YX0fQpUy/ysX8XU3OEXHGuwgLDGGKrG542TzSULO6mT6lKhIQcPEd13EXxU8yn0XVg4AcVEds0fZwQ61ubAzEmlCqH1vxSID2jAUI2Y/gLcx0Or2i0KMyoW6JRPw+UaM7kTA/qCAXBbJLOtsjFVL30YxAJNfroy40RsglZEawVrhIA5vbNqyoJj9Gu9ZPSqbebEHGyXmJ90ol/N7ve/7+xpiL1Y1hUo00O75Gu28Leuj0xAblDn3oHGpI1waNc+VC8QUc4HONlwBccBtqgxq0aOp9ONgX/QE3/VYobJPAkEfGOqkjmrlafrw5CqEO4jJXGZ1FIvRjfkuOT7HLuVS17j13gsaMMhn0yEHLZ3W7GNaiB+xjeOHLoGnQYBTERrBSR7FKgUkPvJZTYJW4IW+oyLNH9s9a/KOvCUSyVCjZPydrtzOYRQzCJB2EKM+4BETBYz9Z2BUWWCzPIZgk60JMhxtlI4ag1wUqU41n8VyNJGdcs3Rjjys0wJUDuUyzovceWNUILENKohNCZ4qeoZWwzTvJx28SpEtfhrafhznZ9UQ64Y2nJw9KQeEsU8yOMfG5NHl4qSo306cUzQCgEJC3wJtW6KtjztQH4v3yzWeMG5RZMT5FBYIOHVZCgIXH8/lxuap9KOTYiGb3J1G3jddnWi5CpyAoLtnhv05phOxPhd9wZA3h69GLvnxSivjmoBb0mXyqscUlCr9aL/eJ7+Nxix1yiWeUiiCw7ItPoQ8wn3nZjeSHQ1xEpDkEK3svVX2qt2pX6LgnDVtKMJYQrH7z+jA449/B3Gp5jFfWxpXo9Smulcfd6Ucc/b7Gz5L3n8f2A1xSmLpkvCPKKzclhAQdzyVFr8yAc7lI64b3VhUM0BkP0DLyn7ArH6JxXlHlYBOOlB5ZTYshwd6seAi1imTyz3tcqqyCsbt+VV9EbE3rpuSoepgiRDqDMlY8GXtJtajgCtw6vhdpi/Iavlpcr7FMmifkh5m0kgBiv8IzhKzz8AhIGWZFoWkQpcCiSiz6qaJVkTf9Xw56HbvIItRDRFDuNkqNdUY3rUtad6J525NDDxTPltHxqMOa4YaYGNXl8zGcOuYqqG6imyq72NK
+xMEIehV+eM81HUnjJCyrhfsKWaZQB2RTWoA/rXRZCjcfeWQEIkFfZfxzV2DeOis6pEnxdlpGUu8Egq2/9ma0vewVtKhkBQU3iMSGuYWOpQdY+1L7LeoHmEPMpeZx/f2jC28ugnw/65L4iDgOSbakDfE89RDgUjVTnBu8r4FQhr3vHyPkylKbIx8KBVHtBEmHPFqSAhH/l+Li4sRB6iWL82IyXrlyt4/NhqJZCb/lgdZNWGtjlWOxluOGyjR6krYyrd1qPVad+I2dL/UAmf2OxtszIu+SwvirKBrah6hAyQpVw2N98thFldWo0SRy/ARY/8BTvIRoT5gAu7Phy+3s+JW9Rx8y/ad0c79Ox/GtEqbGGrfcNzfvETEaG/gQHPEDs/UAommRc2q3Tq68QUD4uYobFt5mUV6e547tjBxQY3Pm5NJ+wCYonyrKlSyvMiIzXIUqS0JV3FyiX+mYmZrHZ/WpfVJAGeiHvT8bm+jDk+8BTC4zy4HkUSmutk+LQ8plz8afWlytJOwT1k35fDYPh6DFZeZFEW/hjgkI1XGa4R1P77hyNI8ZofbRp9NdjfWj5jrgNhe25DvYwh6KyOmKnGw3guTuH5J7Y/j8jywS1pADGp1eZKiBNzkCUOvQCjFcsYntuFzYfK1QU8MwbPfq15fqCJxDjGa9XtLwYorvxT47cWUDiRHcqGm5YsVBBisb7SDD6foFmdN/z12HIaoP4v/Kuo1CXbFjFiFGyqd+VtvdEyZaEP85xWACo09OI6hXCEaoiY66X3aVHC2GPA2VHSeOulIijsbREKCGHZQW9xlmiCd0NO/0OhnM1HfMkIajf9z4gDYJAJgXB0gkUaZb1eTbi//7ZzWvpVQ+akNQyjgcdDwZ3LhksOlk7VXFu2TZhqWDH+tmioloIgt4WaHERxqaUkeo/L40UIBLg8kA0ONSD6CThUXEarHDIhSVgUCMV8UNVX+oTIAyRPNbu5xm2V64X7T3Qhlqb9mfD49f5dP2tWsnEkhctgoFnb3fBYCPeyuxRh1UsGSxyZMSoEuYn4ndMbFXl6Z+DpbA1fI3uKtxGPwz+V1kH/18IEUeaux1mUSM49hTYHW6NYGqSfh83o06HGLWGAIB4uML/k9C+G2sadOg5hxZ2iZOPcw3yBba6+TdzCO2kDSS54vB4b9/c/UWx5HMwqzV1fnGLwc7VqdyVtC42Jb8GYeT+YT2HQ/SmMSOTgzO2YNKqzixFWtSPZHaQCWeXotCMDD84o0Bc1VXKS9WUV9mHcThuMeSo6dma1YVe/8BLdgt2hYd4Nhi7GuLQEoN4EpsCITMV+RBue3g2bn+wFdLt0AbH4WtQmiFFS4zHtTokqfe15Fh1owecPyLtnP164uCCOwbH3L7KEly1QTiTvzaiyV0JtfKGCEcXSrrMFcF1mdSOdCYlarXNrpIFDaKJ6jB5smtkmOODcKS+2p0NDwaHyCMKJ6S/qyTpEzv1GWx7lRxO45wprEzR16BJUqjJn2deikgjidl1MWPjkXWWHCczrroiHQ/K6YM6LSQRNQsPtHO5gJB9b9tUDGPEyhTHaUXTYBi14qf1RFfxvwmdkjXeV2o65IJT+Oy9+UKUi1NaKC5rIiXKR1GKCCj8zdEmQyjGlEv2fK2mbJJx8z7jVJuSQ8WVChsoj6FExdJ3mzjugSR5zvji5+2wwx5T3t52Nc+IB4eNR7W7r7T9g321TevVaiAluQDFL4MWpWSkLcrtJ/rwVObBV8LrMCdDxlDzI1ZhmxFNxE6fTWRTH9el0gOhxTy+Yr9CuE6qhIOC+LJHFXz0AuG2tJygovZG0izPLKhLquIfB7iubPgd45jOK1VHFiaC7wGNwzXmfJRVWp/LAzEv7HMJkKPeFJVFL0q1qIpIhHrxeNqlhKYp/3Xt06e3mKShjzBFh5yYEVBWk0n1vEw2LmV57WMQ7TCMz71XxYdWe1nEvc8SP2lz2kNaU6yIwcxLnfKJywTGCxZJ6A4eO39nuSq6QlUEnq9aiDTojWeL1zLnAhb3KGCSmybFFKUc6cXkD3Cq1DJo7fPom+RmG0/8k/RSCe/CyKpbxl+/89bETQ/aeyuzAgLcQI7HtpgEaHBMRk9zIMlpMPjENd/pBuo7oFJOnq++71AdyR+RDkH+UljyMx3izf3r5ufOCggH3oCaVv4fhPerwC4RWQIVSSxFQD6IBndWmYeImrdnZvS2kJSt3Phx6/o4Cs2LrQtqXek1uzpsPfshxR4D39mTMwdYHVoxFY/7uZ4y0R+JdXWin/PbeoDviJ8K/HFQotMxIRdU8g6HEzkAsz8du32Eu8dMagn1yiWJC7v9nnGDsG8Q+jSlG1p/J9p1IbAZ7R8d/dGiE2UhDom5B2KjuabGCdeiychJTo7cGI3zRUUQ+NjV3FV3Myszuo3fz5risNFq5Zv1Fs6osmfXD1ma0M4wOvXNWWlq7dzG3QsDzTeYaYHEJhZAyuztOQJHU4D8mEibiejDNXhCLVP4HWCPRLfR4wKLsLH7QU/tfgiC600WBJjy0lYWgWZXC8c2PhdBa05pJvTLp+SpXhv6hes0mPIDg4mL7dNQEVMfnAJloeoaj/fQ7Q/+31HWxRpua3jV/6D0ZKwTutuMkDKJTGii+7uDjXZ1e0RPotfrzsu3t5U1j3triNr8eLDOgRWCq83boU+NMARNTKaDRo44HQc/BjBipu40C5i78ebWxH7vOEBtvRScWwkYw+RRsSYoG0pJl3VY0Bhzs6s4v+AnGU/L4SC2sCCF+5xgywRdUi/jSM2VnzOiuZk1zZmoV4SoBNvuGhfd2+S5iCwjk0ioI+X+J9G/YoYJ3tD1Hfby06AvYzQ1bm9LCDc5/50vqPdTk04d5rSTVG2whAOtbzXRayAJcjdrVJiNYObiTKUkwPcp/CFralVI3vO5u/i0oKU+NL58/YRf5NH2ZABySV6prBCl7VAfPcMxPr8+IS7WhVPEF5urUE24NlGrIxi2U+k5C8jtD4hgGoSELo9qu1xhzNt+bAAXPCxoTH2rd7nm6a37isVdg9uPum/5UrNixcLQ0MFRT3ynwIAXmSWJgobD7BzxNRFU5L2n5Yepnh+nZrHSqwUdO8JlaslR0hMVfn+q/2IBzFDSS7UVI9Q5QYPKlqCiMtbWlL03vxBhZ2jpVItObJpMGcBJxQjnSBhymFjyVlmc7rL5NzoGJfgb+beV6e00osSpTvxd0F9SJb1lx28/JapZsIZ008DCId7ose7W09W7Aij5/RmTxYmCkyWx0SBzJQgHUzTN4Ptir5ycx/lGVyGYwsUH29rnpAyV8vY2Vj2bJ/M2xuL5dL+qSKwTuH13zSt95l+v2Wc9OrNUmWBP3X9sy24YOQm+5ka1bltcWOSKfYEg5qZ7BAbRg4tcSw6qD+b9uowSMz+XIF7go5WZAlERSDLrko/mayaATkjUFfvFC1qhLEugkQeDSgvL0JrJa8sahRiUhzLdNtu/ZjXX/wIh56vpoKjE/F6nY2Dsk89YYV30oIUBYqQaIxprbTplkRFP5BW8NFf0QcKNySD8mSn06pU+8
nuiM2nRUZSKoljYSD/DNDvSJrB3F0x5NW71vxr9MYAr7csbhv4tK6G73pKV/XQC//4QQMoh+l/6ssEa/ttdRHRwr54IPnfzsvMkLDtkmcJk2xrwCrSVAnKZY8xKJmIBpu+C/y3dDg1s0nEuKbDwgWScVwfXsSHbaVopIQZLaOiaVR7MmT4CDK4BG1C6DZ+imFpKUg34sXNPkbSoL0Bq0erVnI3La5pxol2aL1IQndHwLq6IY+BwAsez8SR68hTA1BeZpO//KUpUiYts95EbGoKAmEntDN1M9BxNSOxhPZScv4UEmS5U2OBO6JqMPeQGjFx6FDhgIAyAEFrT+OzQiTrIm1mO5ExjjwmEYO22rueLTmRsiQMXpCp5rzSW0YoLWoUYX6RsS1FYNGV4EJPMjXVxZsuMQ0Lo7gDss6r8wWv66bRmSLoE/E7RbXtG84UiRiQIEEiDy+E5HeuAUSWOWWZ16el0xexGUWFymyMYFZOAWw58ju8GC3nz7tTGoZ4I6HCkjzP7XfBzdOg36UPQNIRmYmuOVAsSr/NGThQdqOERBrzGzhzl4aD0o0bSzryVCjOcNl8C2JaWYTD6jXubmMy7SdxbLeHV16IN2vKOiP0+2Y7dLUeqQmPyT2aRlOFUp4Ld/iVUYlM7/AeM5HEAXpyLSODUte+TS4RVad35Jfg/aafTOy67D8H8Ad79vtTmP/4tEQOCY9yg0l+YEX8Vyl7aLoREiGWHrkFoLPnMjsHL3kJwrHFhf4RjVKcs5UFISC8JaUQxte+me0S9+8sqjEtNFHZa/NDaFayJAtHy26AMVwCyLKkJc3JMgeGqPdj11lyxljN8WE8ox34CzImH6qViUcyOwjDiOrcM9CXjcdgnLIk7L5hq9A/khH1+asWy81n0/ej0ntuNWJvNjYhOng+7fWUMpgGQIOyp5DTlV0HUY4b/o7pynAGX+C75FMzRhLoUPoKmEUEfF4Kb4V3cJAVcDaMeS0ckxSXMh54/vDMWDFFAFtAIqyPhyg4/1llbYWpu6kFRiQJhNokouKzfwImjmYGcjqoXuUA09v452fztChsfqPT/7ipEUTG0ThD9L03jgnNNEXg3wEGPDHucOFJL2ueMpOzXS5Bup0SJQKBU4KNC4foIXl4kxYkkRrILQ5mkl4Fja68j5BPcwsrMQrxVwkZc4HPOXTQlC+P+m/0+Q/uT2KgSs0YCTCzUzDXla98Yab/bXE2KAPq/uMypRhdN0m84qe6syDH2vnjrz3K07+DdPw+IGumZHvX0qkz8si2ywRa5yHSS+BphiiASt7YLASZ7q34gYJ2w9ZOGSNje8uExOXqal8T9ww45ca9lRWv8Z75Xh96CtzNChrM9ZBv1K6P2EjOJDhlKc9xZQMPoICCtAD4Q6Ha8Ezb6JQJgN4x2UqiGlNUuamHQhcaWAhcOjX2AduRmdCLB/g7KyTp+I2pAcMsCIyaaky0lDyj9m9jFPitrZEl5bod4yrvzHs9mtusnT6w/jo2pxkYmlidj44dipKp1qUevqYxSgmHg8/vBS1yO9dqlWcB7RamH2XieRsAG1eJCJJhKXZH4xS907DtkfuDO5mHvkBJkGF3VHzDi79sCLMmGmcBjYs8WNB8f4KKIAyMQB6C7ymAKlD20kpXnmHCoI7SclI4eqh5gL/9mt4BiAiM3yppsBNLCVXFm97EUTm00FRf07UN/TSzEWCUMdd5RVcVrjDOgwdcX5O2ikt9k4B5KZk9rXV6w06YrKJwPQ8GYFmemeU2QLJtc4OEUqlyYeoaFxsdq4abUXi/1LuwLf3rDEyAf8ZppOECOCqRmWtk3NL2m3yz5SVkaIZdUc/0PblMkkteUg9+j9YdxLjHrswLFppZSWWB/HjGpqCpZMdjzr/aG47If9a1WAA5JpeHXaYtroK4Z5qRUJAJ+t/wOMla6v2+hCEz1+jf4snAy7zYOviTV8AKKWwfLQGGLKkBmpRrpWe1xSz+Exv0AT0G51pVUqW+IyaGROo+ypjwmTXsEIJHRGfVAjMiAz+ULv2cIy9OFBxZiYulw/l5RHlXwgNs2WLBSY3r39+5egF1yWX28s7fDsAMvgCfGVFfGAHabZPIfwDWjMZTnneT4f/KwRmoF6AifoaCS/PUnT26HIxf/rNNUQ6BjPkqhZIu+mjgKdW+8OWEnjdQ6s6kBLxZukkLNZNsZ7+Ib5g8NYWeAxn0xugJyZfJ89ugVPreLVb8RVUPTIM+g/BJIuX3ax1MT7/y0axEI5hbkPV1t/TuX8NFmtt/IfO1bkCRaK4XZxW9xklsmI5aQZ1VT5CPHgWqbjmzJg9pBR8x+jdrrkUU0DDCZnE7ee5OiKlmiqwUl0i93IL9IAiKtnLIOXwd9dR2iZ8VCAzGpHgR7WejIQsTxLPjYBLnmwcVNqItojgKohwNga03u4kgvHR5SgXfRLv/fANCkX54q3EoY4zxpo9VbMx2FU8f9td1jeRB0xwH4HyBAJ83P4PBIQINQGqWrTgVbU146DErVLj1zU6+It1IGITFBucuKY8Wm561PHPAAhdInxEoroN7uBs4KcfzYjIJUrDiqye/Z6n7B3Xbhc7QCVfqiFmHgDPFQGL0y8+MzbsIDagkbHYyrQ7mA8Mgq9WMQIMdpdxBwRHdqJE0E5JZVqqPlPWle4UOOrCUaDKhVz87NOSMyrtD3HodgtYOc6avnyWHIx/+44UiMYxDFlU/jr2zW16h/sESwQ2yYARwk1JjVn8dQOSjzqPx9zhJCpprJYQpmdFqmEKL77lAUFPvAeAJMqPvmdmSTOY2aTWHNBmoCjbvA0Tkaz0k5DkC0n2WqM3qGiKHPfyFzwC7E3vqYdvwkzL3fnrJZ+yJmj6KKHUcWQY45ADdSmLqh46coQlreEYvsUBmNRneIXl5O0Fs+mbW1nUu89Do60vVbx2FlYZOevi/YeA3RvVJThi0IgaKgG5nPdK3k3NwfPXNj3Hj8c1+cGAcn3TD8XUjFWCeJxzrd5Xo5ZDFv1GK0Jovs/izys6iqp0bTKZt7I8Z4QfBdgX0vnl2AjAw5a0K6/JfYNAhbJHjCV/VBnl2ac2g/qgoV7i4v7fon5pqBN8sM6Owagm09Xpx9Y81IobOb9Q/Zni8+2Z4gefb9Ku61fwFyOEbYNsJR2ris1LaQ+bHQz9SNJz57YN/GvgL1SiTVZFDQnusUMPtkNjoGv3GscYw5ZXAcpUbbLt25oTVBvlalqViTu9Kt9sZ05xFvkbXVP7ovBeo/WJsvZlDj2XH/F6IRnfSWZDCs5MbewFypmIDQHZIuCLhUw04JJRkoXDs0/CshJj/CDLKJDK47X4jGVnJVEmzhbK1jiRcaiSeWmipqycfT9GiTYWTTOUEj3zR2YZmfz57BRRdYq+CdJBqZlyVJwCzncb/ur/ZCAVCRxwiv9USqxoldXgZrp34ZBz9gu1eujCtWRlZU3Zh+3nS6YJ7x0AfgTuBKq+FxXOtX2dtXvYWHiZPXy33zdnDkRxPB8/4YJj/0WYPCX9+DNxFUxpQfHHryYnt/lO56TMgZhndj9tAqLPxKLGHq
NUAi2cmGHX0fW2Td74ghrvv/yEW5+qa6U4If5WEgU4wVQ9TFgqERuRhTX3rp8jZkdgOrwxLuu74RFZlmbSu4jevmYV1zgaeytW1+rJt5yLubSqNmfeAaWoGndNqp7UprVQlnTdFqkrkJCF6fcFxJcbS9qJdEocUC2gFwnw7PQYa4PdpTUNppx+8ZPvrfRbFIDpkSCJpRiY41ZZjEK4S7ocijCmjfTZNf5a6Qpk3EEDi5p6ubB7D4my4eC4bLJ8892sCXCXIumDdThO+qYCZ0dP+6H4SLi4bYf1gG52X9KrUt/1otjFGIJtgNYDwk30kfFnVvxwZhgyR6fBKAZzk22qoxGpRZPyZjS951frO13xyLbn5o7P/KFppcczUTzqst7XLid5mDZlOqKZDZthcm/1S5rgi5MxL/MSR8yeAC5x3+8EabIWoBMQbdNvG3fLMwnrqfh9CarURT7ziqhjBqq2ZU7rEkUBiG1cd2m1L9mUtgJNFmXh1L4wcGomEipIiWdO25e6AWJmUnFTC81K+Mop7HDzAvMNwHz3wc0IcIqnqhEfBQqjRFfLsODPlWHIfid5TiVrxwdchjGYOopy2VbYxnOzEuhUTSRVaBRKxRQRdrVFgVYSq+JHsu+fMWavckEGRXhtwdZfsGsnThQYOFw9ENibCxkPNyH8F1N8cCS/v9YWYJAz8KAhFv2SUn8v5xTp54Su0MKsZrZlFsHDnmg4D1jY+wg6W5s/GFYCNUjFIsV50weGpIUAq9E3jqoniMELeGOpXnXAWKEfcYjcjsXhrpS21S0DSHN8cUan0oY2jLGKZgOHNaPVEnUoVR8diiqkVtNIxtA6+oZZ1jFf2Eiv0Uw4F7bS8B0yCiym7WjgqNQ5aGvHmPiXZ3G/+JsSYoCy9rLI4AGHW3pORob/Do5zCDP+Kps3684MwwvpVL2tH33TK/etPI+xvrDpLSKVGXI4b4lIv5cnUp/sokRZeDBIDyftzyiPlgmTPTuitstJnSGLzvtr4v0MAvUUkqgGBp2RA5LC4EPH45a6EjZj0Qx1A+rMo6jO34N4CG4UWCJkIFUBZ/O32umMf3/9ax0fTj3KlQDVKKRTGyVxWmZherW2a3b1acVarWnRM2dWr4qdLCizxyp7FR+6kLjnna2uPMlJuNARL5orrbpdaZkM9osoqD3ertQsZjnf4eWaYNFi4fOSiUPVgzsSv84eqmIIX1nkshg4RCz481lg6raAxSl69t3r0haKHPSoFs5gTHal4g4CR14cWuITf+Dswh37udBVeA1tAp+jUipxG3BKqjzYgYjxoPsoRhbiQJ10KLkWg4vzi0II5fP2vYZAOagtNs0SO+nvOODmEw/9lmrIOBiBipQg2GkUpXiI4OqHLdmcuWvdPRGh60m7/ynx8AgYzXfgJ3TpopBmzPOOnMisvLTdSWlXxqy5S0GAlcmy8j+OIWaZLhvoJgKK2NsGwx6ZjQihb4mt36T4hex1r9KGp9w8OyI4buUzzWsPHVCJ+VWkHiYZ/cVYwNlLW0klQzg3wuE+eg3kk/wsFjqqWEPZeKQIkFhEM93vbyypSVdvxrDgwgZQ9R9LIQuDLsEpbFPn1AL0nZ1TaeBdFYktLAZk6Q3Ktqc+SsUX3S16alyGiHNnU+0p7Ul4AD0FjchhZn5i8xyELT5+McPKXAolU0BEsHzKFKJWd4jQsftyUWHxkTW8Tq8NXyCeuJ6HMvvug3sdErjyoyaEwlsqcNhzqKj3SReFBUBFDRcV/1HyE4bQqVkXQ976Q8asM7dwwhj/eW3wyW41fzI4AKdvxuYZ7GOgYO5qL/SzATkwgQ0ZZyO1PPGtcvKNGyOaO2uzsTkSTbdFJZLRsNnKPqLTWdJVJ6JGsb9KKL20MCXllBTe2Qh6xMCQvl/5FP7M+2OWqDZgvNyLVNxIfjmZnvnrwyLcXzECgF6MQQxoR5ZNsWsCGM1ZtIxdzt28LikGL5X6kx2qp2YQvBSGwV5maFFeshxf58rM+MIbAL2xM2JsK3W/odmzTaQkdeGx1BD6hMiaj7DnNgDFvlGOe+3Bk9gDDQZMD81aMDbPNBPStlfQuIHJiOZg5//4cbkw2U+Ul+jVZsASTpeK24fqPl5zlwdujxiZcI2aNspqKENiIHDox/dBQRrQnZONlCp5M0E4o5KUoNIjEnRomB+jkq8WZz6LOBtYxFJY+oHetUnuYAkyZRMLEiY8erSB0k8ffKz9/tVuNXI4++C8c7AwzezAiI757z4f4cLmTWQfss5R0i+5w3TVT5NIOSxuVrQXoNvz3ng4TEFM+F5tD9VT/Gfhpb/4H0fsft7Ws4HmYRvHkvWTihwGN06wVOHAIFwf25Tsb2hbH0m+PdmuDm2JCdGiU9JpZEVvt0ZISuwKNXOEdrduWh22nDN6utYcpmsy7ivsDvujHZM0m64yFfrbHVIo45EScMpA063y1OBtEDncVv/dv5/LUqgn5oPVTgXailJcBIWj4K6KOj5SlE6hbAzXktZyJNXXfstKqzMdmPdnrMF3ZzHorX+AYW8ML0SNHMLsLEfPCP8CSLocquf1ogqSYve8FehPBIHFl2zaX5nL6D0aSLuz3Xbl+0jPPIqmfaqrGrRLS/dU9236e/MRbXO39zFNpZc1nbu8+iZ0Uu5plaebxS3Su2iQuumVp6+2U9RFvj7ybWDuALnFKg4Lg3/b7VZrsuInaTxf7qj/V+TXJeY+ACqJbtRjXx4tGTesRRA0Jbz1VHMXVnDmFe9LPv+++Epiis9O15qPxHf+50qTUbEnmbqnK9k+Vw2Ie60rxo7VCRyhh7Yed/QdSXUOjTQKAjBfgVsSL0+WTDdtwNMPfcpxTIWZNf5qxt4IjPwzk+uu/QqCU3npuUMd3OV6MmtWl4q09pP2pFjwYF7dpp6rsSzfMGbpZRqFmMBIQRZrn+izkTAygTEzLMyWBynFT2f2WwarUTzWUP3JeoGZV3uKVYeq/WUbRGDpOMSBJGve+BpCz67w5h7TGdOnJ6AJb4ypEzItD8Npik02XePNiTG3dCQw6Zhm1pT73TIvzfK6YBgzlxB4q+BlKF+/VhHOqQ+yw1Ssr5fPdR1lK0uiVvSZL3vl77bo+gXEsofCUHOWen1eOQyii98azIkSA/BixKjsBHzn1NAu0YVAPL3yOt4pKILLlksZcbUlBuFsRlxZMdPFByMX+1mxJUmp/l3wkePZ9VYvD+OyxnpUCEM+UGaau/EBdsZ7EhOeIBkSh8usGA5UQcl7AObH0iT9WVo64y5yKjiB4708Cl+zguDA4jqZE3xbWWqryP84t9nIMXur9Oq/ejiQZ76g2B4+JSHBCSNVxUp9HTYq0iN79uY7EGoy6DDoUd/g9rtwoxm/tJQlbJxgitl/tNTQ7JMfbEFo3A8JNUV7xDZRAOyVF9lYceFBfSQxddx/YOuZUUWIhZd1sHLsOjp/7Ol6n98FdkvzYhUaZLygtTDjpEerzT+U7xI0Ux6CLOqE7m8OZreC8FiDxhGtMgv5PDLKbYvaksJO8iABSA5ZCBmd9ouoIJt/5T+/D+H3LgMXe
m7LGj1IYYBuljhqZzArTq/1a0Xui/6aMoqdMM6hV79ZKLkPw48S8blwNnYtsC8PildrLgYUhYQs24lzkSvFXXd0ef8RGdE06DiFzSaen3bYV5brjki0tpt1uaGpdlPc9R2WpX+K1D/VZVSR6+G6yO2jp9i5STojhrieCzmILbFrJxp6YSJPjrHkvAvKvn8dfiN1tSAPDzzaG7OqeeKU0lvV7krq2tqaRW8dUUR4iN/fN7fr0ObbUrgcNQLoXFlYrdZ+LrLXu0/FXI39IU1gxkOZ8uvlg7YkUFlhDmoAmcuzuXCuVX9XyrUXOGpJ64hLTeXIykbIIQiOD1DrZXpCBmCBUfuCLnaPp5nYHJESYDyf8lH2DYLf4YP+oHAcGjXeAoFoHFnKCYJlUivMaW8W+2YTy4rgV9OfWLNpRzS0PtXXM3PmeeEvGSAuzffvv5onTwUYUCW66VfWbmxoBay4wJstcXwgbxsAAaKMhdaOodVZEBPYqygepajVFd4LoEReCHWeFMlKs2mVOGSOkDpOC9gVeTzo6Vzn8kZaYloXSbDzPvlekK0F8VQ7dYIGRwDiIr4Yog7ssUipLAV09BAIJ9HRwofZraLvOMT/cxHfjHx6UlOewIltbYKbXrjaoLwqcP+rmVIfu4gA4+Wjl4gTRsT8c8v17omB2+nfmlSbqVyP8+BpvY1F3zSSwiOEqDfUXc9atzBvfBpLRcKyKp4Zg8OGMMDf4lHxg1JIc6Wmo3a/CDatKB1rUKQ2Vs2yPVu3GVt7Ez1601/GlvXoDJezRzbAwuccRdK5sB8TbJT6l1MnNbvacE1t21yrGOZvrkzdlA09xavQluAX8Ct0J6Ww9hYgj7XMxzFdqFbM3rKO8SlubqxQ/+ktGHUPFcJ4m5LF7Nk0hCcR2SLZ/2jKGksocovck2cYeYfDK93gYLxe1C0AUqNvYtkz1HlepKdWtQSGIEAJdxHVfH6RjsYpz8vM/9Udc2zNz5vUgL09X1z7q99k6o1CZeOgUbDdHH8GBuPe04UkZidK3q+3JQOSeGdoof/HzvXJCdL9zmIYECaEuAjWL9+JwQtYz9az5v2NnVaQJT00h+j+8xZlCKxHs3lEsEMnFnQx2UmX8VPOkDNk4jL4StHp51ENrzSyXK4vlYNfUpTQd2s48LnCXZPbeRjNKF7J640urI7R1UF4auNAVU17cmLhDm68z8WY0mgW1uGvFkUX0qTZfTAhYwsm3lBIWxgF2oDCzN8lyv7LVamyh9HcMXOBcm8os5hRlMr7F7abPH4+dYFhUnhTdEWjE1vqYAp/UEGb3/RvY7pqHiWhP8A2gJLOXB6OZn/ocMvr9QnQ/0SS9Kt37g7Jj+J2HgIF1bvRCK8+0ool3cVSdxLhdce9UyXtawvz/wjEnkSlgHzo/M/ydyRI+k8f0q8wIdNAUNerYVgUOL9r/5E3IRgxB+RkF8ODXoDjNUlduBLkbMp80NOWMXQV6dnZevjNenRCXcbajAx6KOu5OnwZYDi9wcBA8ynfgyZESRjl1J8vX7PoQfgcqHY/LY3zDHR7fd9fU/l//rxQvXwnLLLNTug+UpPtIrHANNf/WhQyRp6asPY3D3tav1R823+2PZBM+cT05CM+Dh5qREXAgwmKRMwVtBULpMA5c4lKXIyjmwvORgG2EkaFDbXtWWYMYUqM2g0jIUexdwcjxMBXZCD80rfKteNAVmt7TwaOTJ+tbajayR+92NdNmxk72Q9bthJXaZjFtK54V4Uz/omagF2nmUC30K1EIjnA8F6gWYSlFRMQS2J8ZbmXTa1X9I8ySPdgBlykJgc9r/3K/L+xKvMw56fAy0GagsrpUihO0NAhnewS9pnKErqpllSdn2KsJTvNKHIEB/4aEU9Jn0Me3dgxjFsxYt0M/HShselfsuCfDmKsRaURoYFq+z/XCFEaBZzr1ehwUuGvUR++zAd17uDZLf6jkiwpifvnEDaeoWWljBTJaDHXgQTaToV32ZB/Qvh2vKc5sCH2TZ/XfBTgFvOUvy5w+YUVZgMdGvsaFipIaV8uRbDhYYihX13SIamarnGISYZdM3TWhuNeakqFHQhVhWq+Khalnmd7CPd0F5ISi7gM45R/Eh0F8ENtj01anEfTKjbFzMgfl6izitDYBQmlK0YIjWNUm0Gvcf3aw1rC2mWplTzZpcMVH4lMTxGIkrkJTZwEbL2x0zGJ5KTrUnwYOKYwjQvJ4zOYSc4k6wl2xdYwSPnBhn2T88r6JXjuVKwN1RYK5EVjbiaBQ1azP96xkzA1K6Ko1gjd7Z84KquVRSCoA/z2l8tdSeic0B7k6N609YbVQX2M6ta9BJCeODp++sH1MNAJBMiMMG5OXnLjv5oFgLhw+XVOsvCwoDSztZLTmVDgcO84GfD8eeX78qelHhBXZfBLGcX1SlalQHwDedHKzYsPNEi+Qj1q87LoRLPBcKTiKmXNoPPHgQ6Smw8H7i1bINXqlt7o6lbn6DF5Sgviwyz+ZCHUa8GTYsbvAck2TARaAIGODpTMRzzzK8b9v1PV1neAE1qV4JY+QK/kqAwaX+rHKVnPPByojqol40oR5OmeFVBKVRVjTQK0s3hJrYNbEKmtKHbLa7M4Lnl6QaJvaQgyYXT+Mgq/8dqz9LP47LtfOKLUKvzYyHvinsxmaDREhc4kRQ0yM29j1lpOJRZ2Sijdc/tfi7afI1OC5K5pRZkhg40V22Regs3tluwekSIp24ZPWP0NIFVSE0+WdDf+T7ubvUI30VxAwnvseENpWpCyGjQ+FqXgx1AY8Li5vAEQ9SQYEvNY9WxvEwP0cdjhOnHG1vNcle6XqYtsODr/r6wssznscaqzXDOySRlCZQ/akm/FkvJlHCxcYV1bU5N2HxjPXur44UpsbLDVvcVMyKJIQXIWCqMd5LL34j3nTJRKYb7A+O3WDvd5/2W//QS0IPQWzRhl3Q6TSTlzA5uHZjfljcCjKbQ2MOXk5veTh3/am8BhF8Y4BC2qOKcgRsOVefpSlOyeHQ2MI/4oF+5uOTuT4TTziqQX4qU9mm3iMtmnBCHleddMsYKjUyhB8VtsmwVjBGYx295fWFtfphFvWFrsS8smrW9kPdXEeiinPK+f6WSb9geC0s09cb5WydqbP8fL3UGODm1f9WSgNqX4Ytwk+LPpG4CbK82MOvTDBIEstk9fNxg0Zjc4plk33xdnj4T8E+RbbuyvsZ/+O+bJTIm9LkVCSXAYmYZO7skS7RuE9Wj8H7NTBRFqPDzUz4kdY+gtWnpjgFwrnxk7Vpyp6lqeK5+WsNxZJXNNATXpDt8PGjoOuTtUZnsp6c0PuE6A2VeW/QWIRoabnzdpzVA8EoZUd0g45bL7d2WqyOqCVL9bvNbQRdmWE6zCltJ2qvWnK38P5OiOSC+hiirLOIPm4nO+WSwm/5L1tMn1WA6t9qgWw0pG5GauJZqRi7N3FHBhCPYrEOW2T5ffkemeyWRGR1h3CwnW8EyIWFTyAMThPJbs1KMb31we8OXLdMxDOGuSSf2i6/rNHGVMRrc7ptJty3oYgd4wJg3uxJZzssLL
FR3S/pOw4jVPw9ucRK+oO0aZ2id2ixoh6I8dLiJik2zNfGA6KcJ/rGkBrR33wqQf90XHGsGuWsqW+7tkzddgBNV+q4TQsV6zYYRGc5Oz7H+n2gBiHggAANLZt27Zt27atxrZt2/b8qJnYaGzb3EPsQZ7s5dBIAV2yX3sUVbjlAvk6TA9lOpRFOG+BbkbRpRysc8Kq+/pXJ5etKidMx18PWSJX7mfGq5aJluAaoBPJ6o3S826/4hM14qo3sKhqRW6bXlAfDC/qCbdinvaMGytBlLtmJSHa+zXQP7Yni+TDCxhgQHHHKN0z4Yj+ETvERKc+4OP+HFb8gJI23H030I9Ew4JXDvaMry44EILsJlU4Brwmjtg8deC3sJR2WkK1Pjon20R0cxPjZaA11gtACMhcOBOAjd9pIqDm3nsu5vv2hl76u1zZ9LfEbsJp3ECxMra7LHH8IlomRoh65I6BRqGq4pQbyc89fDKrZx4+4L9lakLYcjaZa6cvAOgquM19KNAh8MCMn0qOSmO5/+VlWjJYC9epp3vJFViAdozwGYjPwG+2g36IkHEU4u2MWYVb+Z9s2nvRZQB2KxjPd8vNOZ3vbfxswyD7DzrfBt7L3wJwHItLAYb6c7fFG7uIA5ehOcv4+rKEPLU7T5WeQ8hhjPFQK3EMcPfSNoI+z3eBlk3qqCyV6hDmI4kwFhtw+3b2Nv1aXCO3ghGVR9wcW5VdfoB+giB4zKn3YnDcvTC4akUsaIQF3FDUu5QIF0WCWn+tpcltf3SeC/0Y+PRUggfOxgnRFAinmCeObobaSnOx4ZF+Muo8VxBHQ8gs9hAavFXj00ZboFpsEVMh/pqB2X+xQV25jocAu35MjlwISNyDIeG/8oiFhyUNoIq2qyr6rpyhzn28AHuDAva/F+blSyYbNFLtAMsRpKMGyRcGYY/APTX4QfwD+clXEEGcfQxjWT8ldbzxu5P8erep8OKqBmG6AS0jBSeqgj9SfrLNql+cFx84YzkVGvfzhUWWURoKdH0+bB/485KfefhMOnh9JzWhY0r12RH4itTVjfwz78BMtyssINVQij1P+zJsbnH7ghD8Ob1Zfa+xFzcDJ4hOst+Ss1ao7KukM+i+NYWSIfdjB3OLpnpgfGDeh57LBnG8HFDGRf+A1b6XkSm60YyrCWyJWE4MO6XnpOsmYd3ye+jAmhPqoOGfP/ARD7m6PkAIizEzCo6D2cVWN6y0xujpB5hvo7bUqDCp/gcsWIdMpANPrSUUWyrCJmecl8nXzQ2r0x83bGrL2DHPrmy+BnETd2hxbO+ITqzVoZV+W8zgyOlqJFPw51GOhJ5hj0HVdcSPxyuVOII9kUhM+Fr2qvxs5R1J+d2fWHCAVle0bgEfldr8be+RDz2C89rH83Ug7Lgc8xTtZzJ5/fcLI/US70HtrV4e88CLhTqlcWcvLK9Cf8zzREDlPX2ztQIhORPF/0+pSHWkv5XfRDk6ZUPPVqpYehEpgWSgM6WB1IvKjJii4Twx5wdmGRfxp7O3+hoiAlizVlxdQ32+lgq509VnCWbFfzgFwBs2gf3C5KZ/dIAR0epg5JXB2t29mNFKJKuMGw4PgOIHuzLtO2BFtd2t2+M/aPj9vjxy6sJnt7K28t3wbPPVlgRh5BWzA3cPyH0eii46+lN4xjUYtpo5QTuj2NJojIE3JCqwAYqm+w+35jzNo37N/lolCUEXUhn4HHQKdKg9gYIT6ncyWR2rW3QXp2drAk1WJphXv0zR/vgN+OSRK4MyG65ao7DSEsBxPtcvuNmDFHKH5wWZio5SvBDmaeMwWK0SsDNvttvHNwQ/69vrbH32OqD4Rdb/Hi3Tdfqr9HUWb/qpnnc1gArWRtauttlK7VFH9S4S6SFpM0HFhCM/PaPCqQfsuJd0uuXkbx9SCqK9QK7ozTaCaHVC/jlmpaLpxPKDFrX7oP7Ub6f6DjPyPe9VSLKyc3joxZNE+IBLAMIyMAlT6WlX0gGkYlakFIIjXtsusElBfAX12z1Pnm/HWxlB/T2w3VLwXKCZmKab3B+VdMjsvJqYg09gRzd6Vf+vDD3l0CQCQnwSe+AEhrvSx4/XIJ/n9actPjjg9ywqF4KoQd4vPaRlL39nUbHXNWEFgpHrUyrxl22vUchDbfwu8mfzOjoZZ7oByq71T2BBNp55X03Upst9u02py/j6pXVYzjcKr6IynlJMjS+LLv8XN96lEJMDukc3MPPGoOrbz85EMO8hQjH0cmX2a2/eFVUkPPNv0l7gH7VLjrZ+4dPzntpsGay7VtHaLGIqjAhWpRxU0JGVwAOruj7Tg9nID+M//o+HbBhHUqXGKquRql7U1flxyp1h62qr8gLdgbnB/eTGFmd32cYHJE+mJGtB9nz2MGG/aRdHh8gkdEsIuFypNc3ElA4p7bpNulv63PFy9PkpEao5ttXJLTcnevWROWk+ClqYcgKOIqjUV1mcfZDKagO/F98pg6aqK7mFceHzH/Qb04nr7MDou05o3Hs1VbGSwVZv7Bjr8MXbtzzhBTSaLflSMKUBEWNuUwoZQQ8lXoEqKc0zc6PKR/ESuhN9PHyGBKafp6IGTyovfqcmn6vvMKOJ47asEW9KvpIe/MrXLDAqXK/5r/iqu+g44Zu8EMMfhOw5UHVlGoxgZ2JmfknAjM+/spkVKh6x+10x9e7od72u8EJZZPW/hsEvWWCHJ44v0iVCkOGycIPzWEgwbf6Q95jN4ZduIjSOLgbCgou4Ngsae+gW0ZcI8ILnh602Vn2rw7sQ90/FQ93sHJ5ZHYHzFNx2HB+ZXr62A2I39qNKVSwJlBr4c5SXl4BtMuBd4w9DOhV0eeyTKVbe5ahiwfCfQ6wi5SzpPM3nkOZq8prlge8tu/sm1IKpOzlCG0ibo9/LdbGp7I0PIk3bvzsIAvax1ZnZvDrSoOQF5qv+hecAfuRK6lH3XXHzPvd3reIjByLbqX8fLL4FzYCi01kFIfZfvpj0+i/nxrycY4mNVdRvnL3VGq0/cWFp28uafO3OnDZzH055M8lpdwd5ooIxfpG0mAhu/D52K2a2uRMqikAJ+0TPSeeAmMBnbJgha3MkDx50GCcyljwBL8RiabJrlJQa3IM/thuNSgR51nzeeIPJPtT4VIbt1NqIclz9Fb7jlHDUcIYi45p6//FYBnUri0SeXrWU9Hw+kFUOjGPaLhNIP9cuZE+2fgoYoQwy74bClqvHDQkNsFxQ3QeCRxrfq8IQntvomkNtm6MoQT1q2OP2wVKR73v5TqIWyLq9Jwrq4cdOnzrb86ow5bahtitAOUZDWgRGwbiCBrLWeDfy0cyDFI92P7Xxu7gjAEgQ6SNG+6GWVQdr4FqXLMk14l75GPpgEA+hBTIQ9RoN/VQCSvJkg6qCJIjjQOPSmwfFUqjBSwC26Ywq10hfGLv69XhBLJNKCXy8YNyKN41kk8IZBt18i9DEgWRaL/uwwLX+yjXCiVYJjD4ubFTLTuUZ5RwscAaoN6hXWh6aVXOQY3FSH8/BmoQmdzluuG82SVQ+f0DJ63F+WhcNIvyUU0hsxLGh2i8xo8
Rv4LVFhJYxTrPtVpoEGgmMzyeiREI+ZRTFKk3XMcK8DeZHgbWkkipD9XwrS03F+nUEfqFtkcHzFg/eH6l9xwuFEhVE897ubJ2EXdynrZAgGGPvuFRlKNlvXzV4EIbHD4foDNmIsMUyHrpGRYM+pwpPcySRovMZzaZWK3G6qNzw/4y3fYHQhp2Tk13OqvQg3N5bRE0ggtOvC4YnxH5gJGtjX8ZyT4iKKW6Vy3xF8C2Mf/5zvlGUq/iN+YNjD3+XTIit+J9ZQ1pP9+E/ApiEfackI0NQLaeyFUVH+9Urx7QpmHqqoRxUL5s5nFVAVl9wnMCHsyb2898mbgWVf9/3s6hXKWcGiII0UW/G/wXm31m4Q+42t7IpQ9JtgHJ5Vn3x6u5Y8M+3dBMy6T9K3TI0/ERp0rXvT5O+RHPx7rib2YQw2VzbWratrT3T6YFocdxhjOgDc/CGfhvCUyQpJNLnVpJJU9ygtMfarxsNmcHsoeCnPmGXdVTb1/fK0pg6I+ZZWC+lQMhJjBZqpJ3lg50I74bn2Lz1nUyRQO0TSjzgqBB69fhVwi+I9iM+35whTFWbOd9wjrVE4EBPck3yWfFZjhLsAYp0Vqh7BcTTlqk+ZvM6bok4CeH2IT3x6CfUnE9JRG1YihWFZa5RbvrKdRdOp3TZhsSEwgkJIouS56zhzMToRpFkLFiHsw8/0Q70XYJ4eSa0RUM32bT0XZ8d9sXJPZpN9ondJEKBVpTbTUDMShl3lhdcF7rF2V9PRibOHCswdjWeGrXxIp/ctIEEtq6oqUyVQxWrDMnpWyO68lp9mktPwRkV+zYX0ELFDPFLDMPXTU2s/UzZ7Ni4uzN80rpO+EkaoWUcKObH99mC5b6KnDTYP6M/ItAgoiXZxv5Ph+iOO0OiCIwY1Bu1RCyE9tkRzf688mybbebRYtIzdtIojeVbTJFBhSu7NFoWeEfuUDQWVfudOHF+LHlUbiGnNaVNV9ppxqkOjuKrVscpiMyLbD625DrkkHabtismP6shLLT6fTro6/OqcD/4v1VrDPGPKROPkVnl1KDJM9t3PxGNiUwjO8mFG7+9qK5tvCuFW4ZYt+e7vS1Msa/W+DnQrxNQjHhpOiuVy4fU/gqia3cPuv+OSZTkgTNNELnwnR/0tb/5HCeK3nPTZLu2RaKQTsXRveZH2VB7inPJ7ZNxGz/NCXrtprystwwUsYbwB+gzMgGsgh7Qx4xP7lQTvmO7yBZeU6WtQsgl7oz3BIPCteVbFTlC8YzC+iqeXNi3Re5TVKcD0J59/EJBoslCO0k53lOBlDoilLxYJwUNjhZj4VzT2oFEa1xWaeP/UigwSJoX9RU21GB3YdFcwJtDeU2rOQJZOfxrT223PoE2mZ6uSuTfkcIF7QvNtZmS41LI52fDtx/HDwnlP4i6iv9k0ihZsTmok0SSoRMnlBjJM9KjdhTa8tDmSubHq5hpVrMWlRZN0/hiwSKsJRGYMhcCYx+flsb689+q8huQsEfRCyDzRk5sSK/F3cG458OolcUY8yCJvs+4VD5dcYxpXi+r9Cp5q8gLj+OUbFjpVdw/17FtBDqhuccrhzTxrIZrkk0aB0YXTYZOJPhlealVjti2ddJi0frgcp6u/TEvH6vAy4keI7xrStqyEy69otXYPPP6HuTGN9eSEcPJFaGgmJDdY9qjTVA4MiHXTgatayusyYfpl6o7/fYiSpUr1X7qjHVU+aabZB0Chs3td0Vkz2sr3+6qMgzNOJrttTqBMIh2OmkFgqwJQpfT4DIMmvUPaL+S9ploJxFmU5EnYOZwrLQMpkGxOhCgc4LVe3/YhMlmBMBfZsHBA9UYDHf11dyhSlFUVx490SfOTN/B6u6n2x9ZlqIpQLr3bAXJFIOlub/RdP81+hWoxSWTNsUjbXWfk/h3mn5wlqnX71QEKZUzLJGJwFPxbfjwLpYhnYLF60RLTCCwZMRVS4SBoRbZxSgUQqXdjQh5FZ7YGLSRfLI1v2ddDryBZRoBWggaPlTSuKMaGZmqNtEMhsmB4vQlcPP6KMygpMHqelTXXFaOUexiufkgFOUF/ei4/rkW59saYoTgsX7srgVTnWxCtxYNA7kt7MCf7hum9KH5dMnPt8ml6pxx8fIRFs/TbusvVXVtDs+vMVUrLWhQGnIPM7Vv0mNFCHGKCjS0W6262uvxblS9R2xrqT16xwaaoD2d0SH8iFm3hj8aQjQWBCCVJDOm8UlF7w3/CXPF5IR1Q+LV8LYJ8U6PDRvzFat7X7xk22cyYYacVSNonrKmZt1BoElfN92yVeiV1HpwYvs9odIrPu7PZWeBooTo/k2sVsGcfBV4ii4QBdOng6H7rxivOo14F7c9ZNHDqqlpSmyMKPaDXHvnO5FtobKamAyMl+uQGwNNNBi/cpqihPWyMrddBdBosJ9R/vcDEMuPrfqnuoHtuoA0Y/GcDS5f1qghD1FswL9wGLBCwxkmrWyW2VdUabFUCqXCcNB8KkijFsoPd/Rj5/CMjNRV7Rr5qUbpmY0XXxgZ9+z0R1Qi1QDdaGneh9VsoMFQYl+n4+86u7seTfvR2ueyXyrVhvRWUUCWiTfZaMYsawWBDBVxh+sot2On6seFghefpi1iXcce4n/z6+WZCw4SXjdeATXzC0u2ChFOx10NiCMHibiexZb+n6Sf9Vm3OY+gwKlY4AgazupkVHLr2Si0huzqdEKZ0jXYWao+CbVRWaC/WRpX6ar1Zg3QgH/ez3Z5i/N7KaFeeheuqCrijLcx2WOyeEmhZVTGgcXeGqVQNsOV0nZ+8u+idU+X1n24+IQd6g3luJ6/KVEC65R448PMjlXaCe89zI7CcDEH77oElYRXdOCBP/EKbXCU4XtxAMf1T2Rwv0yCcZn7I7SVZj5tOm4FsUszHqTEXTES7EBDaveimz1sKAqOChqj7KBegjMJ/4G1kNFMgihfnz6ay42n6Ptw90EyfmsF6sukwOBGxl+rPZz+cn0knc9ZbNr71gMxc5Gsxivo6bqzMGco2aThYBnhF5YxdLt44AUXYSL3tMWO5cokt6dUpsaFFy/POR4v5HB29m3kbgs/+GtJe2ZhOYe0nJGxXHFYCwEWzQx0i+AIItwJB2I9VWz1p+JyfHKrCZroMWJ5qhcaAiEfZjD9vKmD14lALoZbC96t09R/+n74s8fDd6x3lU8xMP6IW/oYqeVBZVnMMIaYM9dFWDI4MOqrMwsg7WSMW/pwlCPbjicbPHgwm4E/f6Nw+hOcG2kmld+caNS60G88XnXmwDhCdEOmgGewkQqmFy6SdzvJQaKbkiGU3ddbKOmdv1tYwE6/sOoHxq2u1gxEZoq73S2Rlfk8MOucgzL/2j3Xfh2/FRCzrdG9vZekBr7sZNEvuE11qMQjoR1RWq/WbfJrvFYBQltUXUmOeDNPi+N3r8/LEaPZ+wAopF0qNGqVkwE3UxyzOluaSFrG1nlzBrOhsHRBHo/wpnTAbeNEXf1aYwemPDsr4I9RP9ef+9RX5BG7MeR3dMQllaMLULVgYQbhwRTZLgAh8/GRrvufcZ1IRSJohArSLD3VT
MLfgkcoLmLUAIsdH2IbEdrOYlu+riVpA9jORi2RuoUW3l7DqzbkIFEBsWbHK4n7RJh6EtnN8vw5f0nVQl7LJ5JeIZyyOT9JA94N5nUlE4ETF9A3eu9yh44G4P0Ytfwa/KpMnWWTdeMM6DeoWUEZ5BrX8cj56OMUeCmfHAal9DJv7ZztGRC9S1R6NmK7pvvXbQHd8desH7nxP7c3Kd89fo5YYUUXwDrWZ5z7lw2Lcgi04n00XG1TcjwtX4D/lnSgPGbq4GafUwI8+2OxejM9xnOxOieKAvhd2gFdXBJKIXIAPX/KxXs513rwT9BCnEbFaCUZb4vIjSZ0D3kkZVSp1J5wfHSEInypEXXKe18zotKgjg9QDrm0L+L4CY6g9m2ThnfiJGGlgJclNf8Km/uuljH362rc2XtuOj9TYS+9x4RXup1mM1pkOuvWBLfSluLcWyFn7LkNEJ0w0gPJi9w7hJcntOKvGAz0qi/FItEnmbyAUmHAeO08Ht5Ri3pQBrWVcRkibG1TNYBjMi11eD0O0JJ0Xd94ljn0qE4AK9iH7vmJwDIUAqf+pkUFdB6L6hX8U3tlSh2YapT7CtRrG77PM586p21U11nlDABK2x7M5y+9tdUmY4alcZqRCl2ivQWoTybrEEMoq/m/zbXzCxY4sgeLlhlqnSrWkurTg5XE9xR8/HO3SKx8ogIpQtvOZKtsEwZ/EGyVfS6gyb2OlhHf7Wi03TBjiVfRNf4VePmTkM8afzP6xC0ULgSaUA7ZB0+WfXTdonS29E6fYe5o3KjPHFI1T9uZN0AaHPuiHWWLMqdUrRiGqwexBIFTkdCjm2tVywbxzxpMaX0giuSphnJdfHXgVN/jWDhyos22RBMH7OfLomgvr7gxNgkLYEcRtAcbj+AjHHn1zZmW75DZbPBi+JQH10kpHDfyzM1GzimSd39aNHp3KVORmxmDE/xvFBFKOS1uXRrEv5YzAVgfLSV5Cbm1HQsfa4JOEL8pv2rJKdPelDSX54YHebdh1LnqTQs9QtWo8pn2doZX3eyHCYepHa/secRm+BzaGwEmB0trznQ22XHtS3gjPfKHYwDR/bdHRkpSY1eirMZQ9kO8vpyOpYlZ65TN2RbWLwsm6llSbDK78nvoH3AdY4/HltpKwblVcB2ryu9G4xo2jU7Hix0Zmz0t3XVwr6Z0S+pSM4F4W2e9eYntJqbh86d92FfTqaG6KsuT135wQskYrHswLHpnvhD2M5W+a9YNgtoHTWkUAycE85JXQRj1rnl/YluEzzogrReMrxzUPY9GdxahcQDv14MmrEcJAoNc5yntF2pcjtVJPcDTVug/iPqfvT6Hq78fIhg0xl8cfk2LNU6ith4ed6ramPJSAn9JES5cF7AEd7TzcSF4KfumjojwG5hoxP3WexHfqE7ZMO41vted8EpEV7iGvOxZqpZz5yc4RXr9CfZkSjMYJmt5GEyFnQUo7AI2uvMJX/DS6xMBFj1oI7Kr93jgLbVH72Jxu43UrnPgDs9oQ5w4Jeua9aT+7F8Y8UvpZYGJKUDpI9rMx8CSl2Pw6k4odFfi6PhwpBySLzoDBAfy/EVXqTOx9HclQ0KLdWjdrHnVFzwpb+LkMzctX4eWs9p8OkAhMOfUkKCG9wiu+s/oxS8PgmUx8K8w7OJvJFmWXfQ7SEWiI6d/gPgVs5aOiPzGWG+UMCi2V5uGPmHgAQpdvXbT9H8LlTy/aCUMdXMCmxt+7ldjwyMDcz4FR7rJrtNwj4W/cmHMhPPAj9P9YWqBgRb1ShlD7kzNUfLg4fwgvovjAKk994zG5q10xd6HTroiBKNYiwa701Us58RIXA6LDLt73djbFdjm3LSmVIykcGstkJxBGSz8fjx4/tHqU0G40Gs7evGZ91ZndJ+XDB93IYVsDcP7OQ5w5eN57WB41sFtnaoKZjqjkpUOXy5F3HhmYTEVcfp1A8lcYWQhnwqhJ57EzXq2RubYwUt3yGvW7U6ecIo5bRLejHsyDKQ4gmR2QhLoTV9TYdGpytHNVJ3RSjaJqiD9clf+5ForGvYhFRxi5XKItVL337m3+UhbCEEsJDc2v5zVBF9AZTq0tJYrp68wazsefdrTFfN3KnlPa0VnbUu9AAhTc3AU/oCy3nHQ+qWAnwF1WfEUyDKlr+1XVQUOc9xUhyycDcQoLYSdW25QuYp6h6JakbAK2uJlm82ccb905pCizpGcVaD92IeNM/jDaZweBH756ULkoAqvefHswLNezhiLGflPDvDNPQ89MefEf1vbxy76vtGNk4xuVAtWBIwyVuPNSlnD9QG83DOMp5C85bV8BrupvMg33VXx4kSE5b9pkHz7qNRGugkeDsw2y00CKogMc9qsQkehDIUZM2EeOHsw/dHZQITRboOVqTkjLL+w5TBQUt3OGxDeR7LPLdcZAFN5Ick6rlpKyX8LoTZ6qhML9BPrGNu97BSsnUL7Q1JtwhqJbqfd6egNxHwEmuq97BxXmsT36WLZ1hXMiyl/oidS0Z5VwdmaiqnElfsJPJkchtsDPvtRx/xzwpXITYLTSeX4E+ucjrH7szfKAi+H3grnYI7KYGhVRj9CKIZzal6KL82aXesj7Y4mJN8DrsDDVM7EDIW/79+ihlSNPKQdw34YgpJxnrXHqyIlAXKVouis4dfYk2yckMjFNRlQRKbBBZn52+yK63d1wT51J2Q3kBBd2FZaKqZzPmUg7i2oFuukoC/csT1KtEm6k3xCkSiMN/wSchGidJX5/dKnOh/ChnrTe9o291730M4HOSoPZ3JEPs7TISgZf+GvaxRb1QVTkRnAxAvgdJ4agfIQIvCJrqrk/L1dYjYkyIJIFM8ourIOMyk/Br+CE//NBbMBGgc1A7dr34vrEAcO0tCsjg3yXMRaPct4IOfrrwIhypv/EXLEJTcEAH7OwBKX6u2BG8COe4Zo6h1ZZB2SUI7u5/vnafVTMSG8bsRd0Z+scV13YS8E9J1jXrE7bbPT7xSl+lJ0p+/zXzq7TLWhTrfnoTdhwZfp/dcxnu4ZqSODFxt8mveGEJnYy7oQwqMne8zM27moyrvG1rsgB+QmOfjY+JWK5OCJVFaWFz8RTPEmldAkAZGeV8qiqIgbAczlFn7ZIF6g7Q95zFPGctnd1oXIA/g29+XCVHVuZmKX87j1TuZ6ZY0+mPD8gYZLWwVs47YCtxE8XO2PXt3LJ3TlUi+txpfVvGAkIdJpRzSc5SnC8Kz9t5jShudw2sW6dayxaM8epYQfAfKSrYBrqjeYouslpR47gWS0FR2Wx8RNuLmQpQB9qDS+6a/vmxG4Z7l+bpLYiO+h8kFfFfudNBBjuoByM03+Lq4jNknKskBSAd0Vd3VFIczbeJdboPZ4oJwJ8+3Qz2Ub84cnHz0bDKwXslY28uDHX/aIgFWDvkiq+wZHpO2AovfcASY9Dqw/uY+h/qWDRWIUqpmqxTvG5KiUaK0WzQYJZUsf2OCL+sAkQjqB+MV1mePYZXYLSu4nDhV0Oj0WHlyVRbRv9d9bEpeIeTG3TuOByHGkBaRIbhXGEye/5YaMTMyZNc+p
QlOiuh1I/FEeMedG6iDAhdp0dPFRdXrqzqsg8mw16OZFrTxBVGMjyFpm8zbnu/yTtL5Hfy0Jpajimjh6w9MPIrPEZg4yzgmbKi8kRp6vm3iQZGBociOTTkKiBLIHMxxpr81PoDSCPiRPO6NTprNQFGFD798NvQTgKzcdNbc1K8sMwf+AhVAvL3FgOL01JcYRscl81ik+I+uSVO7qHve+ZC39MZOkDX3RVIMIox/OyyLCUitrjubHWuB0MZ7Fr/hLn/UP0hMtT8eFl5WUNhHyeEhprsJFWVJHNaXyRoBmvXJjFHa9XOtbaWlUoWbtiEosF/bt1IeN4236VaSwmT7ZwTi1djtfntPQ4QejzvQRVUoSVC3Zr2vQ72zNbmkxD4hfwTIHJ6NqjK+07ZUt5doVnwCkUqe310srCy0iZ7cktv4myS87lwoVcizL+kbWctPZqndBV0J09XudHHvvnPSX/qH5K3AUNDckDhzrN56hBH+iZc9h/sNKfE4wfWzrCkNx0F8M3y3kUIg6KnO14KSqPuI17dhsQ48VFKLuF/RszqSXFItZ1MKRWLwhH1kAxt8oFhbtoLo9WoTtWSEPeTK4Xqc5hRDo/w/MR4mfcGWrU5EYfT1oC5UCc2ZNavVDAlPDPtHHSE11z5pNXwcTyoBciKPoGKCWkAXUEig1O7rxyRg3fkmd+Qz1VMAPvLlEGQ09eo7EI6GtCyv2gzMjy6u4MmLOQvOqtloyKBrS/JCR+znL+7Gk7x1KKCNPZT2Trkik5Xiw1bAnRZZyIm9s08WT0qQ2sQU7JM/QJZtMrdXZH3ahAFQWggijBInjCtJrFwAgW5k3mLTeo2TRgkFcOMeyyUhV9uqXMJM2ArB1dY8HlfeVpB59rSooFUTH2HTn6GmjxoPXJRl5ZvbDQ+jZAMJ2EWwrRDszA+YGoXVUK0AQKRMqSg5kEVzBSDaxOXQXIS8cOKXZNYWLur3Xf/zNPOBKRpb5pgJWFRwJmYwYt2UUYLKxitrSvWsHDX1fzBQ/gxGVYGNpQVsLcWzgPoijMfJKlrYOweUD7DaaR7u2Ham8Gco0l2kv4G4PVU48bvegKXsv+yQb2FBCBh27U92imnB8FXqAb4K0q7tS1wck5n4bKKmkz45w0vvVEf2JrEppa3iBLdcbn/C/4aISB7pw5l3SNlKvv+X07dSWuVu4brZmcVJYtUCbzpeCkTJBp9V/plKic5aOqtYXpESHMtOAgd4XXiPMKqyZG7ZAjsVwsRjz58HqZM9gQanV34bevs2gjUDNjkdHqU/IlUiH1u4qc8tIVU8yb8sOCYyFWM7kQv3tsedzOjSTdc2thgNL2W5+36WW9JsCCs5b8EzDkxOQMA4ojxHPEDH/oh97W3kdkrM/oqgh6JI1DMjYtvnf/7w44qdI/YTO9sA21xbNlSoXSzZpOUMFiblLsVqViZ/iyuyHICYoJ3qpz37j0klDTeomhXJs8t+5guc79pan/1NUqEaFTwBxq+TpATGAMiqIkh9b7PmgtuLjfEPBHxGCbayT9T/HT4LtJrOsCOcJr2Bkw4PnoeMnlPLjmsXXH1V7cXLFhHnDdBifwPIHsBaq5ZP0xONpiFZ8W5Asi9mb5bSZtKrfHBjPAEzsNhXyRr1U9qv8gloMJdiso/GcqgOECpSSQ/Fy2FMyRK8T3G30jCgUhn/usfcfAVLJEZCBvycMp17R092eTqRc843boIqgMi+zYQUyJnFhRObkL6ZVeVMCLahqPdkxidWa8DrLURh+WMNtSWghqI1pplftrHobS3zAc+hfziMYlzHMQUdEgg3l5TbGgbYz79FlKkMnbOX2jGfCCBRgPMkB+qb8nPRD0uqozOJoQ1tTuIs3DVNsi5o5eU71aCA0kO4kH0mX01Ye4JOsR429oKd8BGXlNKmotp4y1Sa9drhKeh96rgtI6JiKuI7KGb19CkcBcXYtJs4MjQrPQh+tbdN68aWngAqry3BD3KuOC7nz4mqsTf8YdZlFiBE6SJLfAXFtlO8UD3MgZT0XtTLYx83edZhtV+vJgzO9cmmPOVJS+phgUZnncGH5VTePkfnX/Xn8b0tgEVZKCVlf2j0KdMX2C/wvKdjkUgXERvmzQMHRfBU3Ji+6LYtmMp4Vktk9uw+rCTz9Dvj0LCNevrumRv/sUWEiNRse5ulfdBHiix9Inb8WvACter6UN8MpMsbleVSM/Qw/DmM/qmOpFPpKe+0CurwYHFxq9ms7QmvqpreWt+1HGDGQngfVSRY5Du3R2+EMxCjZ/sP2gu4rRoQkfKQtsvhfEsWExAK+Aa7VH7n89kK/V5ghxNZrCKeApfHPa1BX0wxluNtgSq5WUuhtwQ4QjxlYbtWp90cF8yw5xMUxIyO6+3QdXsXUi06pWARvmGQjJJCWrwRImynhfNf1NI8rc5nvEq/+V+PMWILjuGmSlxL0ITKLslNvucbH7VjrFoGIvft5OASUNabSAQt9ra9c7A1JykFsIWqvHT5iJ6vGROEuqBRpfFbqwmNQOUsX1xImW4sRHHIyveyQt84HkAG7xymFF5KvjLNXMpnfcov3fJzymSj8XseXS2KGWOGDbhJ1xQWB1UCT54bgL+jMzDOytwmUG/0llYScNn9/DPc5tgESvqJ+b9kkfw3XeZu6JKllLdnT74eUGh4XLmbXopcDnGy+ovg9DpXzb5UjbqAErxAK3Y7dl2IBxkEUtjX0f+dNP/s66U1lWg6f/BwiXHciGQPoe0v6QXzZ5L7tUvsL/XBg/gPmqwkcBMPfJICojbZfVcCQ0IqbOy2GX0/2E1YByGC7zoxumV4T7A4+538PVzGUp1aCv9ljcrngtCH1cjQi8JsDDGzN9nbXY0S5kGOEI/cZ7pxeuDbzU6QhpmwhcO7TFo1GP+HBmkL3/A8/Vq1YCY+HUlUWUjWm3T/CB3oflRMOqjmvVqUPDQbRd2Mq3wn+WodGfENzMuUpjwqNCMLbg8hjTJW64oXGW62tvtQJhqXbTRrVEa0k64z8hE7xE9RN7CI2uAGrEvWQnddl4bEYc6bH/u4w/K8+CJsoNvOAkoLPMWWk8EbJvyU8eErPR0SXWHHxG5LAbqt5bw3CSpP3Hv25P/jg0e+ZRZe5RtcI/nnnHsez0nkFknnPiapkc/49A7rtbUeWlZmilT5vyK61lf8OFtZVVsSWL6zC0EPUBIFtc2QFRPMhmlmkHk56dK5Nv1HNPiUo/ku0CN33XO3NNCfo0ZOcMkzeppxHHM2SKO/g8kTnapgcAcZBkXx9P1KiJx1r/t+uHiVmJGSSKqRVg07QQAtxFP13jhUsxNMRmbCQf7w7RIHJDZoL7Yphu83aViJfpP85TSBdBoZmEBIgZhi2WPddHxvkqjRD8YpNi5hSljtbS0lrGOrH0wUdjACmU4XAoB123m4mQR6H9q4MqX31dnjBK0NR+7axVlHjZqV1vEPDz74isKfqzU2Zld052r+fv/vaKY6z5CoUYyVg5yea1uhtR3O0QahGNKVaYOrflJaMHvFRHILP9cSvscmC4xJ2qAMxyS3lzxkhcZW1TMJUtcp
[opaque base64/binary-encoded data blob from the patch's vendored file contents; no readable text recoverable]
gWcHBkAhe7+OgXx9AG3sdKfY0UHuuFf95ravQL9Fsu+LyhTUrhJlgDJI9lYLXa7FS1wmuI3ZOph+dfTq/Fw9WayjCv8GqH/YsQA5OFqO1fZDoRYijA/S4HBPnA8CNkZl3oOyx8hU8+WFkjN0++7jsHfFh0UFOJNYCXwMkCcVpwHtO8gEWj2v/cjsjXJcWuBfVwDZguAJnKAqnfFdiFjtfM93A2j3MpPIg9LOArHaWm9Itdp29E2haxcAly+2uINwS+nm3UuUAdVQgYZDIaNV6N+Gl/WFK0a2Vc5IJa4mEZY5ImmEdc/hoh4U7PUG0hp5dDlLkgWZYoc+1BGJztTZ7dwDneSF2fJfI1AE2hL8GLTNIuDl5luldXrOgXHC2lNuLEYBhMz0wbljeQz1qaqQWR13G0hBp9Kc0qBP8lQwa1XT/CQztYQ/+EIe9Rz1qGYQPQ4RPIs6Y+SyiX7RCXd2RS9ByAPmCzUQoe7mbrx1gS/lRVLDw+vuem1lVBFZdYEpcKJp4xLeKEc5cdCJVwDsdY3TR7EXvePiASpbs/PesCn+GTf3aQCXU6+Nrq+y5q5gyhrQQbcKLW67SyBg0hGF37ICm4XF7PXTQKd6uxFsoCuw33ANeZFmhT+rIu/RA7sOPh+OBTPYzCKOSvgf5B9IIt8jDrMwOTaBXHawCwkmHlJzqfOxjU0HtJtgK51YQq6Afaqams0tgvKK/uS0cXAWprd6zeYJYL9RtICCq0ScsXzu65eaMAlZnLWlFfIAywhO5S9MW1jRV5koWiUMSCTQm1uOCJfLhnlqnurFGfuHrYuED6sLLQeDBBUhIhV916s9FtBGJGcFt0QLtJzoNo91TWAZaBfzVPnujeAImRFMFwPEccXC6NeZu7Bd6HlLHECdn0xojNK6j4JjOgb/qUXX20nXI2BsQs95SDJAfjbVMRnSqsVYIawqbHh+uTKNgzFm16XageM7wFr+ZdcYKMhKCbBkFVTLHHHl8bv2CQwar16OtmbcJ29P3uA/qRj5zZmQzkjMHrYE6LzikDT3aPtWeWtoDPu+ZX4Mdf0umGeXO6zgYr0r08uMhAgaeAdF4hqzVXOdFwRKibS7AJuMX9hID/dnkE/gOd+uoxxqvBqgPKyU4D7mNmoI8qosqdNsyyU/nMWyRyjt3EvyFjJnQL+pQV7eAO54/uzOxl0se2SsjqPM4yKPy7knaXluS/HreZxVgfZC1anJwaSZbffHEn3kqYfqOJ7blnvJKhfVtGhXu/PbSUG0+igt49a3uq3LWNtcGi/A4rERrtnEJRW4h4OloFZst4S3kTHDUnuP2IoGEfiJeMBiichPfkb3tWiooiPeEigxbobUZpbiY11JueZXDi+A4U2H9ggJEcIudy62WMsCXFjcKvgCOSyzCnK+y6CsTUvGut9vnXUXsFSt/xqa2lNWuivg7aC0r5BT9o6pPap5dQRso1eH/mDXMGNtrHzLpukpYGbB78H3k55pzDNqBIhZEi+TYwix/PraPzss2mSF2mqRDRPKHvYODPUaRl3VvyTM5kfSMBBxfb/bV5GeVGA6VhfAnFOw8RvMzmTFBSndJLYcFHIJxMPM81uFnh/YutqaCwP1tetuRkQiTEp6lj9y2z9OmvMRd1sbgQUTo+jp52cp93DUOXYCLCEawVOonLvJuymYulPV6sXTI7wPNRttrFaJ3cNDGeltNXa7gQqAD2xnHrdLa8pO3IW0poVMI0fJHR5ltvzOUKfujgpr3cInaOLJmxjooSjBkZAcbYj6b0CXiMpdWhDvvzuKxNo9s70dXqudoiwxDAgb6xiFekliiEc4ey8RDXCNDGUCghWhY607wlxZB+khyoOy0JhtjNtz+dpLffQGtgyRg2fhDLuhku4ItytNonH0oPNL5e9iw/r0ecLRV6w7+17pJwYwhdPLwuoVai1gEi+pQvYzISkZ8/6ImXWuWHKcxYD3DRTEq/miUr9wdaXFbJJf7o0E3+9DnxBBaoeVPFClNT655+XaztD0Qz4Dtt4l0ned3Xo/30i+7hdSquMFJIuOAoWFSxsSBffLVrZ93IvVFDqGtFiQkpgArpWG/CkMe808JS5ccFPS+g39wwDu21NGbVzNIhXHA18fQQVlnUVn/RelqCp0212HDRN0M6PatVgoMnDftMMq3J1jA+/02qYW45PYXr9qhE8p/eb83AmMqrLyxl1JyUmh8BTq3NM83BHvHVbfsKxDZhSt8NptoSAsY5VpQadS9dxvZMjY8XaeeysIiEZEj47DY41IA6/+Qys06eyx42eyomcCTCmseEuvbsuDymDLWcCWUHPwLG7mSU3HpDAIrnsGU3gJOjEAhf6Wtgqb2OIKRmUbVkvpjXTZRAz7PlqQl8nBNSN13v1x0RNiyVD9jyRX5VPzAiRJ59f1M3b5+tXjPljidYJX7U01buZLaYi+LLJCt1MNXA5Xi349f4AYwMUtEuCgrxHokCg1H/AKxzbctlRU7g16ooTGbHTDxk3kOlDgCiOmah2Hi8nwKTQXrtvcx6YZ6Fte403eYuVjnBT+nvHSz7pscr8+xiKlRIy0WppLDY+YzzNTTBxh8AS3xLJQNgER751Ud7iREFJEn0qekFfLlQLqiwFi5RGQrF+dZvRDNxZivIUkpWkRBauwp2IVTyrfzT/vSy3irOsxnzNCwAi1m+EyoQZKH+PqGbSwIkhSQB7ljNLGaPrjZGcGQlukPPsBwhcQ4IKdnaeEsw+/JLe+NpCVBEYa8isW2mZH0NgSTIKproqHk6VM6ALV3zQsLzmtuZpMH9gfRVKauizFxtjJ50nJ5/iLEZmHYjsXy63dv0mzYM+2wjT1ZTd13Y2CgqnwDP/4aE0mUgsbj0D6sKEpFYTIyXXAddldIPuDZW1IRe8yLgRScgUFnVkVtEJRjTPu2NV64xopBO7blcFhBr5cgFU+y/dnrn+WPrhEIpc5RPnugGwDJTqXHMAR05YP1cC5S4ylNZ0zm+n9uRD219A9klI3zQV0vtLVrOIgvWmBi3eRe1E2JFxfrs2OXERLZMLualxG7yV1K0MD0ZQXM8z3z8wOMbWa/n8kuBr1ap+uMtJnu28NaS8x77MYQm4J2Fx6FJAx97MemlV5ED7bX4TJpKnWZZ8+h7e+F+/q3xEVS+7BwoL16EUSL3xI1/ohw96Ok28NUiryELPfGgNWKVU2pST4KEwGfu/lJgoIPHbPNTFy7I3yhIEQcB79xxjjOzXTrBaTmmG/cYojnT3fJGlNmIQ+RltqQ4C5x/6PyQKus0IQcHqGf+kwWsLyMLdQjbZ6FFd9VfI+CW+htiWg5S6CthsfkRulgmnjwyrYqBeIoHH56GiRdFMaakKw6QCRyYxA71KwQ9t0l6c64ZPpw1JhrZq1jwzZwOThrTk1XAelLOQqu+TPebk5dtEVtZ3oBpPdGTnLw5b7ddvsxyLvOt9MBDOsCJpgkOYMhvZ4nJcO6IzGO49ksmg+r4isOIv9mQIaMppQh5QapI+oj8HOwVIRrP9hm0viKnOxNcNqZfp2TqALIDRuETWB8NpE6maCB5yaO8g
X7oZIuR/nTXgjckkOo5i0IO3opprBMKlpk4VI2J79Uy1JfB49BaU1hHtHlgAwr6csXEGHG3wW3k40dcjfAGO+IJZb0ykTOtuFGkdzvvw3xvFVjmcRtMMOO4cM3s97VH3n5FRg7DZ38akKZvhVEaPYfmDRKq/d5T1aLKcN+WlnCD+wrsSdlbImGopHEFVBoAaUL8r2dd6X2i0fc+qmaK9xnZLvlHBoB5q5nw3C08koB8rFQSGoccquHDe+P0vKYDddI4dmew36w71CLKZu6fpCRjz3ZF51ZMIGnDwgWlLkz+y8Y8wQEp5c1iuuIQv5IVbDcA0bmicmx7T5JNAm0+hxk09mVEP3r9A4b1yTHYVLGjpt42Unu2I+nnGi9jfs4MwLbB2ixBT15QSBt2fR1j0kxPCpJUqACtJ3T8M4GmibUAK+flN0QMKaT7lV7vdOaaC9Sm3n+fVAbI8pYsnsx59a/cGwH5YKAqatuqe0cUZh5VzEXkuoKwnz2LnpqWsNzQR99b5EBh5jFfh81CGzPBxHLp9J+oLrupHFEufQZjDUJvQXJKnSRKsv4w5dKvtHtMjaU412c8oPfWjtNFt2/HwC865ySMNAAKVSALY++dM3Cls+y2BqXm1K0qMO7yZE3ADG1vhuDQzAK99IrC1qJs2SHo8tZL28NyioZ6Awm7wMcp6NhKJCNf2NFXcQNB0sH1plJNb16zK6dHbXzttoxlwP5TvRGZo5rx4zXh+28PfN63dlidLGMLji3pFGziyH1E63bzKg+T1QM/qySHwz1yojato4CE48gvHC1EE8T6HKxA9cpLIn4JJWfZWFdO5At8b+6QVcKxBCoxZ2hNkdUnOpW92euM+3i8/yqjKtUKvU+Fu3POSzLOU62frrCXQy2GByo9ktGtOFRUS47T522ESAT8jNvGE/pIyNnDA77ii6Dj6NxxZRCD5flefLRsvTKTZwPlQ57yAED1jy97kXdC5juBhcsquot3tj3/ve040hzrOCOXFRYEMvTtUToICaJS5j7WlBS0izfegLl8ZGLu9i/9GIluQp2ZOo5/fAv8QP59Gz12qQe07TjB/68JkIv0ZTKtPzhtekRqttnwpRQ/tMJNMepRvgmyY+A1O/b11ZxMdafn7KWi+Cv4YZH0gH2O8zAR1SpZij7F73E74VstA5M32ekKzKjbC3lg7kDGWxNxb4cHv+r74XUeXybDloFcPv5gxuQNSvfvjLbtTC0yYB8mMwjgfsGLJ3kQ+1aqakpaPrgY8cSvPQvYd4lHAS89bczkgLTrB34pFMmmd+P4+5ATquj3ZcvzKWSq4Z8rJzQ07KEn9mxBeSteP8x23KV/vy+FTsYPJMNgzR1UcWhjlRPn5aXaVnhItOSg4SC67IOZ5kr4fU+PVEqepNpFwSeOtkuNje9Tbx+SyeYUgGfWuFXc0ReRGJtmcreYyE+BMWyG20Ooq5u6uZlm2y9mzcXeCoZwx4HtOSC8LaP2jWvld5zq/3OOPw26w0zdX0xm/Hg79CJZhTO+ODz4oZmqKyRms3eJyKZrOgRzi9B0GYL91l9JEmJhuB8HX3vzW2tCMVDsj3Ql6OFBvozA493HFTGUPxUNZsJefYWLsj8RSKyGbsoYBgsgDRoIYhb06BR1uKEiiquiu5tgdCu7jYkGMA8zN9dKOeafAOjK4ixufQDJlapw0+LC8jX+2p2/h/v29CGpzL7k1Jt26N0WkOuPAU9+S45l0tC7gFebtdbH8OjoR56WwZ0uq4kLup4LCWUYqATxTONFkKwRCljTisnCNUIapgFLH4VK/L6hUAwdKQXECRjGHWhk6QfwpsFuPp8lrPp+/akVKBnzW8rhR2VLY8pZug7MY5Jr8wq0p1aPZKETCI8Lz53bAcpZxUVuTowZnfdOktRxoMHeMPLa/yYIAOOGmT/Fdh2D5defQlXbkcdX1T1205hQanoz9gP5RVsHjzyvWGbaeO1+0STsgU1fCuErElhaSdHj5M8OWAogICLrHZcwQhRDF8OR1pvyE5wrF+v7FCCe2TfefaBNRFhprCOkAPWj2XnAsaqGpZY2p4wJ7N2h4EhRvPi2Zk+Z9BT0weQyM1kkPsGdORDY5ZT3FsE/25xTR+25ma0lSD2CzKlJZxLuArMVlT8hvvjzsESfKhefwcdsDqgy/dEfLriNUKH8AZ1nz/G0OXatbKH+BfJv6iF34DoiMzYU2hX9miVHIuvaOUP+iLpyFlGtnsKmfbZNProNuPsk4uJkY1BhJejHkO9ekKeWyaYMRCHhYamMJS7/K+DQ9jm4eUUnRxYInT+jrlY7Q5na56p7kB2Kl9NMTBzl381Vo+yPtocTIblsL+Gp6L6S3QWLI7W69x+DXCyB4/AMFgQIF0qYeRqRPm1A6DCXmk/FDodse87U8r1rUZBgWO+8hwD/K96RtVahiXMOlwG3W9ZSx5JXWKnOE8ddj/kWCxZoGV2wjb/9IT8/0WpwqAGnBVCFd+Wu9Yhl6HrH+eUYYUHdGdd0gJrUbwOiPfBC4gHX5FQyFQ381gOjyS130jG7mbu9HGglDfK+3kvzCnaqOuFj1HSwL4vB5vV2rYkG6Euy7RT6AwZM09Q9CO1MzB/IGcjSH3rZMVOmMW+m1vkRvGSci5f4RwBHGdq44KR0Q6SfkaH8QhZkHOAqRdgvbeW/xycMA/qhoZQPXTh/TqCfNMaHOkI4BvRvhjRGHjo7vBMl2PETNTq1Vc6MZYCl6Tx8pTeIlF4GlVykjtLfCrRaSsMjOe7krBO1kd1910+gYDL3kFbab4NUkHraT8OpVc62hkdrcfQiS6AXRKabwxwUTyqqOGz8XA95uyG3pskhdL8umTSPZCJHwuAMaEZLvkQaOZPqEu3Flb4uD/GESMImiACHYV2VfwIrmxCE3y7qoH2+YHj9eCkWZx+3miwR/QcAG7+ghFQzVUXzWs0g02AUnve35Oqg6HjAddGdv902mQoO3bXjDwwwzSRYNG10ZDmdx9JybK4BUJjbE8Lq9hKaNPWu7fLYjTPg9c4z3UApzkwr97kUeOZWBcVNWu9gSVyqbkjsWCV88Zlf6oyDunrP4iQqwaaSszc7FA0LBD3GuRlU/k/Q/FfhNzglQd0/V1JvyIQHbBrcnWr7PvD11sWkoyty3s9n1CKbq7bzI4mfXNOENgjwz9UBwGLSC1IJ66BlEAs49KINI/PE/+5iW4KZJS+BFR0dXsz/tUkPlSQ6DkbFx/F4tWmAk4IWh2cq+d3qF+9nWxZcxv/nA6d9M4NjFbNnmO+APETa8ImyNtSqVm/ZDGSunfHdjA6wiLHydlZZRxCX305leBbS9mxr5Ucup+nMVly7Hsw2okshrpZ4Bhvw9zw865G1LBb5sJ6yqiC4NfrwojcfZh1WHltH2eJ5HKR/Qr5zwJAC1Ubympiii9ALNIovAY10/yFux/+Fvp6+u1VQgAXPQAeZ4UIoQCVtUpWp+to7X7at86/FhiBhvbzVojxwwCRtc+sf1f/0/G0/8slg9THbGbDVoKxndvpIwjq+KCv2volVNXNCfKD/IoVPNtxMRYQq
XsFLaExujSEd5PARm7pQhALcf123OhTR2uulNpm+Wyj+n0QkuoD5owy9Aur2WnEUHKu26xBGBgbfEJYLeSHi/7jalWdXT7FHFy7Vj3oYax+Gnn9G4fQJhRn6x+JG/xs7gYv5++WRBuw/N1stJkH9SEUhoryc646n1A4KnSqbvvxlBfqqCv2ullpRtuZV08owbwrVbR8RNxRAcbp1BDdSKmfTc4P4tskzMN91YFA+s++OP0b2VTwv8F69Iv3fdHu19QatJBk2HBIBnarYLd/EN1Eoy943KHblEvzGh1uXeSIZCKx2EEDlVXj5iUGG3UHsX4jr22MTlP/T3F5NItAjofnxAkNmozP1vExJfCjjXb8XIIxDBN7UnHPDnanpkNW2JkvsrARyL5ojkrg3YS1K8LQepzPcXlR1+607xYc24WhbIRonSsYB2HbI6z+aMscOdAmz8gY7w8jGvK8CzVDF0OxanYqAMNjO6J/h62s8uO84QfeBY9seo0kJZSVzDtG5OIJVOvBIjsv1MFLeRNb97dFpvlCmQweUgowKL0f0tnLK8WPSq43a2sOFQIoLAHdFLW5oe/EucKb1BpNT0pfM81xSCA/pi51gP0eXZb2wVQrMpnzaiqZ5HW3/BKghpRswxLwFJgKSNmx2EgxpTRPC2dZuus1i9iGCPH5CZLCiExhyfLluRsU9LEz7x3FbaAlISNcp55btGb8W9PN8mHl9CIxX6EPERnM9ABiY3uZyNp4lJ7q3/zFtXd+6sz4lHUQkX4xk4JhUXM+zp2xcQEHQlZDaZ5ofEAzGr943HdpNhw2KykfN9ACVfsb2RjqlE/7YfGtmANVgkjjwSgRXNi5j7rtb+bfDTiFY7QOfxB4NFcgpIJdW8szEm5T0dF91k3g+l3C/oWOErRMYtlW38Ce5i3bBh33YbrO5Wcc1BtFXA5Yh+y94BWoOkQz/atkX7BZ7pFpsWvUzZrzvDXBO8+1QiRDMTxZbHMkLBnzr/QHO13PKmJ053WhkZOw4wj3Yw9bFH2l7Ecw758CPIaQ8FWEj2XrL1W7CMPAFm5urhSNcyeHimbxXyKxRzXLpoe4ERJBfJrp5Hxj3O4ZnimF2G1Ol+xhsmyiO1McCcXd2Gi9uwcrqdmW7BCSmqVzP/w5bXDzIO53Wlp4SS3/hRsDyFsSI3SP6jwebt50gwWgNb0U7ccHdLlLhI+E0DwgThhpbrmkS7LHpAIDH0iJLSQ9N5GuyNNhRuaRMxChc6Lgso0JrBtnjLVNn7eYeRrIGyM1+CaG76WgvgrN8E+ThiUzYwvFVtursf1Vv+YUP7CRrMkjhjNF4qzzF1Cmw1zA4ejVxstYhv58F1vfRSBqlGruFGJULN20+pTdFpkYmiggU3Qff2gAvVfzMKjwShoS3uj7R16qf7sfVBBjBgvc+IL5CGgqvGpzCAeC7M3/e71dylAH2RiYwoZqgj0c3+M3Blm3tw2Zy1WuICUkR9TMM/O8nJzk5+ro2lmKeA449YOIBgHtGlgH4Gqo4wGj/L2O4O3xCIjg3vz087aZ74fPI+VaU6NXQ4kUjgFPMQs5HYvFWPwcq2MbeXuQgnWCRO4/DjyWk4w8j5CaSCsHeARdGZKZyp7xt5VFWv4jHdjZlS0V0nz1R0eaKZE+yzrUTCz1nWOr5XvupkZ/UEhErNh9hicO++NifhUt8BwEEVscjETcLmMZRfiPCYnbSU5EboiXpfF0yLfKP3H1os8ZXbJKff/UWJfTtN8WsvkAS/r2nwJr+UUCIVU0AZeAY3sM9/dxVDKevAeqZ25c9gFHTvD4KvOJ2IwqC4CtCwSTF228V6PXs1B4FGM12IHrqAHpsGingL+vf6GaHXFprW4ntd4w1MVSr6TkRf+fvvlLuOwvQrBSpjqAiXAfkNY6L/YJ7jZ5RXsZaDKQk8JCzkV58yX5qrme36zU4J0BZZZjhtf4+Stsas+qpL8WjPQTDy8Lzdq7eRvo91Y+VeX1hiK7chT4+gd8bUfgwPTkE/D6HfR9c43Ah2Sizr6+opQcftR+RDlQENEMDkSpYEQikP1G3mLKWcSNwzWCH5PXq9iMzPQo+ayEivnbN+OSvw5rNExpAlhuvW7qaXGTXZviEy9cOdsJcMRrUGLtaHGX59U9JBMOQXJmZyX9h+6bJvzBZdJr9mo+uBBl5sNOADsy1zRhyg4GQTtFNVRqrQgMeySD9dAiLUurU7lxLsbTH3MuOkfRFABd3CHLE+F9nIJFlYHpTFu6Q77CKrR7D+d5xqkGNa0BGHFW3uy9hBg2S9gh9OZnt44eyEpxh4z9lfdz4HBAicNZitKRRy/I9nGn/ZkQqIxHg6VL0bdbMmieFQORm2cPoiMdxGxtuinT4vyFsm2PtB7uI/0u0BoRYFAQBotm3btm3b7mXbtm3b/Nl2N9u2XbOIWcgx8g6s0BaTmXRL4i1vxz76PR6s4JXwd78YJS+DY/NR8eJicoORqLxSz5408OvrvFVuv4CgPNHNSSZz+TFkaOfZGqjC8AHZiK2IbstcVq3SMsc6Kc64fO93UnJZlCmCSQgwQ+scL+rzMAAEDCSG3FFBZsty65HqQ9tr/A7JiIF22KIoyuLgDCzYoF2e9EkiNH2uXD1uebS5zrlUMNwB7Fdv4PSOD1iSnGIPfP0n+IyRnBrMboV9s7y7BSmcJNWP+0hwCsZ7SMV7+/nrAnT8ywOlN7jJ3fk3m0w/Pu0LfZ4Z1dokIGpnQZ3JcvyqGeq6OuWq8X//02FAkX/tbC62Hm12ZbABx9nf8F95K/TfubRHKg/6b9v6eNiUcAf5RRXNKbwMSTj0NsuazYmk1GiWoUsHeUGmutSPYZf8EvVT+Bb4GVYGXg4K1SE/TAaOLWp2hlGJk1YItLYhqpgHpJe1xpf1b1Iou43H02lB0pZ5kJPSdVuFeCHo0OsihoBIt5c6emFssjks6uH0EP/eQoxX2o/+w1dp0v3GSEnsonGlIM01WrKJ7uqw1KBGIpPuYoqZk90GYPnv8ZA+ErEaKohiM9dzElzR+GzGuGrE/APRhxHrXEbT9ZFFoYMSwc4G7oKcOxD/J4Kw5gZvKqIRgiOUiT685vNlCLokhczmuI8fR12rTLywFGVEGnH942fe8ZPFP/783sUqJu04+Dq7WMTL4EDQiQ6rEObc3z8izDbmz17QLsv6QDMDwU2xuxL2VoRw9ux53UMc22RaHK0YJM/324poxH1ao/qoIOEFc2+IlefBwrfQkb6JVRbDQtRCIQ4pqNNmZo/0QM7j36aExMw30CiXbxtNa/dWBQkk/KTo0ZIcGUxNTLRzeZdLUMLR46Iusa1E43AUsZK9j5X4ONqZbMzAB2ZwAA1YzTECL2Jb3BXubi24ONbw+BSbazsy819ZJKW+HiIM3Dv0DQHvKAlwqfKORuhVCTb9Xonp73Swo/VHgkvvX1DM83FaVRDG9MPiJNfEZE3lBR14DJak9vhVOYvYmHo8GhvVt+TMZ57VAr+icehL1eRscdKAU3VB3FqDTszjLfzmVxL3ze9+AiLox9wQ0fj1+kVdKAFlur2G6JpVGh
+3oH+jIDGkNw5DPozRf2veyngbNGiqoH/ABB1oj+DNv5z1olCni/dg4n+woR8buY5d3/uanpKi6VV1E2vHgkxMuuqfSOl2bpZfHS0IqFc0kNFKpPcf4fzQ++7MomcwTovT1h7styNpwagKiRi4dEu3NdbJcF26wVMTFN42Mb+6VR2aN0Y83mludh++RevFhPXHjTv6dad4EC5ps3adEYcdxMrq8yyYXN+M9KPig66m4CWfbQ8BWHZggc8bueEe16yRuNdQ4CWNV8u8c2cUqLR40rwqgjpilib/SGC0yEyojKKN+F9vmiZxEhuNHJuMrJznTrrKIDb6MYDfEFf9ySq/5ZdhHWmwEucEooW28Wt1UYBuk/0djRGnEboru5Y3/pDTO3Yupt4y+Ej4mSRS55pvnXe68EE1eysyHdPjkB3DS8jpNSeXZ/iiutBZiWsNrBype72T/9gZYa37Vge03uL5xSxJ1emZDguLy4JftFB2Gm8/N3/OWwB+q2oyRxAhtZKb5yKizeN8znhjNjj01JTDsuG06k2ky1wPOtVlG8F0mC03XBKpl+jwIMlxdpXGv2ra7iI58Bs9+JyAYt5qcY1PRsNTDvKNDTraxRTp09oDzaxS05kgn0+KnlHRu/192Kk8UiLsqHM1E8TJpr9Bmd+cunEGl/QZR1Nba/AiKA5F6OVgjqeWVGOeLs56a2mxYDhzk5wDdGbQjqKWv33/AaYR6PkrQVTJHOSxnFBqnrrVuwjxxESXJSuQiHoGyQ+HC4rVwrb5DbwI5uAmnDPUthS1Be1ilSL6KE5+GA+d+c3/iFpLs2NUQ59JvsK2LSPHCtpyjRkTofgFTnMrHRJhqtuVGFEDpGcXMhMU+pu7+0or2xV+5LeFs7n4zKsK97e3c0OP2j8wUDjUZjFfyP7zmTSqNv7PrvQxovZWT+zgpZtk9BeWGue0PYfc1s/GZRgCMVzlUJAWGyxOtMlZI+2o99v7+QrQr60IzNsBbwd3jWTKuOmWN5n5+uwW63EwFpP+AYkuluXbytcxeJSoKVWTs6eqtLjPp9npxz/NGonQ5p26GoWlDp0CXHy7WU2hugtWUVojNjOZfzcoCUm/kx4XYl2E8Jx69kxW2a4FN30wEL5YylzaOHpslFe6DBucrix603T0dT/c3xNz2ckH0W93z9lvGyWnbxnU/2vvtE51j2YdOU7OggDJCL4uKyH7WK4QPpWKt73XGAZ/55pgBXcXKnYXdx8hrixe6faQiBot3QyIX2A77Igjy20pVrsLi/EDAJIlutg2ZFRx7L+OVRAbDoKaWpon1Ra/7DO/P7jvBiNBGa24o4IeclleTnSZCLWvMHpHM2tOVsV+rVWALwWIjkz8x5qslhnsUlgjjEqQIZAgAEkttcZ1n/HJwFmCa9xbm6TYI/Cv3tYlcc6F5GZTvXUcdDf9Dgf26qUnmfgUWNJoyvo2kaxagn6eY74W3qvPNB9Lz5ValUXeT//kXvMWxCxV/0An3P0JcIdz0BflgrIuyie7rmT7cSvDFEz9tiiNYC4Py8gOSa61spQppUW1pFwnJr4IzQv5L/dhGnzM4F73zJxi/xSX1disJ9RzdF1GEn9OZ4eVWiuB7S7Pi97xxdALZQc9eYgCU/bMI2LEysjVU6Lx/5DdhCZeb/1e6fnonphbwlJQ0SmHM8HGNiIkVb46fRnbEa1Ed38ISfc0C57dDHdnaBiVnVPkkzqtURIXw93lvKZEAt8G5muEmsf331x/LQeeFfUwE0posjvm/q6Gp8ZLEbj9xe5NXIaPg+Y8dRJ+WEJlhVWTP9ybibf2JkY+rxtRR+ROU1ua9ARDHj7PT1jUNOzBVeaW208nIOcmamag2y4Q8qc1vxlAxq4Zm/hzWZrDOwwYGKKC9jZq0JUcM4p4oEkNrEalzyjfLJ+xH04rVPlkKPHn83qBehKlSwoEW707XVyFpqhqTmB6Dn5TpT5+ghPFWdGN/dvaGV5xr6jFGgKHIBch56K+Ei/tqmnG9FCNlPg4Ruu8QlgTRemOu2WG1kEy5+G/R1xWETbzJ59yI/u9CH+1CkT3ESClt9APNVBWu3t/TpDJo6QmD/2VsSTiPJsVRKEVYyMIKmhEp7d3/MQ6/j2Y1lbhjaTuf2TiZsM6Vsvnp79S5dzdrvW2iVTrS4lBg1GJ8v0lMrkqSuTAsONo0YA09lw/I8DEdPDCukcwCkyQJfrpMKlQU4TE+hbkNL9MtKNj85lhB9PrLmx+yBAlf+4wMAUXBq3h+l7F0uVKKxLGzz68daGZH7Ql8qpMP3buYpnodp6QA8WXAzbZo5AhrIimg18mh9e8mCoe7qajwz+kM8xbEsrit2/+eXB2XEhvgz9iw7Ln5QMGqsLlOWN2iHdTHbripPSMZh/va/HsnClxZgz0nW1AaC0Xf678lBIKjXh1Yqqx96Y4PrAEUFGkwfooXDYxEoT8JIjkTqOMRqLupuXuGmBRibTZA/50cXZ57LTIzeO8KTEfoiEh4mnFL1UKGdK5lfDagCs30kw67/cC7kSJWgVXSSade9SJejq8KCXozGCorvCP4M97lfcGV2EIlXN67X+i1GVPdP/xOFXBiMSY6QGeVsm+6jzuR98ggWgM3XoccNIppnhP9JSaexoFkAUsj/BXKee4XnAIbmw8fETRFZsyZkSosdJmUD/S2IqwvPnNF6UgrO4XBvpbaWkKcEyT1/3x6eylHMrABdHMsuqJp3yXwMy5lJ23qw9mmstnHUzQUfofSSsiuE6PpOoW/ThWSunhGp2fZQ/rhCGBY6XVC6xCkdNVmW7aFZ8e2lQYRK/NpnO2OX/YCZAvtGZtTyy+I7/CZfgP6q4ECjI4xMqT8sRVp71Az9+Bn0MKbabSikn6J/oH7fJOwckIL6WjjQLbTNGcA7pziT0o7uMFo4W0lXB5GkHG5NnO4ZmWpuJefOMEnZhiyCGBz5CzQwJLruLh1UWC7pfowmzU/nuXN/1Rx55YJMZlEwoL1nzod8yokSj3vQ+2m7Zbr6qqc0of5OOQkNHr/BKuXm+cEeHN+iwOGl0pn7OvC9e5SvfMVGFCWQvTtW9C4fH1U4qv8eZXfxqN5EHCV2bmHqOaGBLdyssWeVyVtVaNZ2VOKVetmp+5kqJ4bYGZcv9ZI59dCx8lN5K1rOGCAXqS5YQ2uMglXOGKAr2jTF9qyp2/mbo81csCyhOGXp93M9IU7+uS1tC1lp7TCOmVsl/0ZZR34rdmmW9HIQsigmVDJiO21LbSpilWtNO+cbvQj6viYYOaCNkVw2ff4/Kfl6B43niyLpB1NxFRchT0jY6W7pR/jvW26zsA6gVFr33a6nJY95gQxMk2rfbLXpUOFSQITJP1BIsgCU84ikyid9/rWnVvPfMCPtHe0Q45voB64E3EyWExtMoaI9wOXO3EAKy2Id80ss2Q1/5r8vBVVIUBAqxQSkoL/omZOz1ZhJvMt0hfBcavjRbrwOrl+nXtYkw87b78FLP7qg87RIzGDO06Yw9Xx6RTJdbaIpq2B7LmMsRhbLsaz5h0dVwZ5Ws3Katq4+d9/ytCf6K9S
oKX8FK6nbrKKEJWFUG2I15pxGk9KMfWI5OscHyN2i8nxxOh+zmssAjeIBNPXuo6Hi2/GGodnsWVyCZoH7sc+GYBnC9GgaanNnNJmosMv0WWgOyG7h3afSjfOWCx154s13aCQUx7EohhCwSTtvkljIULJ8vtrvOArUPM5w3GeN8bISmYYFx1oY+Sg8QlCi8fJjJ4u7ACKuUnxERXs9nRWI5zavmhFXA9ISf3wYg8nSc9L8nj7mqFMPbPvvZtPxebBTPM1Y4m8Ma0pzJPMSTPRrFlYHzpRumdf5N7Y8ico5ZNbJzNKJiPm7h7qfs/fqjMvbxm/q6s4RRUr3lSYEdEEeoI3+QoE4BmVgjNVidFqQLJce/tDYsihr6kRtL8Mj5Y7XTSx7X5Q3ru3WmgiQavmzsgTBNhDzGdBzdZwtUME4t5O/zTA3/2CK8Qp3vq/wFSSBwSSmAQE01e1kaw8y4Za7vMeFJBDdwyN2ywCiRte5qy6LO56ubzcG0vbCPA48m0xNEK9yQdA9dcCSQBhkt6gdml9JM94kJDJUAjKesbxswxLygRgtqTNLx+kw8ZPsDyDV0lu8LUjsAEWNFb0H6kGTQsE6nV1zFuPuAmrrSsCBWZ/O6XTwzSkzquhkVS3vtsnfAmdoHDFlZa+oID5XxWRO8PNoU+yYs5qFEvluAH2bE/6ouBGPKZI0FjoSSI9GcIXAbNtEOW+ZEvUR2omqjlawM/hpXwtyBWOS5mHFZu+/8tnkFrRjc3Lp+5MDFbeck47iqp+gNcBYgrJpeRbg8Wt7ktLSJ5ruKnOGEHMlG9oQJt/Bhv5/QfoqDLyrbgq68PacZR6sFuVaUkUxzW2Q0LXv5mamwr4nI/EJ2vmZefpJ9H3BRFCjvlkFZ8g0hx9pdkmHvDYTOvOoVW4/WusFu08gQObnzzjasRQv0FNKlLmsRuDvK7a3OA5lhsyYPHrP3oEO2dbFDew4T1TJzxCZ/kE+l/FO5ZtZ/pkGie/e0lZYrNOrBKFPx3GE269cZIzbY3+z+agn6GNJV4Wm1/A209Pxf0OhwOaeTQleizaeYpgu6BUkrD1n6okPdt8sPTUuB1pq2zGkuMG2D7wXlsw7PAbJMTT0DZ2pW1JFwQD4YOIMcjBrqvUJF0OHvjLLm11Tik1YiRvhlsHh75T/s9cr/XzbbGW42gZwFMNviz8IfD5k9A9hr6+cQ+MIa3MUBOIRwajnUDqwForb+T1tDbDhemPAZ0LyzeudZDLTTrqT28oWzC6mr5x9uORwvnqNQCuiqH5rZ6JGtyCKIlA0zl7dStDzbCciEhEFIXbOmNB853WbpMzvBDItcmIYETms0aNgMgXihuN3Ku3r21RIKR0hxuuKxJtWfju/HOqL6qPwsz8chyGRwVNzXGemWaamyPzIbE9jDYMk3bIvVn4FvzwA92eIz5Pgt3S5WM2kKFOIzDr9cXLkWD7BLYQHPnzp8wsflRHxyf5C+B/GCsfjMSWXCMQqDUR9BR2U5fbGaNRnBQAWfQEJTJ5e6UIQhKhf2tLteJaUxMXsnBwN+ZG2x2bm5uQCjz4/9iI1d+k0IPRTDhkiXvKFl6ez4qEdTRz6FqoZZ3SD3OxnF1DAXlHCZdem41dBeNyvQqKNAB1ZVsIf6VGG03ZDq6ykf1qNaYpt0OQCEUC6dvc2YHdprGcSjKRE610iuo/6mV0I/tqP23MTZi0GYjKAwzA425Nd5zMmfSYKod7AxuuihqJrWoghAkcLTphQ8sG1RfmhU+rLZ+p2JFSoaIPuWbnUcKq/tllxUga4WXP4Cyuut3fNkO50i9jYzgCpt/7tPnWeUvRKLSkupEIIKZ0dn4hpDkuZvQ3GT4lC3lxF5sarN9qdnNg3Y3TP1SaDP3GfDbAK1TYmxZtF9nJh7Cljxa8se8xnYFh/ELKDTKomQu6rp8RArKT5R7bC0MK7zcb7yktnsg71SHr9waD4rmXS+SGM2P5AugXTSy5Zo5o6/BQ+9aNKHECYYuWCgmkgbpKnAxh+n5fL5Imnq+G/o0k8GsDmGarpDYAEQ8KKAnjW8Fo/EvSZI8PyZ/CybxwwXAEVwDDCuZhEfHUmvjwA2N7XUBW84M8/pu/7kjJsEh7SWsVQ24LC2OWUwFkOb1YmnSGF/7xgRdHR4WYs5XZwtM4poenNoIu1hkOOCWrvMoYGDgEDrQuw9lEV8J8qPoIXdC8OMMuReDB8q2nEnJOteDhRy3jGL+R6mpA5ztdNIcm8rg+C6jssYmFU0x7hTSiEo5zolmfiATlrXEB0lmeJxXYiAeoUQJDbzE1zDh947y7wyetnE3mIqyZHDO4T+i5JzxaxCJx5+NbuhanRdKcX14lpuDf0sis9myEabCwqLslT9HfKV72Avj4u5d9DL/FUM0VeguXE7di2t5FZ0/CgSpRdanCBTGAFO2xN84mCraOzS01fzJzDilepepUSBbGc1XEbqrvdQTx1+zsSfYBuL9wHJCchpCd4uLI1TUlG7d2ua4WH89dDb8h8oSq9+9ig4Bbxz05l0rnellULEOzXlcc4tqi6alK9S81S24NIE4MYYrPouvfe7iH6SGfzfmWi53UfCTKWghDm/c4t70SqHzZiODJmzGPbuWkcb+sfeiS5UfFLUucKIVHb6jWmR25s2vjz9U7EhGIMEg2/eUvvivzMz7PGtMRkwrc3/ogikfx9/6nlbwArpJP+XRIl/ln6bNr/A11w/bPqm+LMjz0QiSfThwuzXFFEZYACHvGMIyqmB8E7w0ziazIfeA6g+wfAe+/cR6PVSvvglkRLdybyDzjL64Xy9XGOA4fQmsWwUo1doFJoYDljonms7x1YTnKWD2mzX9NNn4NzsC8iPtF8ykYs6vm9ZZNdTWWgrBAzdR5uB8Y2nz4W9xWi6ye3E3U1x2AjNNQu2+rL1Dx+1Fwuab5cUh5XD/aUBqaKVdQpBBQ2RKzqIjX2s7eE2cJoHPyt09YPJevzjy+FsmRKHYXE4/uQbfpjMs9Lt5z0zQtqIj+HvXX4rUHK5Z9ljZcuyS0P3vQLogtN4ACAy8EE53rpI2UEJVUmwZ5qDGehjM7b5fC8Aq9eyiEY8VuAgGNE+7H/+rSDTeSwtCbQSk8egVxX2LpPh2raldQakpLqmhkrx+d2Dm/Cg5HDB0xh6byJAMWAaotAZmYAEXW2peisHPjtkGZZsXsuSfoIQleQhcAU8XOZ1d1oow+fxum9+vkrjyqX4aO5/GZfLR5ST0aKUKStVBVKpMhoIesBczrZ0s7DEUonyQuRPRO7xx0K08wd5j+wd57QVe2IWfN0DXgSxdpGzrHmnv005xVVROAWW1kJV0wlT5fWJKA9Z6jH1JT0OhaM0+r0F8/C33vVCDXtdg93sUQwwMaWQ43NodtzeTCojb+HotFaqo/wZf3gOtzvA7gIfXdDjUHd3ajOneXqu9Pa4tfTmLiwY0caJaipRRd6M6UHPopXQam8Y83yZKXULhP62J3bCfYRHpc62G0MuXbwIjRcK/X+T8MJk3DnKSnOaMgNnmYn4bDqpz8VtzVWaRRvt9OUUz5cpiH0R9
1v0DGtQRp8+DGl3x6mGlTvdbhjdycLHJaJJ1KM5lfzv5rhV4RojqCJHQyLsCKDN97V+twFwBL4xm9JURTVvpEUokbDL0IqNnKjXcHhn6G7hlu+Y9vmqUF/fH2cuwdSpc5qegu5I2iLI+QeQqhbKWRbPIh+dibZdtRuR+ptDd0aj0U4DHCRccWM7pqZ+W5p1gRdJZAchXhfrsxaa3VN3m5obbo872jx0qxXCBMES1SqFnN3pVM1QIyuEgvEDmlJRoY0shbKMxvbVnVZOYNbod6UcD0PCHRpcndNnGgI+Wv9QTKwpl4dQZ1PCmIXZzt+OZPJ5ap+AwqZXnjeO5LWGaWPSny+Fpc93lkFQxmnZldLrQgtzdBeHb1zxM+SBhjKjMaqE9gh48xjXqe0WVYFgA+MfJby9Msz48hVEGCi/yHKyhaPMiuUvabiMMYeSlMlbkJBEWsuWGM5Kqgj1yIhJkItvZ/ke19qdo2OsxFye0L8Y2yJ1GP+ZQRjrdbDNOuxAHkm0CAxnLkIC1U+4qp7Ncdo6UzaTJxNtcmSO4pwkHBl5tOtY9M/niW0FugBDA32VnYek2mZI6dpvD78oCv9liG68j/9FMb5nUwBARlIUIedd3kARm73fQWHTbRvIZ5TSxua8GlgczAwIhfMaxzyL0bH9X1SIcQ61L/KEIcyVOQAwQMDdlcwAiXCJ7hO6zncnuh74nDIfnn4YpoaJpxzEGybCVCGUorBOUqgOMx+VFb2XGW/Qy/Pv7aU5wA3hk1ykPIJI5ZbnHZetL57n1/qbp5ptMxtjttREJkVQ9G+LKIGe/Clm50RFyjYRAXT/ynVpmNphxrA8+YuDle9Oj+snJ+QQa9bavZzbohkXEOB22kVb3DQ3pMX86KR8za19f7xPYQsCsBhO6DEbyJpIysV9VTdjj/K2S2o3y78NSZZNocihrUen2CP+8e58ZsPyw3SBX1gpaqmd55UkNAOXp3ONt53/8SyB3GExjPldqaLdj695FHKFwx+E4kvyMAjQMUM6swW+MkxjWLfhqSZQBULerbgDPhNIb1Kn9ESyiL5x1ULap9FlYfa1NzW4SNr+KU3TDinQ3qgtqPMOpXyiWP/kTZZZtK6Qkg6aLu4OiXBnIkai7k6c/WsWPQSHNtvNBQAJhKxb4541fR1Xce0Qh6PfUv+I1iyynyvHm0UwM5M6x1XKMXVNn/6L7KhL7STyJ/oPzzb8Z8o5KffVWYagbEVxbQrBUIzB9H/k5XmgHHEfIJ1C/BDjj51MSKyN+zwJchjQ571xdQ4EPgxuM9oj5BnS+xQ0r5god+Uwz/1uYUgub9aLMpYijA8WzxL+S/wixKBhcfD7EWVvtcYoZw/pFB77Mccnuz/v7TzBfeDr5mRZEvdhlOgggkv1VDI2RVWhrpRjkBh5fRoot2QBkRH3aXQD7XxEYM2c7NSHSJQgrw1825RFCLtE3BuFoEyXFgC8z6D0Hd3l1b4UeRvWAVbxIKG6+Rp06DwTsQg8g8znxzGlM2nFEYJOhPkzEJuk/tPw66EjGyJucy0hFeoaUVaZUadC1YoFm9+lHaNWS8R7NMWfRgViRrzLa4qDwcerNPkMfZnxO9sTDpSmWgZt6GZgOXp2CzcL47UbaVLVLdp5MJ6Pg4UHR7tqlXsrBbgZBpp3bJeXSUrM+4MQKWJZtqNPoB6TLZRLqEVqUexf2bd0gMs4z4GWfoeHq+/5grIa0OHS9Q2j4XqRgCJdnP+oytXzUaNQwI8SIQZCiWGtFKx/UvjDlrfjLeYenLMSsveb2679ZFr/MTRcc1buRnRsPh8jpDfb7X6ggWy4eqdacD4y/J9aHasr82OKyz2rhvJCoB4Tg26EbbzCdGKzSBh+jHlFJ3LO66EOFQJNuF8MKSuP3deuCye2UA0nF3CkNjU+w7xBi1EiO7KWcpd/GVfiv24e1cXDNM9LNiJYEUw6oGX+SAa+DCukFT//HNCNaxjYL+4JTEN5Eg2+kwZ7XnNt3uUQqMu7AWhEGR+GE5tc48PkdSl4SUDYqxA0FXNI4AfFqhvP54eJ/4hKkwgmYH8xClJrtEhJUfwHDRyx1OluVsznYoVmvnqPV8WLEk7VbOoXtcj7Ih7uS8TYKW4YB9pa4vBfRyEaS8MXqE7sHk0T+UgLaFbhZT2+xW+EaySyn2MBS3M/h3b8iJbhcZVpDSJDoCrtT/fWam0AHPcvX00/rGuCPlGLq1xQD/dA2k3hFORkEwiYDDIw98lcxvxS0ke/v+ZrC/7mgFBwrnrFJMPUI2fBvyUtLh/x3jsY+Cwp9WfRWxUiBeIZ57pMKVpDgZYENKpCcah7+S2egMMooZ/U9LKTmv89fGvylHuWnjmEtR0k7GllNcfLpd+zq7OJYq+WzVok3qTdYtuOrNJqmFcU3TXFxIEcEGabdH7UOwTUHH8GgXcsjcxqu33qMZ/jsXp995ros1XPvUDnhGaIjX9QQ66JOmYRn7V2Xl9IMfqfa3Iowu5yKSSE9vUx19O+OjOHwYkf2lseIBQyRTYH/z5sbhYPTHJhwThNCuY1iNt0XSFQrMC6yVk/9lkhotdKLv5dGcG52dZN8KQPGFVeeAZUp9GOq/RRBF2n9CNTXuDznBzrYnAYZopCidP9+J0/eY5iwDwMqFieLxI/IITlrIxbl5fydcwsmGn6639cFxsXUvHGVfD1dyTV9vr7wzcj4jvL8SSCVuVaA2zouS6YgcfHM2MyhTdW2fj1rI0D88ebETeH3j0giYrqFRPzBG3bnqjY3loqQNF0Jf8aNqhX0PE0p6zviOy9A12X8WXDnFAfG4+3LimbFDVbxv6kNHChcDra8YrK1fa3BU0NWS+673mb11bVmY2WgZn2IsythY/p9KqcWtk53E7IxXCJH9AOfVTSWo1XRgkOjwhko2vLuU6ubNA5rNFLOilJ402Ws0nz6GjPFZL9xzltLscbNPMt7l2l+Z91noZuegVYJug76UkOOZ4n6J3247CndJ9R2tnLWrru2c4+n8i4/sPVbngHog12e79JdPw+MCDhQ/lifU14JID62roPr2OA1Es8rFS6GjPsJl9gwV3XTNoZ0GvCe8LlAkd3kFNIEjeH0SLqK6TZtcTXuKhzCtwHKh9aygm5c7lY5UMfE7d+JlpdWhT8gMBWt+HgLXVVrWfDepXgd2xChjWRcmQWU6IXkdaO8FvlU3HgZTvx2vAQEUPQV/KY7OEQLMISL/NhlXhVPIOsJ4RdYQxA3KxT+oNszv+IHUUcMJ2VTL1sMGB0T2ZH7i8sHkKeZdcWiybDJEBsbwDuu8ZYr20wNuY3nj3Vc/Z48oomLAKF7IF1xWcJtlhWno23fUa0J8JiGQhj3ZgxTssrX4xyLcAwpiNyzc4pna9E7fZsag3ye35SoggLOZMorfYTtSi7VQyfGD5HhFzj+jqmQf5cHU1mylCIb1wT0OT0NtMGpg9kzUgxuB1fGZYd5557KPgnJaJNOV3dM02xILzlNKLf5j7rTwGTMObGbx9bIdO/rITUc4eXi2Nf2WJMbIq+luo0ssJcn7BuWnqP/HJCNw3SdjRn
+Lezkcnh9hUd+D5tX5C2lG4x6RX36cQdjvzGarbu/rDu/BCz3TCgpXlnThc8o9Uay0+XLaqX0Lam1jHRhB66X/saGKp5R/Yhc1i3+H64Lf/IONuhCLDJCyXOeb/5j1xiGSHXWUMMGB2UCMu/uKho/Mu4/onolqsTqO93oK31KNpuun8rEibE3zOlDbkIMSywcYPkbOi8YWgplaQ8QdjeLzPsuWVYFbWKIPrVtRAnYKG+dqXVRwdjuAzWTIfnEsxMPglp/zFiss/y53QZkmJPKc1QohzOrlt4qOBaobVuApjheRJhhBXlBlYuw0FweVwRZwmBtMDT4dCWfUjVdzGW/Gj3ImDp2z03GmkIRdDNCrHeOHDoktdTbDD1X5OOxPJJZ4WCqKg0PoriekBjWVK3oJBqCfUthM5xink9xuqllBXEBaZxkLWVaYL5R1DDVg7rj0VgFBUTW15qHTstGEC3OWsRssnyTgme6SK3eSzJxxWRObZE4sgSrczArcuf90NWCNrMdcGsN0IvZoTwryBqOZYbTxyQjvYWkI5BfzTsjH4EOyGwyiFPQVfzL00qgEmhUDSBUoZHDnnAhTtNXNX9lYl5XlajHTlqKg8ucRydylXaqklC9LNpJA8nnbQKJif9X1pVcgXuxjWInZ0koNe8tNyKY4ueSObpvnwkpqzNeFuEsC2Zm/eWdmsTIzIJotukr2ndjkRrmiq+UkKCuGFD1Cdh/gajibEDEMrkMgXOuKTMF8W9jRr2mF7gByUAfczYNP8bkrp8tqFCkuazb/IykxqN+ydzAGmRqPOM7KsyDZDNTrAMn+UQU/URk3bJPVBxlRiuRAxnxdzeFxu3fUZM8Rk4fAN3HZfM8wUmREgsNpfuGwzupl3QQUPG5g92dX/pI4SdJe78QnJj4/POs4+X4G557JM6/IBR1T+I9xUR//nwAQmb9U2KN7PGaQLwNefV/sWWtNpC/FpZoJEymaRHRuINvv7Lahnerx2wmIT0PTreS7V40ZlNlMuhkKUzr+W9LIG0+Sx3QlHbMA7fLQq3veoCwht52TOperPJxiUWlgyISO4p4uoDudPwOpSqqiUEW2SlaePWxaX7b47u49iHQ2/zZ5ao7ixs6u2XnSqp/yhAZAA40oJqpclIx3bCemSj2HyfPBXk+2WauQDzhopQ7ifzUV2XMjWpjIT+MmpFYQW4vJeM44WnJ1om8FIfEZoQWaBza9Pf1NxQPpyhAfOV9fHQ8nVzrvWwTV/s80OFnlMt5wkpXixCDJ28HcMBn8Wj6wwfReM5pUUE0k+JNT3xYA2ieabii9D62/SYslibbizUu2cExpE+/nxSszLsRx6bfs4c49DIVtdozAfxquryho4sNx8pq2QluxTQLA3qevTjkAYWnaFaTcAVbOdSGgsqQzq/136a+sjFWmgcFfRmrJL+vMNzROVYtYf4HKSiA1hoJJ2FA/Engg1dV3b0SiZMHb/Wuc6+h1n+RZK1wZtmSrnpIDrkwwQfKbdNIIvJ97l96DwWoyj88vk9JUolXVySlnmxvQ+Hi7XArk0J8xwI0plMaZbdhvILk52SMvssJ/hf4T3Wl2NctnU5p6XqMFj5581HZryuljQVzLcTBLiNkIKSNDU+x9Z1R24qAAmVVVMrq0xDhrJoHt0qqNxfuyZcJhG9/Wx3Ey55rEX/aTDVR3KoxnHGg6hWWUvZmjK72s2BHIQnLENt8aJ0o5XdnHxmC/6qvLeyTE9kgfP9kQZSAY8o+e+weC+XPksUvMTObyZaP9PsH8eURBWd79DaUfrpKMOPtwxxzjkxLFsg1fK1el9iMUmBVP87ASUg5Moy2loItv6Ui5ZYtmhAK30zuF0jYPEd8Uf8RdNRmLbiKP+e45YtPIZB53cRzf+EpyveHl8sKhwRIGiL5NKSSAKvp+RiHBkMKzKhi890aEEHDB0Xp0OPsjCHYyZ9V13R1u0kvKotwkYfz/ISM/2Iseo1n2jy8l9V5zgye7izXqpuFUPCKb5od277/w7XeXHVBwzDW3d7C2G76FFFrhZila4t6tWj1S78fQmerwl3I4MYQCP7QdollmZ+3JhtXkZ3S8RS69f25luZ0ODQ/SI/lslj2u93xWyUOtWbLsPaQMoGCthYq0CDwho8FgEoXxpD/iNX/Qh0xZUhPMRnFS91hY5YwImKEPIuK1lCxelaAQQkaora/ycTiI3Lxe3zKpnrWiN9XTtuZV237n6nNMw5dI1p4Ul/vSOXTsIBNPEgV9EZuX6zl2umyWZVv1w16W2rH18cEeWeUHWNpSL7/F9JWXfkU51pZNZOr1AfnQjzZGIS3ko4CwOz/Bnehw6EJ1Yzk6Wz8W64cnklH4UDsujSq9aCOJbWxtri6zXTC34eTuH7IfKIs5xZ3VIPG0C3v1BPhOHzUNBIIJ7ZcCYOdTwa3ZjIXYBe7+KNb6sLjOECoQkmN49Lg/SUkksMbU20gkkik+69NT1F7J5r0Lhy4xGIgKcZnzfAYNNHrBeTQ1mm2Vhy8HPG+q69FL4ZB+8hnEzdUH4E59cqA86ixCJYTjahKGiOc6qxYNmPLwQ0O03QagRBbjBA8pyPlZKX3zwt14DE/NTMRR/3pt88xxsGVdM8YUFxizCYrg2Mex3Oc7w0eP2Fw9gai2Mm1XeY7nko0gonMxctyzYlyIqHLQAp+Uq6ZmSYrhdvxM5dP/EAM6eioXLe4PaBsHPqHL4JTLsxn1cA4xj+v/bhoDQszdQlSwQlqUdEo7f7vpj3uRCDG+xmyelFKb9pmbG2n4HSRWLiLKz3iUcHkuXJNVi0PR893S9Zbe9AoPXaw3gyhlPxg3xLa2PAQ7njq/jkmvsGjYxfabfc0gt13L+B94XrDrb2CA4i1DPxUK8cAOaFBwRoW43QQXRhl6yg5j2ntNmxcEl99JXXA1wVt2Ni0m8HjyIG9JoQCPnwdry+0yVm5PgQ252FPWG3ARvY7KDvn0t943BNl87r9RmkWcFAsdAE6H2QXMeWSrFh05JGQkEIPdmTXLIy4n7KnN+fo76HEvDdUokVunZsywN6sQ63MCJTNXU6msW51M9KSHQ/q6YcPmlm6m7rCFHSSVIX5tstwd15QQWXU6M36e1d57R+THglyidYGb766mc0jm5SACsr2D0PzY/QDZ0zmsXIKF6yuBHGrDsogriL+B06IPm/MGAguGEjzaH1VezWxOCvq7oc6XDPiKd+QM/PhV7uCT5rpsfFPZuioy+oQkt7mJAuRMhgWRgtfNze8ki/mH8baFigrYra0g6nRbDeAdPpb2i9BqQTf+FaopNi4EGXWS7PXEINCeQOylQLCBI/OFkUCVNnHq+hJDj/5bH4H+Fg/XHoeufJw+Uwuh8PEHu9qWi/VXwazgakGdfGl1yqi79R4K4/EEzUp/soXM6rccd85zfh2vqutA/y27tGkTjRPj1CWoEfJa1c1z/YuaZa4FkJQPRSkiQJEUK3sp2uWE+x5kYzHHIKGl8dRrzv+2rlT/m9izdD/+KXp2cnqZVL5X5h+e/T3HMmFJwGGsZ5Dxz+sla4v54yxLBFQoQ
ZTHCqxnAE6itWbBDj7SGrD9UbcZ57/bAhIK2afY8G3R0Msa7fyhH+afZqsWjhXZFdtEKYQcK8T1sFN5lK4tJjRL0JCvXo92Uec2VlMm5xSUlV+bdG4qq8Qu9aUwdaMqvb9fhz/nXDoCCuTxwReP0gPj2Q1DixkerIDHlBzs0sASej7xk9UxTS992HmYl+oZKRKqh7DTopnQ32pXZzR9TxrqVaZk8MKbVg4FQ51YamfUzFEV6as6nWF+XcmkwN+YuaL830OMvAG6f9gC7UuihYDMrg+wDIl4Hb5Ad10jZbCZHL0ojo2A3GMMSYOjrAkuxKb5OEDcioHovR/kr7K2qSHybDuvLQkJoqsJxmXSvCtvVLhaOTc7YXsPX5ZcXfkaradu6DPhU5n+ghQUt4C/CjXMnwqCH1SmNcsbBZiGaNwBX91KB6GRJoC/sHWEwcLB19bLa9pX11d0HoCcoSGeT2QLzYIAcb4ZEmqRfwcA1SlPcnnWBSt72+jybiHU/2JYdUSKisx0pXHnG2zm9ylae0aye2Q9FE1SC7D+dxqYFj1gPO7/6LifLK+0Fb36M9S6GPosC04UPpaMjDMy0Wze9jEB1wIQfksFeICv/fxoqRtPeJT07WR5G/fmKFHlJaia+5Db2RAqcpjA3/2VWPd8gr+o74Zr45NxDpL31JDb9knTOsPky+1UNA/ZMCLlwJ+JfAaZapKHxkyW3DE2C408+l7PgwnowzZrslNxqKfNVtJNt2TOI/WP5epJ7ULqrYcLQQK8rPqUpoHliP4oiN5uizkBUcSTZYThUsq6RVYRSrhzz+81iJv6sjJPlSJuQVZfXgSsGfB8bqiGIf3rjTas8Y3GKJtznMzzG5GYuzK9BIF8ecAfJvp6DjtkU3csUci3Tw9FqCDczNYBJVrb7Pdn05wUKe1Ozk4SBO63qmehaNV/u7YSrDpdouX/Z6yhRUgTSo0ko6Xrlo8nasYgwIQJKgv/vkGb4TAgVIFOnjQtP83zmub0p9ZgGzO4xe2DozksG1VvhYToKU1L5oUD/6yZiTZ5bwdFq6RDJ2FTaxPnIWfv2NszL3klaAgnDDZ1XM8/jCPXD0colB/KikyD6PiarQvX4UrNVYxT/if2piG58CZvi++s/mF+fyXIBgw1MU/BqOv+D7+YgWilkNgkWdEKknI1hrRQ2KWuiu83KNUvRnnTio/6Fpktm5MFnd0Wq4Oa8qM4LnM+wE9EIUFYqyBdclhIvQ/D4KzM+trUr3qnXOdkQKB6dMEJAAdSMeHkNYXPXi6lR3oz2vNO/82yse4+I/k1a0IS1lPSl2UIDjWKgsHUn2AwuQFPOtBEs92KmbtWM0fxVz5jqoveSurhaMlLAkuejNK9+npE2vK/sA4Uo4kMSbDi+x9nB+ScKYK5RJSYiTLltPXHY/CD9a8Hkzu1tFAYT03TImuSBzGV6TlIb2rIhXYQ1SHTTS1Fz7SJVk/RZnSqvZ/GMad198/6IL2iSQr5Vg8H7lr+NvyF7uO63cVwW4BfYyo2uvahZZhkyxKfhSflwewxkJOwqKqCtaTOivJKLQzmyD7AWa4xWZFwx5DsUeVcSSWnmzzCBoZP39a4hBcrmVLl359hmtR3hnek3sxkLejWqxGCQD9Io/yh4kzkGy/78a/nNn0USHl1cVdsAiYVLD6klxdyqsg355DeMYR+lTRsIOjp/LrubsVbmx6gq9VTTrwv0zXqXwMLozjUPs5AAa7U/IZLn5dH08uABzG2Fdw2BBpzN/vgTOYYkW7LhAmu5d6xSgzfAifyvf6DgnD3nttVyNGoJ/Sf6Ys45lLSQuMg8BaYh5yoOm2nvp7WGRON/894vt3lsOu7XjgRkHuPuOucoPhXSTQ7xfXXUc2m1o6mHzRVqQLD+9nI4oAq0iHz6JY1u6WvwTPmAg1ftStBtGWn+yc5uKZt9ar62BFqfafMtOehJXQW0vfKhJrWLKBzWzFXpbif3SHYbgVTR5SUsCJde1eqX1LlV96I85OpoG9jGV7pZDSuonF+3idKNnFYs9KNG8sKaudJd0VaKhy6sahiVLo1KkyKq3WmLWiODXm+iKaWS3yArw/MsgzrdDfP9FxxN6M/HFkm9Wlk2pAuS5VmrpET6/lB6RFpWxZHYvA5EfGWYgKKMV+TIJr5MUsNy8okjt0VIlQlDWdQgx51CYrUptZ810y7DIWRrNWh3ufsGQJNPvuTx4kc/LjLTzq/whRfLCTez/o65T+QekIPERovG6dI1Mt75bhutMCLyVFEBR0GR8cqVd0ek+p/qr4vS+ijrIRmMaFmjjd5maUxWqPgwmsrL4+HqNTRHDF+vTFGv7zL+TbfCnuB1JhvUHcn5ZurgyWNgAJZ/ELeZoR5DrYuT8Ltft68AhpOju7e+Q1j3N4fWHpNGtRr1mD3y2p4K8+LCw1RSATIjDBLZUG4GpGChNCaXKYsgqfCYfjn6WjCQf0SHUGowqaU0o42sNY4oYslxhMeUggIDYPc0lfrQ2thyh4RpfGW/Ga8j6mK4uZ4/wtjtLNpUCXHWAAgO6ObL0laiakIPvediBnBwt1d7hV6CCVXktGLCgqfgBYXDTkt6UDF6p2HgpwXJGHfBNgLHxrDjiZDfLMFf5KhuPnSfyX9X7NJjpDEeXm29BBVlAAsdP/ZAQLSMgIJo0yNXUhmhK4xgMpVadfv5Tb92HcFKdBhJkIYCvm1QokTq0LZYPnjrGF7ngkihITOcMrunaY7kPqYUvs/hKGTFPSuPJfBYMhZpoF8mQlYNAPIKg3ZzReiWFIpvWpqQ0Nbr2m/N1ZnGDzy+0n4a7/ZWtjcuK1cEr8E73GNTRV7sO7kwPaUfa2mLpZuy0e/Udgr49sa1354maEPUtC6c8+9cJt1ZsFoo3jXMws11cBnIpZAHK5kOEd0cHywAb3uLgv853lCxRkDfnbY42OHbA5QV+bPZ40RIAqydNa40prG39ew5eOlIGvPCxz+H6nyn2Ivb3fE0LP3yile34Y8NN2+IO/RWyLRC98FyGb+NWvLbEcUHEpZMVBJOdMcuBcFtfi5B5mWJsZBz4Qr1c6B5eIT0GaZ+SCEt6i4gaIRSzrKQ1HumPFKzTJsbluouKLhNAI++cuM1zr3S6ktVXGt7F9qzY7RLAdZnQfZDbzV4wRGCKktIX11k/mkr29N+8Z7SD+Tc1Vg2rxAFGxKyHyZ6ekbhURP1WFb620b3SBiK4mwYJzZyqkvflRznUQ3W2HSQp8mXkKOnhLZgC7KpC/Fz4W0yNyhTOq+AfeQWhBMWJMyx0hwMsTE8iiQb4V144hqm5W6XdhL+JkQ8VveckZLNhbmDv0KJhMMy7e6sikHHzDwuqg3ftLjB6PwUvZmiIgcKiUc76xU4OTY9aZtgsLM1n2w+9DokhReciZkXPdRV+frn8zgFu9Li9c3y+IxAdQYq2TMAgq4XhDC1AobktdO5p6KqKDgRyGZNY+vyoJSwENmHID7NmrQSarZ2yt39e70+7N/qXcuQbEJ7I+x+b2NtQaZCNcgeN71CR0hc6NuOVy92Jnj5boIHZlcongU/eXx
bczq21J9Yc17D1Gam5JkGc3yzpiLu1I/Ydu53nMs1o3YJPU3xR9x5i9DOd+wQF1HCS4O9ZovC3ccy1dNZw53bg/hmMbtqyGTIbpgjawhpjH6ImkV/70BlYlyK3d1Vws3mxTJCi6D+BDiFP3DkIJy9Hgix6IVT1xkpZHC6wha94UG5Y7b0vDoSSDyk6b1Q+pupRFY1hZwGclBMFsCRu23Oi0kjNRciYLDv1SNxji9F7a0C5l0PFmYLmhFBMSmlfSVmuH07mQ8ESsKs9+J2Iso0Uaj5nRfeJa4oh0esScf2VMSTYXSwK7E/SkCiaZcIK2s9Ep4dyIapTY4uWTzWlF/fPUNROZUE9NmrW9xyl0VdMnne/4Hf2eaHCEYFAFRMu32lNRHH8KcfpJV6+6yH9hsX+89vUTEGg272c0c4Z7t5f9v/4/OJiCBWUMeuOSxl4Ak/6Qa304t2alFq8IQuJQcOUFHQr5vRI6THPv2CBY45ypHWbOZBW1ltEHs28J177h9dC2QULVsTmiouApUSfMO/cDawVma/LfxoH2QrI9HdAdW7E8+wlzS8i/geDShpjNn9uhQa146Gb0JW+k4vcvohauiOdgk3nGe69cOfhspvfaqthC6MCkpJLxg9Dvb7jOE/+4z5CftENopHwFi99DhtEWRTbN9htWWrQRT/b59/WvWk5rMsKrjkO+Xo9Vs6IEizoToepr+9kJ1MbIlU7g5sMwVEJlVs+Nv9T3pdURol7UTPz3WaUfxF64toaallMHSiUQO+xgvRj8wp0VjCDwZrmL3AjfUtxlHxfZ1ESV4xgQmy0Hk1RmRHi5iAQWEOOZ1JcC26uL25nY16Xytj3H0JHc7xwM1crSODpSuzH4JRxaS8xA6cyRvpqhyI1F38FHX0cTOFAuamj2hgDvy88sSYVPRzxgmjepvaVtFdvic3xmXZZZB5R1W2QjbDTR0Rf7saGs/L2yeDYDdLZTpRtoFf7gYDk9xbdnmZb+wgjt1KcfE04rbrxkYSi1Ht0wr2EpiCu2BX3cLYUi50+v7+MZ6pD1941v5piwZ9Jd+JtpU4icZDAnKtFgSy/OfAYv2c5UrmIKDa8rgqz0UXsfR1YIi7cOZlAinliHbJhqfTQKSoxinr5X8fntbUTtU1BIu22QLXEOal5EWNVd/Y1Ojhn3RUNhCJR/dlfko1QGy9pO42GC7wWhkjKSLEOpTQfYpyWmZObKoOrMWcTeHdPinWLmmk2ST6upJgakKBD3Cjd0w3DDpdElyJ8BmMAIqEDbt2FUVCYxpfY0U1f8oMJBEwRvrFIWB9d5PDeCJ91yF57MTDvHtv7T+callhl3t2h5gSOb919mPJ/jEJ24lsp0odJEBLc9EUTL+cdfNqbPfEaZ9SxGbZyCaku/0lEn04qSy7TlZ/g1s+DMR8nQZdLwaxP+FeW4TxoouUopKH+OzKfWifPAFqBDKxVgyRcX6DYNxk7+0dIwWpSSOzFvbo/AV45YT/elaP55bRQvWzVRY9/S0InuBuAszyMboV6WLwHxsnwdrF/mf7aDSPmge1BZ0t02KttXz0Mo1XwDS6J3vaabCf9auYTHGjRwfnOhZd0Rf/htYKj/rtdyRAXoE21fOl14JDrVLuS5RpGzaw5iKsFJ78aY6DbHiiDwhpZMFcUVdlpt1m/mxk6ld0jCNlxoLGKbNykNqRipimyDadWZysXtYb+wWbegMA3QC5xjwnWcP3LHxr5D7fY7uVh/SEhuXL5P2pRP/uNeUrKHz+fzuiq9yygjfCM0qG1L09khY9Oz/3T4vfhdTbubLFFx08LGGkbZILEs6MNNEMklNWfIPbFRizntZUDosuYObNVSOd98VuJmt7K+nfQHxbnGWuUV0UrH/KFNEBhfyoDjD9ncWD8UJ9WSkxCjVkUarlW6UTObvMcD2YBr86viaEbgj0A2o9qqM9df70VwAh8f3ACxHKl5uL6qi29QAfKLGSKnJshgkWUBdo016pKkF2dNfsoZs2B6pMJrT2zv2AmWZt6RWkwqWh0IYwk+XX2+5zh9qASpzYZZr1Tpo07hoboLZatUfJKt8POCnM+SVfkHhc4SM1Ojd60ZjIdc0m49Ydzq3293HXrxMebd1vzIcCecgLtNAFTdAu1T/PrWWVGw0yBdL/Ngix1puc4+Fuu8Tm/sf8DPtVwSsr4ck3iw3M0AqDSGSnmvbzYearaogN9W3/6CCxnyMgq8KG+FLYbZeLrXBdqA8wsJFkPblXAHRKBPHvPBYP9HnFGtqQSjO6ASjkE+3GSJMYAEYkjq7h2FmUYlVB31xF9vJIVldkLS2v2RIIOFgRP1kTr4tpQJS6MRitwuCshOcMfviYfHIWvVI3zr16czYuWVmXv58lY0dg9dbaIgerjskYAtFVs1TA3DK0XLd0MQK1zHPMdzQ2VoFpm0PiZUfYLphIsNjqboGO5oKPo9P7n38osMfjKItdhzUxyLAeu/ghHkZJMPpJzYrILOCYpgwHlXIN4Ui5nPEil5V9RokV6im710ha3R6qe25sQRpITYzRlyFJHHNZ5HnTrpXYzjo4IyAitZdahzGNlh2dk1LE7tQ3Ae+fMmEaITUParWZObbQcNDosjgH0TXoBrg0BW+CmkR03r10EbZrrVUG/lh7OwXleEWv25T03cf6IhyPkgszyAjguaELhuN0FmrZwf31Z+wjxEU4+YKPudzLTwldkASjpK5IYbkl+MlaMqE1Y6Dg9L1SlGlqJl/Mhj133DPkmWqGSTwYLXaCKlVW331KCXirvHZW8dsvnpwAFL5j8d+dO1pNabfrTTEc1XrxvvmDt9Tph3krureCoCcfEVwjzdqfpTUtYUYh0GHjem8yNL5j30GLqZ/NzRmZCpWCs8e7x37IDAuZPklr1op8s3npXrt4HDeeV8wH+RLEzx7vxvnlwso8lRIBOmLIOfkYnimYxOOHyT1u5/pNuDYSAGAADA2LZt27Zt2zY+tm3btm3btm11iA5yWYrSchvJo7vIgW+3J5yDjCMdC5VBFWYHiQJkVI2QTKUAJ4BYvZak9CDGF1UGVdFfuZMoHvfnDbwmJiIXRQK3vqBOZhqo6xHQ2BwkhiF6NstPNyYQj51plliIAiEaUthBQlyA/MB/TGslf93jB5yjhAw/vy2jm0jYsx4guWLd4qsdWPvwBwcbDgXoed6dr01jTrCxQ0ryALkLxHa2USqRLIKAeJoejTpfPpeQhHwBWP8C/jVoFYKqR0c/KbsfQZZew7W5qDWea37jrMOcPT8CGoamSsMImDvAZPJtu7cWoaUcbho7HjeIuhEt097u4tZTt+GgRvBsOdjZV9M3nsyZX9PeV6XFNEVXNBZGvysdf9gBWoRH7oXLU0ufJIczMxZqsStPsMZtAApA0/IhJzPShjPQM8N6L+jfqIU8E+rX9V1oVa355Le1xX+x0KvM6kC1ZAAlaS5nvnZznm1LGgAYTozm68bek4vss4Glit7qatN2H2sSozoWQknBYETxgeivJNIV7ZM4UPhznM1b6nraRLt31+xt+7kfzVoZ4TjVxUBlPx0rRK5+
EH6WZih7MZeFGgHpk5mejb41P/l0sJl1kEKgam3IBCrajt9E995xYr7xKAgrgf3dspvwIDTByHnvKS3DDLh+OWX28BoTJPNkoKLBYXq/aIGWUvH8mTjkRjNe2A+ZKHs5xE3dTxUTk0LiikWDV4RhLZcXR5ClfdD+dYDrWgBmePcEUoBSTBSxqQlUdxSYJ24LITCvkqQ3e9XwM9XGMGv91mRFxmouZpJPBEHi0D1YTwN5cc5axVbYjdn86v00Mznq33x0NdjDmu68ylx0IaMUJwxx/+52TomD3GERT0DlK6dGoYKRY2+v7PC4KnOK3WyqBS2klvYih+bDDXrzBLA4lpCENxgqw1GoOJqjDynXbgPIuWcOwDfHSo7vITAikN+EHT5n188mYNqbFtiTVxtJb3c60vFZPvj1EmPtjq0/3mTcAthYgu0J8a/KleItPAIPU2Cw/H5Shti8lizCzbxBy5N7FmIlf6qfaIrLstIqFZsVaKE8k9HtnrdOU/RqJfMWwa55OMaRASj7lrLRbpdEfYTv79vtHp25SnIIFDhvET+qhXomeKLNEuBwaElaNFPSgz69/rsa22jGjj8CsavwYuGjGJJqP/cmg8pCK/Lp4vihV8SpE+18X4jl8ypdC4NEFVVVPaepgKWfhhPsQffcvMbXVVcRrNbZKWYmLLVaerh1CPSDMwNm3GaoC9/i2j78ABhiDg2nnAXocZLkO353XqSyOVVN5BY0edOjC873a97i7RwKV7Ly917UF630mKQdTwP+4GJdUwtA9Rc5+Yz3EMS0OxBy31bVp75IrhWOl50YD6KPKrpMMtQHAxfvE68sgxYLTchvVZNrFqx2wYaa4ApmgU2VDiy2PUUjC7Ro4NznXNARB6Oc2k7sEZlnDpte0Mapg54sADvbcqtrP0LIGToEYmGSN839i990ZfQmfKt+RorSvNumv1Hid2Z9DaYOsjJte38YAUFCc37PF+tzWrnfwzzEZqH1G72o7Pc685rd3LxhSrdzCHEiPRGu41KaOCltAZi7NxNFoYks35E6QqOVdLN3u6ogFsI92WLyd/7Ia8//9M5rVjLz1oLNb1g/9K3cav5SjfaogXkdt7NShla87hlDKxjpaGqNrQy6VmJGJviBoDdLvLavI5Wyu8XBu2d0+LJwfQusQtBMtDFsJKAvYwMpsIl2u+iegQVrilIQeZmTKobap2uIx9HAsbud/rxXXNWS5Ode8vvwjp+33aMacaDXn1WaoTCf2h7J1OQLkLGIxHBxEhmCMH3iVCYu4tVJXgetpMCIaLrPdOpd6ZSRZOc9pawnDEDjQp/2owH4alRVw286936DnDlIPWGcm6+VJztxHVWyfdvwFR8XzTO/0wEjjGSSbOkTjOyR5ewaFjhsBzgKGKNDlIKU971taiKD5VkdexHTPYe2FJ6yRBAOGBALb173B/Lqsxq1g7sRQpMm2u2Njpy3ztckvf7ZBD9zY/ockh+lvhjovwX415JxnHAnERYNmRDjN+rqRrMx7orspNFwPseTXwa/RknDH94Lic407TL4mclBYG8kTEHGN1nxp/CQNwO2tYi+0gG2X45EytipQo+GZJzerwNkwdB9nJHoPhcuCNXUuNtf/shVttSxGB4GaUkneifOXpZYsqr1QX0SZyyu3NDCdixSznw1jnmDTc04Plj/41n6Kblb4ZPo61Yknr+25k+OczIvfvsk0nSE2ufGDofz5x/SxyepEX5daF9q8bk/rCjQh/SxNlXL0OCFY840ZrQ0rnt42H6vRHcwsZwxDFlmAo2duTAhX/Rg6H9I/96uASJxZMm5DWt8woWgY/9ceqrl0XhX475Jvy0oo3TmBkt+vCG02q07zc4dJl/vGDWWssQfDJgEL1EHKfw90h7Nu6bMYTtwuoQ4DssgHaBP31TPT59WvGkMhcwppT7zJDWxsspDbmogxPp9xzyV2Umhhh9cvO1heCqhRtyKfL4Xowi5TEFzVR4DuCsQJivQJw9rM5FmiUBkK2xVtFU9ZiXCymU3AEEpUl05AQvbmG9aN5nZ12mjlO92UrPLk9sP7hES2IWpyUPr6moRp7U0Pi2T5gXr0z9ppzifZe7tNdfvRx4LI6yodprOdNEOSY8E2kgJ1Suu25svwTFNV545lC2GlR5wnCBcMIMnCdcWA9zq7e6yR9HxrsidGd1FbDSSHDOnG5BOBBkBMtZhBIsDzy3TEqQvG4vNXYJv2KCAFx9Fl5kWdKjc+o+lkhki3Vm07QKVxpfwTnE6A88DP6huuPLIm4HZcDOVD/md6EN3X38TEddKtBq/AxQi3surezXMTXLPjJ6bj1yQgfb1LRgtUAosNQFHPLbuqwNWYusPTFPp3gEgInVzUqaiAHGZEOJiHnFmqSdArc5Gr9T+cOchg+7eLGuZ5/G43glFbts4GcHhhmmPLo+IhS/5h3lqgOhPIBquOzWXM2M1STd1+tkM/9lACofk+8YeT2U4dH6diop3XhrcteP2GiAuFklE7JUdMlY6kn3Tfz9U3kF+5tWNLMA+wluHI/slyQiPOzSFUCg6CfVjO0tr/jJQM/Ha3BWbzN6O8qZJptczX/qIffT2FLk/ssjvYOKg/Ywq+TiVCxBlMb+g1DucWktF77X0eM/GM2D4hktlg7ELxMfmsFF6cCCuPFq/CVNAm0cie+MYcoiskCJgWxjsaVtWrq4rZ/oRDHMPxFFd9Wsm7JDBnc1vEqpPrXjAD/vZ5Vo0tPH2WyAEte3hYeZJsHEkI+lhUdw7NxAz4gi2BlKAvrft4RqOsxlqAtmAXSVufQmxXKU8YINbs5ih1Pww9l9v7J+vwMxxTsS0uyuFdJ01j3sALiSf+EfPoFAohwaUzR+lvFa+anW5fF/T8frYzZTOS5dEmV13goSZCQDQ4eVC8alFaOF7omAT7poTMCuwaBhzFSau/4xq1xh67eRrwGnyM7IEAJdbP1S1+xknFhZfE78ElyxfU4tTGS9q9ttbVgqkYxuO2ZORygJ70MidebtsKe6MzDNTZTjFLRZf6NAEqoCJp8BI6uJ2/Q8JNpVXfSEi2LqYrz3AzB3x6bV3FDadUiyT38n1BKphpxSIvfiG9UOIshOzJnVBPbC9zJyRzHYI3r+5TFkdlgrchSH2JR+li2GjcuHPmJBlXg+vax51XhrLmUCg0TNWS5h4kh3WtZ74SSKkULOVlkEj4q3BTygfgy/ChsJs5a8ayYdhhPDEP9jfaMGw91UZsMH5+JpiC6y6kyYwV1g29CQCBsAmeaseKKx8kDHGaoy+aTQPFyFQlTR/e0ECzLHGYrBvK0DlagWZBS7CCOeD7iTRJKC5jF99OdRYBFTcHBX5Ol413cdF3149O0BEgX5cMwCSLmc3n9gtNzs5pXmivrktfJyl3Z+NAsU8SYuUirB8EM4CgPtb+G+BUelN2sd4he3RgUbkFyJQiRkRDErERqamtgV2PiSKf4RqVxvU8a2UK/k0EZIY3yvDbFoJFOZbqCbQ67Po/0BcNvPZ7XFwBdTwOnGIVq8NSfxSaJEtTkpC36mxn8fgQSFQPpHO0nm
Op0JphQFrycE/AVWmCNtGgzXwFO/P9LXGk33cblOFOKffvWP+NT3BmOUWVAgpIrNXZVOeCgUqS9HZf8UIVlGhYyudSBJmAdHyrPcPp6ai+OI4jjTHpkmugTqPREsFXuGRhj0MWCMr/dSj8Dkem1YwSCUMsZy/fMKQIz88Bkq3Rnuy6PG3ocuK44bx2wyJcbQU2x89iu0WaItzU8k65SyiouwlzwRVWG8a65GRe3FG8EKBnK/8CwyX/OmTQDWCcZ6zL6PB9ywFJH+nSZjqd+hl+Iq00+cJGRlhEuWdZKdElCx9WohEalral0Cf1nDIgETU4IXmH9zcOIJ9KeCocu+jphPgreFwjvK+m6TWaL0oW6BSGLqSi6ImxGG/HiGSKAZRNip9gtIM2Ld6ZToevx7cK0w+tCHrFBItYDmnhIDNlP/pxfmghgdz6grej5htC6F4L7oAF5lAoJiWQH3NMX/9gaL27qEYCLpU34foYy16yysnSRF8/VwU5qzrSLN9SkNyabGh+Kh2lwizyBt/tykFoUbsFYACNZg5NHR2Yyw2rqfPTnInwIn3Y1vpaPIjV7CLkI5ju8ok91SyMuIudvROjmJDihHd4SgYJeuAYU2sjWalQtOekySeakcKYiu5Jo91PdRI0+z3XboAu9y7Kj9b6Y2dRwfdVjqRab/tPZhP0dqrfc/DChJIVTa7cFK/BDBpAxrYk48w+BSBEGvBUICwsGGEJtrBY1G6K/YM0SoKOcRFjCFDAtxZ2ib9gqyO5F1Cd6sbtwSbeJbH2T52nmRYA0ehcLLXHBxvSm+kp2oZmjwHLpKp0oAUz/KWDcKPjYYd/6l9mGl6vZW5bMrFzxLdTkOAAoz/nByZMn/gfQsRCpS9VyQrFjLZciebm7zryk1OnFK5EjHDHH9+2tJSL6Jy9rhRKCPNLGpw/BW2RL2pCs8mNj2xrO90tKV/+LebLVg6Ni/Sd+havSYve23oogpZ0xtJPG4hJRV6ZhZqE+ckkmaeRJaywJ0ACCWKeonnOhcNTE5KK1NQXamBUKeMOArpSQKatPk4flGz1Z3Ah1ZDuHeMIRomKik/fuE6XxX3AR6dh0UXCKDtpJAZeBGnEvA133IR8NGUeIlfOc9ZE5u4cxouEuKBKUkXQ0Oj6kb2rS8kTRyykEDtgNYyx/VC9i4jcGrWlqqhSodaA/HcAAgksjkLXN+7/6o3UeYQ5Kv5IYnCeMYtNpKT86ok0RV8OwxyfbZ41FYTKdrE87qRFKB4YSvI0PTae0SQkDRNUMV3LUV9Qrvl1jfNVck9B8hlm9SeW7vjfiIITwVl2qckEmcrdwTvPNyfN+e5zJ5Cd63WpEf/kXsUkQBbL2+J7vozGyWmE5lTXdnVcMc8WyjektTqRG5nHXOKbGERv+OBkrfIJ6HMP8uhoCR4O0iX8sOLv6Zz81cI1t+QCmg3vvubKFn7YHca/O5NBpJM2DNLJYuRihacwnD/AcQvlo1s0rY4dN4Gh4kKu6d4XwWWAsokz2PU2R/Hw6AVl0d4yLJD76fZxLx/qM0UQo+/bSEe2VPhhnEEeQsDfbsAQ1tDGGsQoJFDTSSZpCos4HSBmM/ews4uvBmpBirUmkKpRs9/L0tf4Du5spWCiI7gsX09UXvm9hS1A+0emXPPzI7PfvH0jihj2iJH0IURl5YzsP0cWjebYks2N+DdI3RMpcvagEnUleCmxeI9y9hQBJc08Ym1XSskLbAWow9Qi+2Ep/IhYNZaULieq/2HQfrALgX/E4yWZjWFD4GKMwFiEuKdwO/VcIkpjXICL8mkMKudr9vhc8aZpnFwBHQ4eYhKrc45nDOujkJ3A2O5xugW8lnHXFd5BALaUVUuyuLwanzK4SQiiXv2sliP6C1bk00+d/jcVjK1pKWZiOYNw4OsIVDem+DO14ArwaAPc+AdoJVnHcdsPiKg2ruExq1fTFgToRpo36Qv5OvRM7tnOilksKuuspLgETnDD5zQMPKi02jOT9IxyRoqBhtcvdoicDQDtyoRLj3I1jb2puHh1d7gwzNCGso1AiJjGFVE6Odsb+EKSxCROSFBzFrRnrKMYTart2oQ+GsDbcS9ubG/VnyJG5bIy+JOL9rjlaKu8IMUVLRUbbuJbOimqyTEIlQj3AykGEMsZT0Ps7u6YbOK5tZbgLtQ4okm89UoEauiquujSrwzXgKiMpwrFbp+eRYpRZUoViecJ4cofq7WAllyCmlP7aosj/1PqTrXLE8tOo5H3+voQHdXbrv4CuShp8Yi+/sHjquYV54XtbUYBtkUZWMe+KK4lZOkP/YoGTI4t8UikDoWxYFat7D4K/1cs/Wzbb5ItkW4/+3ebllUeYWfkZmuJ3XtA7MjE4oJuYbFwJqsfVCIwIGtPttTTpXbU6JaUakEpUFqXlrvNVbeQleafoJviGtjR3sYzEW+hqlc9x/am+56iuAJOvggQ39h4gl6PiAsrhDoFbuIzKUNFi/nQq576L4aQrvGhAEAKWSw262o0B100Nk/jYX9RTw6VsYl80W15pKhY2zrzZ2aZCxsjAmb8MKKg24a53pcJ4wndjKvbMddIq53l4919goUG2ybT94d1nRQoLSHUg61LkfB+TrEmVE5ohed+uIio+AV+k+YlJFKEFQJRTpTjcfBHvOtGeFslG9+HmXWhYSPN8nAW0cu7MJZ7eRrAqZc8t8UTeeM4Z7AEhp0xByX8giLQS2wTqunG8ymmhh/Ui8aeSZBwHp2qkHcIOIpnsvuGejvhdMQU2cY/HUuPoAODz57MXMJZ6RB6FknZ/9XL8n9z3oDlaasfBM+ePN2X+McbCSO7Ct/QqBahBaZPF+HniCdl3U5qW98xhKzJLsgr/gsmWwPhR1hcpF89yOMzZPOLbuzE1L/zH/enrNIRTEaL6a7sj/r1WlSRaeSgStqp8t0vYsRzPWPydrArJqOnva7rQEeDpCyFr0o2vqDNe42q2iLS3Kb/xRmeZbo+IAxbjsd43V0IVbRaGqi9txG9u1pWFytNJWBX+NBk0104E4ozTe048+GDIE+rfD9e6Kal8IQqYZnaBZsx/8p2VHqHAsVmpmzehcGNz9vcogiyj22qRMG/Sa3L7uaFhctS0GW69AbOckLEU8hZIPnHC+1sEFbCXKgwePe8sc6QUPQwggvomIdPn7guRY/xsVNYZd6gYauZBggThakLVIlzImxqRjVrvgLh53U0j2FXVNo16vIAMD2AyYD1COjSu/UMpTgHMhorx232ssasM64gCNRV7OlfKmRoeel8ZyjQ0oAkLMdr+z+unSKzbcNK32MSnIjaTbSkY/yCay9i/4dVSTeZBz406FY/DypyfjZ9CJnFMCbfH1L/IsBB/p+f4joqdPtyHVt/aK6Aa237S20Ek/UOdUyrya/d4oS96rdQZ6kGj4nCToGu+adDZ7CxmkanTirt7ee7c3ygtkR9Z0QnHAcj8xYxK38S5D8DFxy38koLhNx/jeNnXJ+jERBOf5yPIPLbpfXd6/nxSZmBDOt6MyGtvf9+J7xPeWB3FBG5EUyrdysgRmiSVUB7ohM8UUFii
vpVMhIXoG85xKTDQ5+OhxBAnKSVG+bRXQD2Y9eo+rwLxLoFxF8rMb4Mlpi5GZSc9qc6Ur9Da0VJh1J4SxqhTb1zcdwRDSySvv1CopOI1bxlg4ySzGo4OYY61Ru1KCn8Q5bSBg2GMsYaul0hqJGjyIxBWQsDpKmh8O6DkI6CKRotMYl9DcjEoCneZ4gbnvCEgrxKzPQZrblZP42+6S6x65P+wYrLrNwkMU0SGJ6mpsbEUEKpA3LzNDo/hF7feUl7HInLHzVBXb2gwrxhr7/IHBXA3FqHAKuZ/I6/2vXxI6h30RtFTXoTm5nHnBF/0UkGEhxfXUOm7/gFyWyHDZfhE5EKhw5P/pQu6j8i1HISQv44DiMCUKzWgMGWl85T6o3Q8jFO2CWaIVLuxqA0r5TD2fF+sVF/mMxFKDzp9oB8dde0eelEV/rQJ3NB7RpKPZCrG5gwmgCKlr5m38OnNivnu8RDuYKuikhPeSHbe7mLJEsiti9IXVrHz+gu3b+hepdl+M+cRTewocJAnvfdAa8+DwralLtWmGeiwvejbcTPG2em9a1CFEX1KBsHrkzp8k8zKubxlQmkFzZ1UZ5My7XR9CRG3LBkyoDjAk1RUcKlx8HvKFYQPPOnYHI9aXChwMuvouB83NsFFtTSsp2wmtsvL3GU7e3u2P7zym+NVLvTU6tSm9nH3t2zVUpA7EezRjPKZnv442z/1kd9wJmS0hcqLqxL86iLGbVdrESmc0ki7rbKtJNkYIxFoyXR7499NuA8WIajRM5JD0niz44G8l1FrnDOEYddD46LgIddGukd/l3tLFwIeqMG0EO/N1Or5EL1ImO/h++CcFkwqVAV4px4L/Xh0vkJW3Vp2FZKcxHD3u3oey/s37Wt3vim8m29SZ2VYzOVaFHYgM2GuLdVeMZ8y1Ewj1mvjNLVWsw8QBxxzaVN1JO3h3E/GNRAuHdLKM/lLA2sS9cZvd65zUMhmSVEHVMkMH3QkZl4ULYSyuzAZK6Nos6psGUiFZkfULdpzprXqEbMcbl/f3rVurLACYax1PVsVevlbzR4s7IzbeMkZDD94ETQ1b/e/9getowgQCQuvAD/Sji2GURVx5yJEY6LBuromNxKninWAX+gcJgv65Moq5krsw/22UXEsa8fmdoQ6MalMw/17XgP9PxF2KPgMoll8vZQbVVep5flSeoybN511OY95ZBm6H+bh4tecVEVu5sUTbfy9PgsFRz7o2faNn1rYmbkGNgllCbtKVUAA3HkqwffNbwyYCfbrKb7rYBraGe8268bVy8oiGt2dT42t6ejC+mXDbRvLu91cZ4GwliGd4WgfKEXCOZSQadzldDvejtSI2nihO6tFuCpzsI8KRZBC3ggoelxewA9SarsJljKWLeUf+cDyEVAoCVG6eJznbqKChXV/TsDeNoKGQsO/5J56PJovmqK9b9OqCkFLzrQG5XcJMw4scpxd3cjvaZWseYlLzhuFlh3THSSXCPM52gchaM8yP6tdWaFKEYnWyNw+NZON33AO0y7hDrtUg/nEglHTwhyIGGBaRj7u3uGktYaH+FJYnuinFc7LUdAOplfKPjzY9VAEtnm2rT61CD9CjZP0FmWHxuDPWQs2ld60Webxa38N2XjH32Cyt5khn/K1etcMhXs4TW4oVBQRcg+hF3FC+S54tcFohR4yPKmlfMfkfyEElGeKYktK0K0sJ5ycmzGNMPw2WQP+DWkv17AvJDpuZ96V7+K6vkN6PFlPWSKwcPs38ax33+I5zsAEP2YPmA6S4Y0VVZJmZDBmDLl4bPK5+rK80SZtLcuqoHMf3w1aQAkKCqI9mblApSBFC1jTloHDWf3P5EQIutUbIr6Gz/1m4hJjTkKR60p3Dbq74blhXOExlLlEFm2Uy91wE5O0FLrIQAO5ITMqv47ERBbvVWOzQKoGFthzoMbJw60fOJ2i7Lo/eDCeO3441u7T30/B8qduD6WSyartmdfS+klL7E0Fyr3Vr80mLz0sQce4mM9oKeANRmNJ+WRaR3zQJkBRupE0P+7JZeofaeaqszFjnlcueKc20RoIRJ9krv0Xilq1KqMSOltb+xNhVmc2yMoYCj+5XFN/kpW6AbIkb8auDEZC6r31UYDrzkYiwXDsnTZWIrDsgkGWJxxvBOhhQ9BsZTclZ3jc+DVkVQOCvoTNKQZfdVXfFYjUjHfnKO7S3hsCcJMmI4hX2lfjtrfTzOmOKsc1jPirRv1nQBMoRiEK1qBi/tV4HjmSrQOUYgX8UbHPaYMYRjZ403z0ZjD9j/HLXXvI9c2F3UEqxzMJhwiFsQJ6Ojh7csfr3M68NE2xWRJdL8TR1aPTEALWKQiT5sWU7ZbG3I2a+DylJkH4dv6WeWrfqGVErhD0hILVfmTBnyREpAsmFpvgeBcUYp44Svrt1aJ89dIZ2Xm+533JBtIv/Rd4jPsYcR/9vM86Hlq4dY75zfcIevHXVRwyUL+qgNO1SHpY1DErnTctzwvV93GAeU6QR55zd5LOBvzJTVAplUMnuMeLUKJBA647u8D2Hhs8wN9qhjn0lENaZqteX1q1wd1oVG+YqYGjuw9+3M1x2MDeqB4Nv3+lyJokZ9ovTWYy2mAM3r4axCtcWXHJX9+4iPK9DrURpuZ8DfQcEjE0addecXZwZ5exqSrB9pZtJRpiF97Mtyc9gUBJd8mT7CkXijwtvMNNH8mExL6vG02h8AWPN4MhHtiqpUWNeUPozbv8SjRWNIIwV+n2uBbqq4S4D/6JrgN+k9tk1qqmDyKfcH5z8RfYVgt+xDFMUn8ozLPxrpw0crwJpBWvhqC9r7iRwit5yh8UA6MjN09FGO2W/eUE1Ii2DcBcFW2Icj0mkPXVUiRo+jw4jMC2SzCt9CmHGF1KVybjSOa2tX572Sqg2WnmPX2fgIoUKgZlnPR4rKaNPEtGHkCKJdtpWTN5NEZ9mqsJuYeJ8BjDvHfpNu4l14N/Jz3Z+P3fJcyHPywmgkO29s2IfpX2Hu3Q5uaWj7hKlbKZleSN2aIm+7ueak5Rymd8dblNCrCX1fxJeKdXeYfjrCPdjyjQ05iXnBhF7bCxthDLXoi8Fw42fNYH5wbl4qhea0C9KiETRW3Q72heFhLDVIaQGcx7hjuWHVe5ffC3T5PPGBlF8ji4GqhHvXIAJEO/H6ahss1ZLPGzcUW73EBmk5FtAfJ0MKtFBC8QUD558lX4Lgq7HKGEPeR9r8BNRRmiXVhHdqMOKXCKJZt9LMBqIreyYNIRNpv1kJSrQvf7xMud5EVXc6kjqVkT3Sr9DgvtuMiPYT1W5ac3Hx9xMNqWAWE17UBuK9eSzET3Yt11lzXyO07LMdZh5MRd/rSV55RXZr3zISFSB8t0MYwN3Kp80rUhelcbu6Bxumx42Vd8V9p/vdz1CiYbWkXPgxU0F8ucB2LxFvbnoNUFkuNVOC0hEPjpST90FznWC+Xe31Omi5E8krtyqTFpIScQL8Qbv82jI/AoRaChu/JbZO5iQAvLCBZvjT7+PedYwzav5GYH88lKrErkXvwmSeIQ7wIX3ia4DH3GT3xGgSW
LCV6qdvY23CPItEAK9utd9sveu6x2pEd2qN71HTm2fDrYfnSEzxWtk8h88mdWum7w8k02YLNzY76LW79eh8kMv+ZvXEpDD1xkBQC0tq9QGn7G8V6DM1XHttGAGuGlD6dCkc4voinMwFgz1wcXrAAXvPhZfl7Munq11rPIy4G0rbUSSXe0MmXvuju7RUUhM/79MN66snP/o1jx04hg49RoDnFcngBbkOeHa6+6f8DqGUTc/AMoIs3CXqbaHG7tptrgRKOeyyoUDtU0Vl2HtkJtLkmOjPwmOWOCCHtxCRaeCveepfMScMFd3mVPxME9JoS4Sxo5xRkWLAafzEyHPyir+0FlkU4taEWXXgX033AfN1Lr33Bvzeard2OWmLyLG9cm5Gy8HJ9SLkq+hqoWUDWjqun/qP2+o0CziDC374o10PSVrMIgiy60G68bybnK9x7m5Xlxdpb1t9pzXGCZyHaNU+Q+1rXlS8nvPnHDPwjkxO3/9s8u1vhzo8s3eNO9G/0rgJfweW9s6Augp2PRLVsWpeiRwyvNi46tDyWyJvyvSq8D6R4YFE22PGQxd2l7gZ85oA4j688n7NwyQh0Kjtnp1mnLDvIT4uZ4ebzdRchEObAGr53mvI5sWAfIrYCRR0qyubvst7J521Bcf24lxX4cfTFZwWQ75DabFpw5vsFYkEI5/5ZHlAX8AwyxgfKoJcfP7C/I2hEIBttd6LDMKdPbOFIgdBZERNeDHqEsk7PQf3NMeJa85jEO0i1FQXieKPZ3vEZNkC5oA7R9db+PB2f6PK5Gpm75g4Xt4tn/WBRxcA9Q9G2sfyZJHAx8NxZIk7ETOIc+kq/nEM9c0xMBc1+hwdKcFXIanmfDZTKAiHIJAtJAThY9CWwTBBzfooKCY1+RWukYpENm0hBCsM3eZDzqyTeOrh41L9yvHpIEyILboybH906CURNiMyaUDmTr5O1MNQLo+E9zsQixHTOhiNpVIEg4Zo/c0VaRn73juUl6b/miDmgTihxX3Ueaxiv6hBFV9F5h+nU7XZdgEfBjQhikzz/Y6ShhX7KNxvY8eVWqjALoN+nRHhTC0drNTa7uofbomRMg936WGqJ2H49tbj7jw5QSzdEkWS/4uYr5IOspKmv88K8IQXe05Liw/XoCteYJIbpHJkyFDdm+Y3kyA/R9Wj0wbl1t0xHjjG9B3BbS2DKU1ONm7EB4QqwUzPM4ic0y1EJWd3lcQUC9jFEhe4TcfGUIzxr/rd9NkSb/vKHE42+8ve2oWOSxgMvEF+Y6llnHYsLpyUgR62nrQU+RCf6FhJDxXUR23+vWmaJI/9tA1GJRsAyAhWGuvKwa5kmRGGWCOWMa/3jxieqoackEUc/qdPs3Tj96ANv6D6wigZAFuFOJzZZRdw2bsZBSF3fDneLJDTR4bHBQv6PN9DjbvOuCiXPYXQ1ZgopxuD4+hCmZXe0/tUucd41WCUAWSEmuhBJMC+1XFMUtxYHLHdXx6+N46AATi0asdnUrLWG0G0uKmQq3Ck2OpGsQNNuJuQv4xcwDd1Xu0ZY41koLmZdfnr60Zk6/v8NIoEvPbid9697R0WA0yDLfbWgDy6Nvtr7w7Mlwf5SoTz3YpImDOWqDjdx59K6S9BNcX5CuQ7NuM7mWXkbGN1cJjT52s/gz3ePJ75nDbOD3WweUcXPKtYl9QBnz4uRyXMuNWA1TnchrFxAb/v3279i4RCilRcpXS5FBWanAGnyZNSG4de8BaIATqQwQvH7USexU6YNJg1WzEp+TxhrlzQatL1C5nPbQtJaGau3M0HDK8uXA54vxf+YmI/bRSAU2rVOwtIQJOpoIbGjkkmQnyGy2JCuxldfAW7dcEXPjf59RUOoPh3VKM+SlSWn0a8OWib42y1qwhSRbqwkjyVlHXJ7OS5IlhXwNvxxo5G/QJiWYclp/tlSD/w+ltRVicHCk42J1LIful3NuE29OJlXxiMIt6XyrFK3EQityNvTzfT5oGzgAqCfiYVSZ3kta+a3SWZ05g0faRngMz8Z6wCZuf3gvFzYZZoY9w1836P8SET2RW+P7XrQqSfm/pWrCBpXrxRevVxbStBgDA3mf7SmKxvJHqdQzHjK7BCBlFrzm+u40e55u5K4DlwLnhKoCIvp3aVDXegKwRiQVcfyc8yX44cB7IIUUjmVxKIAZwg4PRivmqGki43CRbd3TTb+qqAB4zDZcdDSh9VwFrw+b5nLdsO+cV5C5m3kJZuKCh5WfIOWXWCgcaUJdJW/0JWdmzLodsgqWCKo6oIFJH5dIe/f1+JPXdfS7hCkT0u0XzojEwNVloIh+ttS7vPob1CTjGKC+Uc1O5xjQhFju+nLAshk5ijlc83n3HvSNbbNiI1imEWOobT3/Adn+xzyam+qOVf743/NnGAqna+YCDy9RYiPTmElRd8MXnrai7Hxv1H3PyFaifh+ZUdddrn4u8PG8MvjrmKDs1HSTUbkCafL1/jvRlBCTjUapMY2QydNpTN1gwH49Nx7ZiR7vCyHlgVtC4vAk2F/2pazyy6qd61hVs0h/m48oR0ydBXKow2rrp5L9B2r1D6jz3kn3BlEVP9Y4D9LZ+weSOYg3DINk92nFeWahmf7F1w4YRoKwQlWtJMBvcrdPhgryqHZLomm9IG3JAj+j5S0A822WRreb9fyIeBSdS28MC+iuIdINng0RsdtzgSNMenfuXAFySb78OrHLY96OWrsYnO8pK5rYhUU+Knt2eU3YuVeM78BSbLgNCXutxMDMUgGc/pY1/JOVtPrON0KjEgODR3JiGr+9AAlHAJcgGwNI6GvAs+42m7Qh82Myz11LcIgYLmZJsPko9QtG9SoDrUv+UzX4YS46LdYqrA9kYBP/Zyj/kVC0TH9DjwX9TQIxBuQmiGMxpq8KE7Bm3jeZzuluNCDHj5TZUkHYszBqaECRU2hSAJfkZ4pM/rvmezTjYWx7Rq8DNbxdgVfWPSFS4wIt2CFY+N3MMG3teM54zp64xK6Szxt6fPZQrJLCTTqNOiSjTustRAVpKMuFpP3axZn/RoFIbDILPGjyeu3IwlsQ8A+VVgLffnOEM8B7GxsE+xmiUQvKD4AuqqTpwXJulJoa5bfZUl5GcRcYywS9R2fAu8yE73VlGNOvsSOsxYv7pSW9ZwKEG/I/RIXQU/0Qpi+dANCMYbmJrR7yaSYgZeZQVJkrr15kwtlV+ZqH/1ynu+uuJUlk5r7ccKqwlpKsWh6RXiqS11L3ZMhwZyFxRyBeMV+WXab71cOkAQL7SOLV/GAsoCeDEQL0ur3cIXCmWRsoWOW+syo9eQ+ELqnho1OuI34HB29BCCaMcZhYaO8D0IhP6+LRnuSoyA3Zwsj03ujVDB6VmJQ2HR+8bEcGSgTbg9/yp2VBKw/vwKPOvPjgkuD215dmYS4VZb+DgVHvjuz8JIDVLVQ1ElpgVC/X1/HNWIGiKGWet+3UHMy7gv6JsipIKFjjtzoNEdop4P9XSN+MQkRQHaEsniIMeFcdL9jYl3OlQAaM5EXF4yIG72p3HkGs1/smXkAaPetVe/oNMVPxdnUcEf
W9pwj1rucUkcDh5qibrIO4MOi+ZTnU4XV9W7CmC0tyUpiXU82R1yU1DyjJMCXyZ0peKv3k/eLKxW3t85y5dvGofertU6YjqQCd+k9U0Tf4nOV0PxLNLlU7ee1KynsyPBKz4G++4E1dgxMFKkmB7WV8U9KP3YFQrZjdYnnR92yGgk5OgDo0pcw+dlvMq5H36NZtaFRigH9MXQ48lJ0jsGDn6MM1pIRC/jGp9lTdcHHX2k6cZxFaj6jNCGLEK2C7MQq7cFwcvGa3/gHy+9Q8jfcCa+vUd6dVDuJUwqfPk0aYDOoZ4DkBHOPr8bNCtEFEuQ+KeuutO5gtjozT6oRFwVCTOS9L9b2TWRozRjonFPrImMdtKX1rexqbRXDsZRD3xdgziiMzEsx7qmAwM2XVNVOmAoJiejr6E2yGfHVqJtY4oJSHySfRh1aa4DKF5nWt0uiZuuvdTsOh2EPLFFL8H5FFN9wHdEaKJOtajTa08swy9QXj6TCXNJuQUzMA9eep3+xInL+O47VsfMUyVUm5QorNP09Uh2yii5dUDE0xMPUxnXfXrjaTEvLKje97yzfpDf5AEnAgJOql6Krr9lZQCTEzSAREi6Y/NTY5kLyGvuR1M4dMhk1awBNLkPt6te1ZbpVckLSpveo79Z1MHlTfcXZ1VsdoGcomN5QFh08nGI5oSREI/2QTgZHhbCDb25aGLJ++Eu7yCK62MjFJj4p0scmhPybbVTiqjUfmGxA59FzaMyxJ5KvYSPrpzjFnqXB/x0XglIQ6rryXVJdnXYnL72UitdJeXuNynh9ZEaNU9RyhQYX6RiQJI6vYK4iVPDeRjosKGQaTo/VvBtE8Q7p2VPbGat8ScQTioE8Yb8+2jmftbMvm7dUv/2bQ02EB1ixeNz5EiqWfTGds1K8BEMvYRxKpL6o8Y9r5dJ2+5Tv3lDbm8lInQJLy4m8yx/2hSTpnAwMPgo249KACb22cwcm0gVNUIOlBTJvFRrhr9rJHb9Poqh4Fwwg44l9mG7Bn7HLpfg58PLCNYdfUufW4eI5eazx+9cb+/Pu+EzYdjCuiyNfA72HL9YCPdJqY54UTcZOAsBhK+ZXS4JDQ1XONpGay8QIm7S5x7DeLH+2XpiSk4kuSZX/Y0YCDheD/+oDfYDhomi9j8R4rVegt/5rMKITLYG6vppaLKEivwagQRB/HplrZfKioFetQsxdwk5Kbtvl6ZrIx6lrCjBJS4VGcCoEZvk8MWTWaELJOqnqGiC3ucjOgopoZ98UrDjDnsEbFIJF3S7dFZtSPd1W+pUw2/kR4JQ2e/wIBANeJG+u9AVEJNYa716he5GND+uxYdRyZKIDN9m0IBjUtHZc16CvcpHogL/JeU8JcaiRjjpa8Drz4OVVeGHz5LeAH8LQEC99j/1gLrW1vA+T0tQb9spXN47oKP1cc1lM3HelOMVQK+K0bqn2HTZcrmO1f1col6Ex5j6QY4+ikdmFM97cdVNCL1+tLw6kBk8CYyqhgpUpas36kZaGtKqL88xbSw85KG4zzIVKFgfbHZ0vHQSi1t6TGD0jhvwxrtuPEjcgG0Suk0LfFwhD80bMN9GQYkZXhP9djPV957JUe4aK50/0lBQsvqk9SYlALZpdP8lJDytH38emYXor+LqTGY/bBszOpDp4l0PxDEP5wkJ9EbXiChh6Cs54Q9mFOwvGjjQ2uEbEVbGKEjCly3KlS+qt1P4h9LvNoBDMRyVJOsnRsJZNokJ8J6HDJTAiLKSgDbsTI9HhSF0bo4MfrgoRgG2QMt9jnyvHSsZ7q5Bta3tsM/uaerzAljwCcYeWXhsmI5cg8cMGsOeFND24Oxcbz0wMp75gUVVn9bxrEYxNLjycE+jtvyyrHvrt2QDnTJclIRSb89ioQhPWE/fgyST027cZAw2z2+fjbyJrwF0RwxTphwqoqC1UvPNLj3xNSY78GZqD4cAo27pWEktAKJEGz3sh9SBX/YL4CcL+Gq4XSh5gXVDGS6lgqIT/c6dmR0F7JHMNrTenBmcsLgeQG/0aGEH+N6lxPdm9D1Bwb+i09Yp28G+gJ8wuTIxtNbnYQ/32XHRmE7Ea+2gCQ5/C7MVRHvQ/uZIK+Eqa4SCYfIuK0OC+5Mg942baBHYwdSd6bgayAs0h+2+UiVOE1pUZN+5m8d5hXSnNyVr2N7DqBRRFqXLgTy+Gt9qW1ptz0alopIHi8Bd0Eorj5elbW2y9gUXNLCf+Blc3WK4hr54a7kZQnPu3aGzP/rtO4dUAZRMnVe72bkvjd3ZVH0dY21SpDhoUX7MPqJaEciPHsq8jxjCxiibdq6Btg15wpJuAkWhJA78JGKhBMTX7IY9Do9J/uD133dQfIx6Un7U/CI7NJoRioAkW2+eqz4eqX1IvuUOVcLXCOMq4s8WC9ZZolFDTkGypuGnC41awGLUv1CQy9C1CNGJM6rBlRvVTItgWnTpc2n3zaGxW55OYQklBFI2Nh9CeAYFWzidLbTNlmD34+L/U+sszinPm86reftyZRjOl8H3x6Qu6JwzEZqosJFK/F674rxENi/HeMw4aebPWDrAI30A1VlGoPpjtZyTak17eYIdkqV6oTdVHy7+Tsb9GUVk0g6UfVPWy0wSJDt2wfYnC20m3ef9cAyljeuSjOn+5nexAQ8lWRWl8lyfLZY8TkzPGu7iF5kf6auNx9YowvUlh/6gnDRwarKTHfxlqpk90i3N0NbGNSBSpmyN7ZWZvrw/Udq5NdZs+zFEmJMHJo8zJzEQ5UiYURwA3gDuIdbRRBcmqf0QANkS4aOtJVCr/7c5mYpV5xTW/anzcKtW8ZahrHwwgfPDsfw9iZkgh7WzSNfTczgUnMrkrxYjmVOC/psIEdcr/WoI3sYTWUdGKwDKLbUV/pBF56BLjvlpIKskZePtJQ40xgjrxxXkgKMa9KCrkM/kxSMk5TYrjHxHRxYSsIBJWmwweLL0Pcsi4u2Nwa82peYTyKIV16ObR31XVmZSQFnCki7uMiuYBYnEO6tuPcoR9I5ijv7iCZLSXEn9UOcNY9u15NHASRsRRGdiulNSikkuOOs6T08Ciu/JqNKb69pBH3FZAqnBtyV643/kP7htf+tPLnmYOPIuN72STFo+z/SEW1b7Q/wEDQIUQfhlxj0saK8wTSvHbk49LMCbnAE/ucX3kpvXqndhCOqb3nP4bd0T2wZ7mqBoSLgNMLuEAu7MYE+eUfU7Sf2JqesZuDmlDJ/cXeljpXpPZ0sepzHY4wCbJDjvhYo4zg3TGCXwD3z1KErgwhBeYQQho55TLy0nGdZKU8mMQit4NYDF0O9gSuUe2CsnFVU/Pw2WwuwVL76u7yD4RiM5vfpXi9+nr53cST0EvtcloKlTQod1/pfWfoesjbSwUvfhNoC5U352/xs0Q6DM/aw4vRINfYcj/TcZCaUA4XdPlU3EzLp41atZrgzaPscmnoSb1LkZ5F9erle8QuEu9rP4Nne6Bt4lz3PcE7acXBSKkKgOL4L2lDUzpLYMeJAptwjPlljz0I3uy3Ulto02cpzm4oK9XiPQxct4ReeSsfWGd8x
+Jmcs8AxyY//5XsVuy6ioXfSnQhaV/MX69qBcCfHCnEMiUfEfXPUAgNDAQuiReOWxGPsAdhQDSsFrljUzk1/nuH0qFPN7ycUJYhCKvQhpvEawhREUmqXmCwrotzCMR0kBOF1EjzWCLG62BkcdzvICq2+sfZZDMDtqn7O1FjkaP5Fl7EU05x9g+Q6Goxzh3E+NhibyVP1mc4DXjZ2GcxmzLY80FXum6uxg2F5VphPF/QpzkVRKHZciELZqF5MQZQwCltkVG03a5XAUkJ8m3S12Lcrrk6AQlQ5jPcnX8kQmuEUjuN3OrhxwoKh/1zRw6+PgyT3NrKW5BiV3b+hBssKfvuzlys1eLy/Lteh1RwpVo6FH0VtMt9kJHMrNh6kqYYu9w1efDRXyLmrfkPw+ocBwHoLAw8i6HsGuIcSCCfqigT5U6irwPgavFNiIWNWEnwa1VfrDwhPQqLEfTk9YQMCfA9A9OD6HWbfaml+vspEgFd1zu4+TTFjQvd9Ask/9ia9O8M1PXvSqVvpiA9z8sKOqizTQVYUyxp8UzsSyQS56VndjQlzXdwgjZg/6ZDcUFAzXK0/gU9PYsRXFs+4txy8HVI0BMrHoeiJWBP2bMPJ1d3hGIhpDhWsXhvAawb2W/jA90R+ww6XLHyXRcCpheFVUXMLYI8d0pVtB9uZayai3b+ALp8R5oNob5rr7RBARJQBa4bhCANZU3TkSS0s3OiP9Xo74Z38OiP4iadN7H3UlirJ44Ju8adnTxrRDA+eQ2AcYO9i2VktZIdO/oLsblITMmDSyvTM+MG/5BlLqGR9zlsRkUJAb+5lkDSBSgKgsgL2Ol6biSMYPxa5Yl6H6sTISokxEvSbiyi5zjFvo7xUnLp0nWmi8g/aUoOTyc41tsMimKlLkTH9hnb7hqf/r/z17zXw/truqVfONUmbDoMGBsr63aGcpNjQfA7TR3tHTJbg23cmKhk11i7++4RAhyn/QmyVKQ1Qws8Xn474QlttHQfySBW98bBlEBexS+v+t67jHSkff8ZRVVUN26DjknmAdWPyW8g5xFo0EY4rZXN6+vTKZMpnNHxCbMFYozJ1QfTBEAiZqDCFIssiHvOcoUbYwjosG2WfUhwI+IHfvs31wxHnDG9Oeo8AfsZ9SsT+pmQJUZLAMEmPbdPy4BrefOh0nbT0MwPo/aeBJgiRBo4BM2yDHw+27Brt8JledD88gK95k8OUvq7apCR1CxpagSGyY1zL39fwKzF14b4s7ZxDRR+3KYoETJKk2l5FC6ShSnEcF92wbZrZNla9YNgrcHa9XE5GxnRytLm9V7JRODJO9hZNjV8FaCGyX+3CVVhut/n14r7jq3RYTiHKhsCqxLgW+itOHWBRE0YN6oRhQuqrVIrl7VX+1Ejz1pgL6iY4Y7PrfvyrGHefzhMGw1bs+pun03JvY6TZfFqU/mT5eesHxCmb5AjMcZV9Q79xMDn90W11NCnSSahoUocfYhmKpbJUjz7cj/5y1lECQUUA4mWzSA54c6Eyy4tY9BfO3wfPQQ2HvgJ7UhUeae8g9HUBRAerIlpcJKmdIUvJCUCHTIjcPHiKvdmq7obds0rTZ9FyAVC8HH96PvSpD1qBH5qlm9uynpabJqJ+hDOkjwn6TT/dOM0cQ1pdt/JhwQDq6cRte1KyHF7Prx8KDTpNRjcpns+cLy+aKP40bsYr/88Y6rdVlEVrOwhQebFMqjaUSPxQfi+DqFFLB7CYvah07TSqUsbT3lKFbdy/Q5Ok94QZ2tlhzlzp716gQxNN7JrF2GNWqVpNMhz4Z8EjoukaP0M2HmNoIzme6stbDBBLeUVtrazJlp2043PtYLWfAdZmYUB7FnkdEXZE4U8NGHYoDkHUWY7b49yyYSgCpPVL/7OBQd65V8ST/M466v1beKsp+4L2O6kkcrbtRbqtkDYqN/AxGTP4Y5jy44UoRNbawCDrkEqpY7/o1aginMbN7sxeHKaBVDlUfTxcTDhXhGoh0ZTpTJ86FBbaphF+BT9kypbQFfBMgmbeXG6HnmP1jcuoHvhTLRFbpbWweIXl6nEWCZXBOfDw5+DrcIheiGH8vclzWtGwiJuQ56MWf0j2qL7AMXMCvvZYoK+8jfJmqJaZ1BKn/jMh4kmzPE/KWK1LT/X7oCwGngUfhVG7jVBoUTDdIMW17dylQR72eulQyArKd91porXEKP5vUkps7woBY2U04yJfWWIRs2e6ldTEsd55LZXsN9Xak4JxxkpKY05HH8uLJjoCUGoEnssgPffBEM2tpaEG4+3Mps2XurlBM8HsOfjlHeJWeXuvZ4RZmdvyZL0UrxSKEWAWWbKxocZFWf7lYwVs7u/CcL5XS/zrXkPGaq3blrgaoDtPsRxxDDRS6JZUBn//USbFQhPlyNqc6WCgiWzY9wDawDdPbwXBxmA7CGsGKG7C+3+1sdVjwgkmnFwWGq0Etl1+WViy8mHSn8yciL+l5bzV3tXZuS+7iamrIwaFtE3Ke1c+erJvUJAzBrYUTiowF+AqjQJZm3OIwrnLSn+Ulc4A5YUdfpE4ArZpF77NSck+rFF/3Jg38dOzY6EyIk4bUSbbdADdgpFuKXwAVLGNuaJQc6LhhaS4KFH6byLzNxUrdJyWzg6Qmn/TJTHPEzICbuQzk+I7gGMACdUfaq7nChNcMrizxwrKSjUGas8mHL4XNTefYAJxt3sMbNReQES2Erz9RiGAqopP604EDg0dPwuiC4+TILZ2xM8Voz3jxTR3/YIu42dOK9rrlS6SLh+LCwNLkTcSIukXg6LUwYx5AX4gga82xgA3EAAiqGZZAckzVvnf38ke5nb25Izc5zQ+0i2BBqbtu2ipqyay9suOPgetw/R1OJiHEXWatTO1j/us0zCrIy7wc2Z+dYNgZBWnBhCqcY9SWDArUU94IdDprobgL5vcoZbFUJb6f45QRALgdfmQtlUOUFrv4Nacx0pcoQZ4F2bwaU0VYUW92sxlExsMbvJ/idGg5CbP2IsqO4gxo7/3IUrmri1WGvVvY8Mffv0sV7on0dyLFNJs2xQJkFjbT0qqwovEM03M3fgyZ1s9/0FjVLhCup1MT4AgjE8m/AtSEiOjRgIVf7hyo4wQA935Cec/86NREF9SoE1RjsnkjT31U3kce64BMX9/bpkSvDoci/Yra75thK8J7G0ZZmjMr5WOTfr5cUF3u+74GwB80yPBUV0ZWJXBCFN+Lo6p6Zb7WRhzpUPZpb7KyzRNozahsW3rFIjuUafYB8uFp4ZNgJd7YEiAAjecY9QxCGiKQxj2LkwJKO2+RRy/JfTQ+LIs0BqYbQBc5R7E21ZAXj3i9cfHzGFB/WtRy7kN0tkWIWWwwy2Sz7TasSTYR+FMNh2AXirdnOJLPCcYPB2cSaEsNoZfLQbcAbfnNiAX8boIz90eoLruf6IpcnJNpEQlQ3U7XwQxZgzPGufPJbCAV5G7EUQm98kcS0znshf4U/mVFov26bRkCMp5JMLe7CqGMSm2pzDbob9yKYzq1aHn6HwAhON7HtOAiz0LCvUpwiz9RJvFqJK2LM7pI0E
I7bv1eY0gQDYqC0wGZ10+1oi1HtuBU4vN4ztrP/LiZBKYdddDb1AcUPmWVA42dCuPVw7PW3snpg4BlH3YMw2TtgZYFs/8fJBBquRaX/6b9neYnwHfj0v5UEITOEzALC+wRRvla5X8NC9alucOY8YaAb10nVhUG5FV0M6LPFQCeSYrnre6dY9AbI5Yb5opaAo8mAdDoDUaMNEujMdYRuWw1i2Ay6LTaBUf8Eb68R1Ea3nMIs8piZflAB3nAdjKcdRRrxv8YIvXR8w55YFvbAZmBcYuYbQ8H6DTLDZ3rg2D/t/Yd+YI0XvJ6dHkCly0t5PjSBoGnjZPslv4jwmv66E+xTbQ7AeHpBWuMSeA5LPoziq8vC1bFY+Pl5npdX+/jOI+F2QyJnaIPAZj5dtRYZCosk63uejR6XkaB+btbEPS9iqUe39bj9Q59o6xi0U2rQVC/SXQC/cRaIV4Q3LAcNUlZsQz+Mcfp2H/zPYD8Br2MxbAQmAiKGVDu0NsBqhMpZ6tHIjX1ZCSDsl6vtpmjDm4YTN+oisFsfYC//Tji4fxwWCHqac1f3xaR//fVBXAMUdhgzloJbuY36+072dcmgSbHaha3RbwxilGrBIDGCtAEDyfa3ltLBsQI5WssUOn1a5uLQl5VXC+GMItvHDgwPxzFww8m5mutf+cvcKZe7Uy+cCTrGqAfVkML1XqR3i9rtnsRE58OISAwQb8IDqz1mjPZnNnxkpFBuwtNC+ue9sAx7tMd4056EAn4clFVsDFYICqdPpqaLoMSSniZepN1rWFnrVSqdMY0jMVNNEzF3JSH2EcEnV6rsPoDyPsNDj+p7KhyaQlVng15JpBGvcWeoQxwzisC1JnsIk3H9/6NHrG8s0MrHbWDP/XyIm2cteZc7kMFWMGO06PIbOGvB7FY8kTNDeITSsfB03HP+/rk20CAGiVy5THFc82ptbTVKOzZtpa1TJB6ntBFRHp5u0COw5to31BLtR9Fm8P/ye7PQSUxNuX5mWbyGIH9ZW68Q0VAXFINvAY0ZbUhQJkzXQRYVzMtW55UFRRzFiKvjWDBgBZdlxph4GtwqbODR2XbK6lygC1F/S3surjQmKjCodHzR2HGzyYYeafKvwb7aASTcKS19DQS6iX1FOjs2tBSZhFeSn7LE4cjGAj4tFljdY8/Qa/W7UKLMIGwUS/WKlTD86+7W2dktlMFKhKUnJr4sLA9RBEiWfkDs9EA45UNmA/d6cfoVwHBa67ckCNXF72OncMB2a3Ja+GIOzYvEjvQqYaSiseZclF6tcJGJCIDd/Iwk27ojLvXPfkw1kcC74gdgR4OFJji8cE+sfw0XXSVjvQqeIa2QAoIJm6bobN4lslTMNlwd8u+jrPHZxxg1m7KM12WRTB9tEFlbvTmW0v8V5snRSvrwZvYcKoRGOXD1QEjtXp6BxBZL1eQdLh1oDpJajo7+Ow0SYoveAWghmjy2smRzEzkuepEiH5cfAvOWMgAdKudTbrrKFMbdzn1MODe3Js7L42tTaHzLewlMd+etfvrBZjk/RoRfbpwOjw3KXV3dMxvO+HV8XdK76jwLlC8ZEBCMOemfo/eeb1VmprDnQczfdySaKDrpFp6QYdtFRtCPqxAM7VQvSjBfEcBNEY7PZ9mJiH7G0vo+74lIBJaIWzfydrj3ry6SFDGkNRbXtQw14wpbDVDEUMG3YYi18Yi4nyXCh3jHLN3sBr3MH4rIu/aMkkangKUM5yj2nYffTbPyIFMGynKcxkhAl/2lX7VIDJwgKn6TBZmL8E0+tzHBIhG1GreADC4P9Ga3IfN8W+AQkTkbioJIy1K/ztZl2uY+sFDKKLRoySWtJmY8kfO2r2bgiA/fhPSBvzfTjPZKmxTwm5czibWV5lixOMd9k/cuPpjnO5EHIMx/Uei5f80R74kUAynpSSZcQZ8HVBrJaUgi2dL/UyuTZHrVVAQqkIsyCQJ1QeWkPN0PQD9rvvK91QnLchFY7A1K3oUMRr1XSRTFazifGU2nhX3FR1B0ctHRmDvNfXypBtkP6XkYFdfT6m3liIypcMvkIMYoER2vOdN5tX5jcAndAwrv/Bbaq/l/Y0QP4J+SIlKK9NGARdb6h+hekGxFVJaPn2/DY1aT3zlxvxzpvTyFlEkF8n2tCYAyksoLLmG+F0PFwmXnYtEoJwHy4bXwSRJe8hvCCEgQRm4h1PBW9amk7A/zdu0XVrGvnSrXw70SwvpR1p+S0BFVi8DNpj6yhVAy8m/liz0dkwe9AlO5LZ0K3YcSbRtO5QD0RorQZ7arPkxv72cMqJkVYUINivF/JkCXsYrA04CdjXKctfoQywzjsU80SIgvAHEhoXiT33IxgKndGmY5DUJG2SWCUidYT0dXlkPeObXTs/AYdVwSLrZ528wLFucYQElfpOnilt9j+DPKP8diCI1DHeKyKMOV9Ks/E1fuMGSESTsEcd9NW4ELa0VThEW7afMaSRPKVFmtU49NBkEoad1TjI58yVXDNu/Sofe7opmt/KzNHnUaPvBpQdDv1bMGAZz+4blvaMPs80O0wTqSaAj7jPQGXppXQxdmAk8U2aDEIRDuUjRVTWEonEzP3+9BDxNZqkXBu3svwf2iUwE8plOe12ro0+8+fBVUg1NuMSf1gKC0yrfylVbBEA9uVgan+PmvRBxoelq/1eFnILVcPAzCABmHRyVL5/iaUaR2bnoWcngu6OU529AwKpYN5o8tqWiKDmT6QxBZwAq9JION/W/I8FULRbqtwsQUer4MoI8/ExvHc26/YHjeRjm+HTvi1yzhm/DMANzetkooLpTkuTFIvxUgR4veE3Dfv5mgx2fGGtIPQVqAzgmrnPYj3MphcTMsfMro19LTMR/3PBKpus7lS0yxnz3fi4XRG4gQYCrIfuAkIlROLSkp5f4boCIiweSZMS0v81/46e3uStsQ586Wzq80pGJv3tU14H/A9cib28OB4Tq2p+SmZ7pxDjUfqosgGjxhFGX4wccdLBjxeHfnXCmuYds5UTJhWH0vk9yVELm0rc32ckagyFyTOCWP1XrLa/YTWzNX4YreF3DnUqxVyInIO+dohe4xFzyuiQXcOa0SirGCDq2c7rLXLfKVUt5W0FucCwcmAu3uHx4uOlJujIDBI4G2EFzhnq1MkvkCg0OZDbaFnVP+YsqrhcM+F7dXMI8Ueh6SXyoFdXc4PirUlVPhJ7YmV3mTF80znFD766cjbwUEwZh6M7AMMia0ME8ZsDReikFzccGq3OZqGjAC+iDCZXR5E8UIN07kNNEH7ZvaGHISjX5WcSVo75UQOz5Zl5uukVyDeZSo0BgDHNW7+JNIov+bqIEPneRxRu3KQeYwrZHaDDaEv4CKH6ed3qj+6HDVbF6ci02L4RhT7rjjadtzUQGIZiNALz3mvJdWynAlNsqIxRtADr9QZQ+eqjyyU4Z2dgp7LSxf4T0LYwemI1pPfSh+2WQMoZf9RVMPx1y0lOgHfxGLiHzNqiXHfrT2hXWke+k+d3BToa+Hr+vPEI6ldn1swVHVO+hUKw5zSlVL+NgJXNi397O9AU7R
fmKP9gx6yt0mpOw5vNc+0DIzVqmZ6FVgbwv7e1Jj9nGJ3OED89OnkWcBSzrv7t+nf1rByxPu41Hbje5PFxHiEDMJkeYMOEDcHTqqHaqQWz1mv1Iu8Kiz7SdOpUyTlQ8SXZtuyTRNOdFzMqijJm/J0kGu2aybc+Y340v7VDYhCSadEsRUurL5eONzIh5gNo4uJdYSmX6Bs7dLJJPl8usWZaWUWqPZXdCdSeNDXQgqI3wrES9ZMe64Hw/45fFszALI72s2aBfuftzEIwKmdVn+6my91qzYgfhVX9r8lXxM3/wWdv0b4yUdZuiEnfDN4T0FkX3tE9sWWpYzpLmZhkavj7FNK1EF1+8MA9Ops5kgwmgQs5zmnkZeYzYXiFsSjB/bi3fe7z+EU6Afv41Tbn5X+cOflVb2RFJULs9PZhwWMiu7JPknIj8hWUudUYDtCa8DZDzQISRxHC06xPxmTmfTsnxOtqVEDxFNzBeaR506aSo3WR+cRqV5zy2LcYBjP2HuOE4F7RO4U3mY79LyAg0HzJpSeIaanXCVwd4QZva9XM96WI7vvG/kOwNfHRO00ZZ8BDm5jhjcWEnDs0x4U+H9i44qXQ7S8KZ6eD+lTqO2qkp/mBDyd1ohE5TAT6vpE7nN3dUtPoOis7QbUFnosWcykzaGGnAbrrftkiFUAFaiFIsMOk+hk5mnYIiS5Z8MCC4+o0eTjULbLzFN6SLV4QRY6LjW8a7GwWyEwo3wo53HGy0vomzl7RUyLWeWU/swTtZ3yGGiHTMLLWnnE0H1WoZjew1jrTmajMbdsEot1fN2SrLlI3K31mQ4EiqOHTUar751wGFj5XegEHSpoKMmeImJRnkkaQlN/bll9XSijIksbwl35FU770u8dLLuiyuU0fVfofAHifYTaLb0zMKfwWlhhSh7Nur3PzNOeVia6r2gSN5kOmZGnMEIGpk72hkSCO+ikIOdoWeWeVODl7JXuKh7O55YkFEUMdsFonHtI+CAuw3+yJM9awL+hVwMoqKPXx3qxALYQ7E/7R2DVwfwTPvb2sSUHoFHwSsV2CvpbKduDQ3SXyDw5x25W0HD7kGvO1ECZ3i0OIiV18i5HSWgt5G7qyYB9o0Gil+6EZTpVdEn/iyeuaUk/2oZvin7XlbkjuXIjUUCHg0lktUsVsWBkTZFUhaFpdyJbiEe7DdCFE37EfK7wr7sgViBUSOFlLEjoEn/GA553wVwjeLfn0VoN4pMxGorzhOeUkw5j2B0+HvZs2rFtHP2hsFTaX3CGNTqAxE/qJNbQft4c7eyzJkjEk6996vmRXwN3kG8eOrgj1lgcwo08wwrWtIPkvHnOLJ3YvFSZPpR+uN94e0QpQ9P3tSfq+LN4oi2CcnjYnF6b6PlQiuPCrUYpA1hjCvs02wTu3gpOtJ/Z9RBGn0/lQOSwZkQQ8hbLvJDZeu1iry5gYRDMkZbmON96kQW7w02xT5NwaJHV1kBsv8124XTEgTJKbIkSsWJNhHgHMoYM4cePy/7+CLKymCPbkT9kk5dboP9IR2EGhlHPm22pA+CRREjGzZ1MqJaHj1KCvkyMwG4qQEB4V13h01y8YwjMW0NduEohYFp9OmFy1lEcaL9mOdSchwSBjQPOC473qoqiqvQ8a05OvxgjrGYuBS+id11eHwIB8d7XM86A2pVaiiaQLJVjPmKphRkkdLWvIH2m33tP1fxhzU9UNaK9K6331IWk2Ljbn8zj4e+OAgHkV4xUZzvB3F6h2E3y5uXoTN/yQSDKGGSJYawTS819bzWRm/bc1XEOaC8h0dIBx8KVJcxTOcfZeOO3SJNebVfzAAKnmxbVRRZ2JeFdlEElSUUGTPGhwzu9O0n4lmxQkhceaZ7yWN+rgcBFzVbrMLaUY+cjT1E3tzuEF3Q0gAus6/xoX8N7vp7+QHDT8OgOV0RW2Pj6ZOBEsuty6iLrU/4e/jpEOTOsp/jLDjy2bX1ylsvJHpC5wISF0Bs5UBJTqmW1OkywKJ4upfQE+h0Pzy7J3mdbYSE25c5w0nHCFBfJ0riqqsE4WZ0HF5QO3Xf1C8taPKsukHNzCWZ/JkFGKlQHDz1RkhLTnBl6YpR1L4VIYggxE5vxw51l8ZTRkREP0ukSKr1jerY++R5bTIDx2FJlm0GG7BnpziauS9tvNrFPv1StFlTz7lyebBwuIvvhXAr4zMV4DAc1+E2+mnkLre6CRcBsfKNzMcs2mACoVbq61c1gyItDCmn1Ohp6OCiRVJZHmb5D0G130x2gbtO6ziPF4XZS4XTINftyv1PuMGs0BqJokCIbTO91APiOV6b1cSiMz/3Npeinofh7+XvRe+CEiV0PzhetXjNShuMRCR3Zhg6OIO4TRzwRVfX2ljXjlHBOqnQAlFIkbPqbNm22Vvh9HwT//R8EeIwGgwhx7Fpk/u5oW9gPCxKG2DdDBJ9//HHXRL2b7W9C3Gool046G50wZx+ADl1GOJEDx++ue8FhhfwaZ/hnCdAdK2bt/v3v0SYrykxwMvmUcL70Q0wf0E0wG+ibMcBELT5qwkiDkotFqTQHgYZ9QjLsLb3wlBRoXpextZVL1G9zAXQTbVfxzutIdEPcUyGFe6c6vuGIDJrTL7sH4ftSpojZ1v5tsN+Q7NTJSDID9jbuAJJtyiDAls82Nnvwt/HJdQ4or+mawLJOOpDCcObbgVVIRzzPIZ6l1rVHAcHBTeW1V+tCqNE/OrvNCK/8wqXmsEm41jTLlZV+g9QLD3OvRUTMz2sfSwjTwHsn+42MKWKkAHZtEfx1fTg+QlB3IxDbNMHnnziIBDsXvL6RSQx/BPisy3JbTQ5+9SHByBkpf/COdN8L2ahZc5vqmzfi+A/V3BXna917h0D3So+6qx9oPbj0XeJlQ6VJC8picIcjaRJGmgtEj6d5aBKNnJLuEBDzN61HF+/xXbXtPASbNBHgMV6qRZO35mC+2gIOnMIDkc74U3RXaFOw8iuJJdXYHFhFu4NMIeLRFKXtsyDMmAZSfR2vG7uWqP7cb56OrUIervLHII5Aky0xwiL49vDvhvfJjTozNerVvNMqvdlCif5NMJGqgkv18wMvWJIVbjnM09eRW54doubayebybpYRGNFLhGLhZhO7muwfHrnsf6c1/E/NCrfHsA8CSpcoCH90XdqkJYPRGa4CR7I0IYDGr1ud1KZgDXzRTZ3XZNRyYh7huu8rsEzXRh83GRSrMmcQRaFV2YMeqHk9d81ZWSxDQPhsvBg0e1jsC6I4uWvsDL3dyuCqRCVC7uH/UMmG6ms5hHs8wJL/a8gt1z5RYdugiEo28ggNaC/8J0v2L8qPnrCq34Ko1zPOXyjDoESxysVKm0v3GfAt+uk6C65ziXMB54YQT0d5fw7gea2x3xhgB0NWolqjggam1xayw+Z0U9KHaYuteystaNwkEqr+O+XExfN+Dsxfn2pEaYmvBMTiXrqsR57ByMHrdPDBZbSpQ1k8wzkJbWatWI1rEwsWaV5b2V7FV6k+q1ZG+ltfza0tO8MGybbBVJSpCrRU5l5eyeO5jpYR9dwggW7JPkR+yD8EZ7
GDAu5iD35l3jMYdMnccr/YYWup2mtV5GMRUOYvtA0OjCp8pYBKOdWBhqUOSXE5Ymd44lsGdN/GBA/iRWgs34Hu4ZOjrzVe9sw3TOFDhGkwb+f4lX9IkoMSpVdl0ENfZWo1S5ySaE7YvVF0Bzc/1dTgdeMoCHheql+YDCRfbwOJeCkK+nC2vYhViX+YtqpIW5YGXVN72Z9l9dPiksLggZpSg5HJlGJNeEhp1+bqZ/sYBBAk7cB9ynwOpzldh4UlPehIM8z/2zHcgfnDK7hKU+wXddD62uAKJr7MXqGBnTP4dv9FlhqDwZ4WuJzpgocfWFE/j7FSvOB4yUB1RKwTJpLuGRCgJDmViuBIdxUTQuqWxLazWkn1Hz4DGRaeubhdVAR5VTml5o+h+9WDwGTSRitnXxlKPdtN1bpnz7gHgkvT/yXp0WZ/qJ5wIOjgvst5yqAPV0rcgm1rg3EW+on6HQ1uGTqq9aVkJCXYQpBUXylKB1OpFbZExJT0viGaGncBbc2DhDGuoAAc3IrnpvxohyZXjZcChXpUIkKpaSg6XzObp/B142uir5Jc4w12ftblX0sDGIDVYsE5rkahEz8fnSPyXvug9t2Hv33a9Wil+54KO/2nMXYjcAmV5VXIqr0mP9I4gzoSGIuKX/vrSAug4+bsSBqJkUcqWYqAMNpthsa8fy9RROpUnSghHqWPqnDr9MHmZcH+m1IEtdb4SCLDB+S0pxQHN67CP8Y7ZbNfhqEAJtvS2GTmv5C4nVO9xySmhbuMEkGbv1uP/qUOwIQiqaewmxApRx42cr2YfH44O6GqoTLXX3UV8aZSywhu5F1XBIwT/rR4X2xBkWNlq7K10JGS9fLb6v7CCPAg8SVwoYTbrcPlhZWZcE8UNR0Ah++knSRbMomEmJ1cr5RQ80v8DtFulrWPqZE233taehJqYRLh4LoBrRoPifBV3TwcflM2k4rTLElLMZniWZtCCYJTWykM9FMlrwtKge+mtBSlg+6wlSCpgf3zKEHWBjpYb6R1pyRgTaA7xbkqLg0uCjas/kxzpVFHQogQc4Z/WcBlnoGQT5ocdUNpjmJWLWdEwWnOejbeAajWFUNP875AM/egZpbD0J99FnHLz1XNcAU1ZVZHB3RWdpok5dnRFiOiNsVT9mGdiYMgmMVd8Ww+j4toGEkQZ6c8wvGl6VuzmaaXHTdgJpR/ib3i8oKzOBCSi+Te0RKsTXDiDVOfI/NmgJCFvuv8cRgTE3damcmu19i6OLFhOEx0YBKGWd2pRAQzfbFHTmfx1wUvOYoOcO0gXF6U1ucxsGxvUc2ABlpYEo91Wvrm+uOO1tLsQwagoR/dIrDX1fpJIp0xZpNuRkGlOUj9R2w5+Kmf2XTl7AdWS1MV2L+ZRxvKDvL2bSQksbGr5OuBZCJ7CFyaqUVuoTST/VY+Bv1Rz1aUx4/IqTBG0iiXtANhvNFUMB/0t+08FwxF5E1EXSrirjiUfXsif30CEmPG8pyhm5EQwxW0Pd9HL7sWSR6QAtzLGvUM4DNlgep7+7spDBkaL1B6Qc9WQ4QhcOkU+uqyfCxXn3JurRIWhZX+G+mSUhKbblazOR96sUEM0eDvPc3KvS/aIk1fZgIL3Tg6h16LRtxakZJfF9Px9e6vtOKx85vsWblhiPjD7sGQ3z0REmOnXZMooch5uauOm3XUoXxd6ebzqiFoq4mBba39a2ofJjT88A7IQDeBAl7WiQxISXQnBr1D7eb9zvCfL20KBE5lSu9iQTb1A9RZw+iYRYWKR6kSnN0k7yX24nHiMd/V9fjSqvMPxtr+g1/soVdek/Zn9p0NlwGLqyGSt1MkWZ0pRq8qhze7aL4v8X8lLOvTae5po1tK8lHjPxp0bJxXeBMzRNkpn8qiCwk46Kz/JUS7QChLjF+5QGBmAlfdEbOqnI7WGNJO3/IwbTIPHQMBDjOn0gV/Oztg8d1ia5ciAZlUGoA1NbCdxvAyi8btoze16pN5E8zxCV9Gv7OJ/bfORChq4/EDogdOF1STQKRoMa/rd3Bfwl9RaFt8Hcs87Bt+5/MN7XndIKDtH0zBjLz8+kEDYf2PCPMT2Yqc3QJt1D85BI8lqIR/T8r60YldemM8b+08NemZURk1i/TmJs55qIjApUzkCLSPpKy4nNfZSHuS0vESkon6la8RAycE6D0Fcze4XMrLtis0kFogh+aiVVMKOdLo12OSnfn72NnkZQknX0sSXW1F1/vpo7h4p6+HKigAZhrF9RC0xP6Zx3WmBuLYhiF5E5/9ph3IYcAZ5VzSh26yon2EvvaKLHrTMkHWrcMbTngwA2teVxr5FP4BTJ+Btm/0cq2yAmMuNxBmdm6IQsS/1nxGwqviIaF20OLnhu9V6+KYh+oLHK4GsKhHo/6m5nGq+K39/ljKL3aBBcgIKy5En84lKuQ0EC7d4JWOxLGdkXpqKOa3LaGDSv8OZQ8ElQOils8o49EkjHZXIR0gKRzheY9JB0PxvIUoWOMtfv4jFfvHZxCZpI4Q7Py4H2RIDhk5PpDrk1K6qmPhRKfvHcZBhxfewE6M6vubPpoTbcf/Z+hAKVtJnfs7ITPkuhE/LkwOEh5c8hdBGMi216S0j21TTOXHlCLAhqY0YFArgnzmKjJmg8e3Mq80B38zEIoOZ52e6mqBz15wfI8wUNXfsphCmVIVIXxNE8U1vjYLkv0IVZ7ipxPgAS9q9cAd+PFjkEutHHz9JKROBH4ZtvWvuBx8ovaQgKUHZomgEJGfQnJcK3iaM7b7jVWRtGb4/GPbUPHA8caQE/mWiHuCRNLN7EF6/qj6PsyZsM+J7xQScYGX7xftCHbwLCT7sQrkbfBRt52hS2wwSrzdBSAei9WYpShPx5BnPnxfcCIG3A6G1cQacMHp/b6gFganpsnN0kn9/PrLmVhAAe3Sirurd7UQTpjo47NdQyKvSQjHMSsM+/42n3p7PNt9RJS8KCNwDP1lIYij6pjR9gFFadEgOuDBKKwApBCXYJsWOGvef4V7kI6pXl7rvvgrg/jXnsS4ZadaKnCQj0g6/p+WIx0dHCkUdTSKwNzlGvit2/K2YcimxjKGVeuWZRJSAH2MXqMb5K0ZbejOITH2dJ5nwCm4X1KgNOr9CWg2slIIGGgOJqyPPfvNtxdJcA4Z2NQWso7u+2A7fVFTdzIi8Se3b/JNe4QZ62mX3ujRelSwBW++tHGw/FNNQq7MRZDiJox9Fnl1cmbi0WP8g9mQDjrhl0VmygbMUW1H4PsWWmdKPavPyhXnIvDjRQ9FiJKuq+fkUxUQEWIX8Hv00HMA3pfkh0+B9TNCCnLusL1qSepcIYuYzJkL9yV8O1iOdbSgOpwBB+HWOOewomH5lxkKBJsfTu/Gcy505rDHm1N3Wfq/PpoGC9cL0knusoov6iWR7jVTE9mXIc9pk//tHj4wGZP/PtJYgCB16oztOX4kaw6lWKETwwvqYPxHdUYMeQH/CwU9Jcor3u6Y8aS+XRlqi+Z9QIFZGcXxPHzITBZtVeYD62dQB3rNehfBjxWjaZbbBou/VJkkMYm10Bf4/bdrqx0A2
w28A6ZaKzH4gZbEcROW1V/aVMSfGaqXMY2Nph+oWE5HsISUUHfPAyVbUphJmMdKjNRawBKJ1YM6e+tHRP9TawGyV75MV2e+qT6lekkVBQwow56/027L4ZjpD+caU1c4IqbpSzD3NK+wop85BRVZvjJbTW1t6GggkLbsCYh6VY2trgxYDBITFqje1CkzxdKhujnFoXkAAHOMngxAuNzf1UaHVLZAVUAVfgF6p6sihCeZlaNOL51N5zlvwYuA+bi5/dJw9mYfecpMgb0lcwiROielLZIivHdoOPc/V2pNBHEiqbGEsF7DKtUbA/bPSlsJVD/r5fkeNTH2bAN12oMz/qgr0fEmCrXvzo/kxnzIcEuKYKDe+jqZpymfLpw/vDla+wSsB4Ql74oXk8wDC91GA5/u0XiB0eCGLbNH+M85W3T8S83E6UC3TjrB8bE/uvTrW9+P6Tl6O4vFG/onGD/0rDSpZuuCIQlwB4CnlDNuf/UXrXn6tlcd4u2SvMBIIJ9lBv5MD/UsC7IF1lSp3FM+T36oVVqs2Xd6xcrSm5fdTe/MxT7n6iBBGbKImxl28KJWvF799Cmi+2JC8aQ6LZuSW7Sks6JruOD0XrWWCW9iiFzlX/e+KR3ZZxcVnhBrd77rU5INuCohinvacV57p064MXQW/o/WVt8vlZg8/yUr/mp7Deq2B+uFONHalUBwwU+PLMsn1vEJ9tCNA8yWppuow07sB7vPVm1I4z2mWTw1nWkjuzmPf9RHPhb+HChHW3QN0JgHXmwOhkkRPcOjilgivQbGc+IbQfMMTv18SRCfAMEs3BLFFWg1/lDJ0yBaO81iZFzrplSHcI6+tlLZ8ZtY+kWIy09bNfTtoTbQiopsvDBGvGpFC6wkeJ+hcRWkyXOe11+PIc5EH3LguQ5u8CGuf0aA6Im7EwYZ4sD5WjYM61c8cGAxOYYq/WF/qbnzKLpscMQESXpmNQWRRwSntpUzFWnXKXRbqh8c3L8tNZyOzChFkXt/+lGL3ExOgS5Ch9j4l7KxtHSm7+GnhvzlU0fW2aAsSJJ+v7hsE1mxyvEwo8AmfRHOY0Szn5nUPMOGDqf2GjU1JNHJh/bs7JLwhtVlq2h4cMsSaJ/x4xx0RI+VAtVAyn1asJ6onoOIn6S80LJ/fuxXUvT6B7GxkWDzuhDObbSFnYJcS6L70SgDp7toIvZeCnqTtrcd9bBcga6NplRdIYHbjZg/fcs3XvWGbUfGexvzHIz4Ve90N7cmVqSblnFatoeKeeePB85WgMXWZzBRtJNb2Jf+qCS3fcIIrE+ULqK1KozvBuldBmWkBz2cQZYfZo/baEZKtAcKsresa9PN9QsR+Tmad/f+q+ghPsveJkYWw/YTWS/IZwPa1mxAdHzJ0BLLGhO4FIMJxt5+wl1pRdZXFGudWZS4/rERFcpWB1O+KhL8bebK8stIlWXbk2FWuVFOG7XoMDfg+i1GkxdpTRWxbFSPwg7JhpHtaDgjCoW3YukvpPG8HaQPCC45pmVTrJ+dY/ViRCK7wKvNYafsaAJIB6Te8BYE7TjhROQsjsgsmXmMzzphC+hw54M4x/cTNIvTTIID70A381AnVx107l4PuSp6izOgd4M0GlSSFEzFS8xAqURzaTYGQ3eKiRqiI0pJiJahk+0Iiito/K6CeI9Qag8yh0Mr6zHbJpEqNb61A1y/z18lUnMC32VDebzdgAuC0wadYjufpDQzd1q3CJvGr4I6ui4a9wsjbSUcrbDIZ0ModSKhs7De3qvlLRB7z6Cdi8uXNqgoz6f6yNsXv3p2osQq2WFAkBjPcZLnhwIbHoPq1FcsVbSIYH3LB5NllmxUN2MsXDPky3IWyOXRSlAckm2EwizdYA72OBw21chxC7aL/R6Tv3HiO5zuMcv3407DedlPuBTT13WtlUWV0TFLqIRpWePoL3TiaP1koUwGtYnCa5R64rz2v6AUnvC+FO1NgPRIoBxDscbYJn9fNeePMNz1j3vGWiUU0MjArI0fa5k3SBjuiPXlJPtc3LSO4s3NaOzF416Xb3fsZyBeMecsFwIaspAtoP84G0+erqCbaRdhn26mpOtUZO/dtY/UWx6+/NwaAdCLR5hjnlyZJbWlSwdDOk8Yxhcm+LsTHAEspULrJKisByAGgqoHbKz5LMRnyU7XLaW5lhOV2s8aIksVdVkWnk6isZpgKiE8vRHyyILQQ+ID/juMQiEHcK0pagGtBEnULuN+ZfpNQmPfw9LfkjoBeaSJZtMM6E4lFa/D3Mwbd5B3kuvMYOYt+vB6D3j+sw4w1ilYU84YnhJ3YK1s9/rhZOEiogjVRCZbuvjUQqxRYqquGDamSquekSdp7zOT85VoGBX1vjnDFAuRVBDBISh6O5fjf9IjCdRVLSCOHn4rP6IQJFfPFRSEV1JIIPplpPjTXraTCFfAAyrMCkot5CIjPu2ol2EsQlp0irNnl52Yczw4ofocLn3GzAfbLWCprnp2qqSb0Pdz6AX6VjdYx7IYfuCnpOJQNJXzaGyTUYIrBU04RPFyTB502cj4Rj6jfsHa8gSlEoiswSkYmDv3TwC07roHGwWUvf7u8YgRAfLLEWRPOir9/CBF4ZkZFSkj82ehdnwRzO2uz7cFbPHeBC+UILqzlnk8DDkL3nS0u9/xaoOuCamnUXKW7vW91IOvHsQBp/oPXbi3WnrbGxWWJAdfPY7ylIRfOU3hLQUeYpQPmlFCGxVJvOKx3lstHTvYzPhdOT0/L4v21+9rF1Q/g3PFc/ROHziYNdiby8YGLO02yC34O4+uhZzUUOwzVDiYbf76EMV/6pvMUhoJWbLYC790sZDcpxLRAywAH1eFaI9iQW8K5O1R7IXY2zotxWuLHmMGWwmLuuLuZiuGmiIiezZXzFbPbkatH6xxRSpjJDH/z8yPx8MGMW51YF1ZECpaJoRQDTuZHy4/PIi5MiVUcDWvJa9m4EVB/1xUdUqPce5UC5tIcoRz4xT3KaxTCLBpODYJkOK/rRJdeg2ljQHcY1gS3fZNmcmurng8eX180gTse3UPczzhdSkSdw4GgAgDL28pKDQOKEZQvgQKjAYrdCu8AnOAdizvTrmGYPTE36oGor5FPVAgvJmgwuYKvjiEV91D/vse1iDGy5qIdtnLhSBgmvOt0mzSCgp82KlBETHOhfRBAAmSI+g47S65j0ZoU7SrUPaxAwzh2HQq86OQSa0PnPgDG9yGODbtRU2UyRmUlbqIXZuLgZ/JrbbFfVTJyu5wDqbGrjLxPoewJ9hRTTWTny5okrGZIhVFlpQYiDNtw1Y+O426lC/vWjxoBZpt7F1iQRS/HpGqwrnaVo1qvrJXDVVka4rfEhx22/eo0HWoi0JiIXlgNZZPCOo6pXXY9fLXyl2G4ZbtZyhrHVcWQB3K6b2XN4vUqiXwy9KilmVryPAYbM9Fxa7hYwJun9b+AXw48Gpb5vwSVCSzcc4nCOe/RJFpHldkoayyUPJV3WHEPZTMxnDQ9Y9Jrj+hW5ZiPlQw2grWxispPmUfLTrCZZ44tZulhXuDz+PzESXzvFvGoMgb2Qh3
7VkHMIsJgMRc7cNKcjUOpdQL8nWF9LGsHtjPg8VW1zr4SyaPXvmtUeTXjeOnUMLcibHmJtQ1BYZ5mZMuXbM689gF8xDUs8mbnrsZ931Bwd7gGM8NNP1BkWFAA+lFoSFuQjzVo75BcX1laD/nqd6K5bS4/s7WnyaotV7u+8wudLdfv9Y4Hq8pZX/bE3P7uleAAe+pLDSx84yJ+cC2BbqVgypf8H8xGKnfA16frYXrbHv+QrAvAcqZzuo79//bqTxHUNQ4E21gPoWKOlxEDDjsy63cHSlCEU4u3dR0DQmM7R8NhsDR8XhOR/8EEIwdkEnfazZYqmSQ707xrB5Nv9kKwEJ++ai9FSX0hkQTvRbhdJ7hhFGacwoy4iZR9SYveIe3KvjYuYVcfAUqNBRv0g/7ZYvqS59p4Ngl6+wW6MPOK1JqHNtVnu3C15+Sa69E+4TqTUS89Q/uUdfOsw4uf0YwNnWNqbnpq2mcEc74uM29RS98XKbdnaep76GazAqJdYVUts7IWYxF058OB1u3qOcU6IXgH6r1Z1SEGFJcVgJXvpW9tS2wtThN+/Bb+p1HLXribf5LhRzsqRseulQmMYDaMrYww7mX0FDuXUI+uvZqbbn1ezGAyQKHVnnekktp2Ep7N1UlIN5gCcfknwgOpMIaKt4tL1giDDCnywVvxMCYrTA2mdCmsHfXHMO/TIiFIL0zlVTzAD7+GygCw1DVTP5Hv1C+YvlJDT1R2mOV79JOgJGcx9DynD8/pIxv/1Q77L1DNSbYmte6SHawsiwsooDGgoYvNCyFkfHa9RcOB7/ZzK+n19hngYVdsbF9fON2WBYHbrpxY8ufuggrtlNI93jbDlleK5mt/shvh3LlYuK+1PiDgpN4RODiXVMpmVHjq8cz3ofzDqJHazmbSVdQnIj3fQGRr6L/XLexT9bY59ZaqrEZPKXvfUsjHJTDsVOJpfYxHp2v97tnsQPpiA70v/Y3rK7FmuE0waQc/ugcXCzVGscntOd1lVLXKeB7JQMd8VZhgV4MKCFompJjW9ffMilqn8OIR6QUqEVSGNCc2n8B6L/CtQmP8eHZ8U+0aiEPrabJA/7w+3tKHdHZrGCgNhgAt6AAlzEUHYH8j4CE03xudUIAaz7htA1g3+U2KlsTL9F93A9KDKrZrSCZJRMbE4nv+70oYVQC6zHywILPZTH9aL79U8v1yZKGIi9GT+SGVrEBtYf9WjrouotLEtvXp2sbnIuTK3jH3p7lEqcolb5rnL76bi88TMzkRclrnyyB4neK1Y7hhJ9RPfceMu/DiW7igmDHO7gFR64idbpJcjd5TIdTy0AiajehpYbQs2Ctde5z+rrGeUc2EyCIKo+eDPuXRuzPfOkYQuSenUH41KPiGs5GlSJsDTzyTEr0jlJWIKEwYM4st0/dyOn7zQdu7yp8crDON9UPCCad/AdLWBVQVHpPfTqi7XHJ3SJHpi3wYnNusLrd6p42Ei0r6GQavLAFzEpFS+vJlvbCt8UqwyHqkwr6Zmwvgoypn6zFd+D/pDtPO8FSdtkUW5vK8r6+3kmOAo2AQerp8Rez40KjYZrQ3iROmEOER1ZtTXvtrcKZSWqzV0JgCaNfI1yxQb4Nmc0QrjqMThAgkWmApNpdAaxeatMwc07w15BmMcTyOQQmsu3XkfzqjZeuJMrrsruk7/mcdD3MrHMr4x4bYUNNZnuPrZYgED5rVJHrUhnR2gd1Q84Lfxsl4KeA/eoU7JB3T2uqfoNEFi1Ii4lAIDm1hMb8bDCIcFh9nEb2YLh4q2XILHEdZ+ZxY+kDI2KJ93byLCX7ijr8FzLyuEtOldvaR65aCFJklLJGSayW8mT9Eu+coKNLBo+EtvYkmuPX38nKoZ/zIVHsmJmL5MYTp6KrJxQWkKuFmBxDTTRt9eDGLzuJboNpeHP60/qEyvDzTw+Jv9Lc9U/BdddnrsPlBoTzukpL9faD0M0bx8tsxAe7LwymR8cYpzDPcYcufQ0qU/og783+YYJr3GwbDwYcVjJDBhV13Ci0ilYoR77BuItvYAu95bLHSkynpC2vhpTGK0CoHoUoJ7jmTSiCFVn87ccWQJzIurha5nTyYAowB6AFaMGnzgXQgWwfuEvxNweY8caVMvrrGoaymdIflBPzOz7fQ7JXAEWAtw21faCxgiWeCe7MaeDTilCLyJwvkZ7RWIhcNWAIRNSYkJBcebNXMiYJxFcGzmtRTTE/PN183MFt53b+w7WJLBFy0wVhEndvBvY/TOqMlRV9MvTaVK3nSruBuBWtH7OyY9gtbGknFLz/h9Kb1e6fCP+UKec4uOU7lo6ZAWwFlwd4LhT0U1NZ1YL8QWkQs+6odxRlGpy0rxTHismGYeOXI9I01WU3BT0BQPCGcPGIzivCi214NwBBIPqhGxq29+wPPx+QLHROQlvS3WaMH9YXzGMOTmcENThVVuWTZXE1hF02+7aTH3KwjLHtlU8ePOP/ZGL5vOYZiYY4SZNQETu8mxCoeNNdpwdA3GlaVZ64jbe35iRpwIk/wWYfoESSUM0qV1WLno1GEwwSUUnu2Y/hJJYPgDwrRNQ6AN/K72qCucfPgL6yM9vZ333wJ2CuqeCiy+IGIAPFHMMARoKQQzDk3ufz3NELAeWAvxlxsE1MrSl2IYi8SdYeVChXPUinThxlWxxhCQkFaCoNVVjtctzLm8BjmGPFzwNpaGeWD083hHP6/d/tU+oUUia1EEa/Fug5vBMHlEefK7d8KlvHQd7rca0A4yKL8euZ0fyoZEI9HBvpMURCOs40Ue0p4Puawq8hHLboSmAd9Zk8QZhZ//XfBKfBDfsWTDQl9ccaP515kgOPEGUiT3K7nR2QKm0TA3tHx9VCYdVpxzMKW5Q6vdSg1/L7HzXqOtxB9CIEmhIlF5YbUpdKN6HF6Ud1Wb3H8QF9Hv3VCaGcY+VmOt2ZnS+VuzsBX4BE9fy9zyHKJtbDT7gZeCkgXHt9Kny2ZBjBV7ESyrjB4IbQToE3RKZVst+NX4Ozzcb6owiMHTrqFFTjodX7NVEdZ+Y/plUtt0C0irptIwwheZ5lpmR6XL/Hie8BhLyO7RXfo2vKKNhZs5gjQ96pGTDNRjIqEZfcdM7GIqWZXV6eHaZJ2X2J5IaBF6Rlo6T4oY3lehQNNrPyrzQaIbUkywRDfzke1zERoTzB1N0NsWk0ZcIkBIDYlV6FkvJ0R65IW+PazKv6sODKe1ltqRjX8BB9U1J0yUloO6qzML/5XEBx/Z8bkCgzDGIeiqtYahfAg0+VRBaPzfceGr3P2PttOuQQR918oGXb+O13uZJDmYMAXNcuFtb5lqUxbTaWa7vRrrIa0i67IqEPAwNBfG2WT4zdffCsphSxDcFNElrEYGWSFKwaF5ACP1PEz1WgpjueIMEVHvqXEhWzlaVYqhUbEmNjqkJD0nWTIKNWIwVppO6jVdnavZTtf0WQcB5TZNRj4nY9iF+Vj8Zs3SKADwpIcZfQqC43e0n4Cvh8iXsGW/76zbDl0P1986MRqU3gYrJrgMYfvJdQi3mZJnHvfKf93FMQ8AWCrIS
ElLncQasIZ2VF+EIF5qKgCP74UWBg38ug0vOvINlpIBkattzI8OkEHbr8ar0Ecw+1+KtueCDKh5hf39EnwFZ8b1PsG7Y+wxWOWXxAv+dHyDzd/JEGQZF2A8Etrz2YYu8rJf4d4omu0k7SkeDsZwhwOlvVZ/Mu0DaoQ7pDwkT4mo+GxwjCXw4U2OLfdW7xOd8+1RbtfFBO9j2tOSLmbN+1hkPD/rLPrG+MuhsWoZuPIxEoRNyrCvAXShrWtfduOdst0klBDqx/medMWcuD0tDtX/FUPadwPpzGFX7xjJxjzWGT2DYxu+oAk+CWiULowz1XN/15Dg2wguNFUqpxau917fah+KEN/4Y2R4y7/xcTpfwNpIE3BKCPyCLV6gxot2jJy6/uUfl6BkeCtaOIWrWE5E3o55WYoBMGgqUHAzdfYC1BZmS6Gbf8m6tCxydHRjpx0IlrIvbaNVPgT2sBi0+myPOefzEdMXYpDQ4PGcpz8XcU2Zd6Iv+AATaTMXS8OIp069Z0bhOv4O711mWAyOI9M0ZMNC3ACAJwFdxH2/54FonyMlQcBBBWCsacggROjlkG5UaAEn/5KeIVjw18DV8mw3ke/vp5WXZU56KyRk9Sse5sE0d2nvVwcS/N0bkFPwkAOjSB+ndi6f/XyO81o4UJ4vvyYpAINzIiE7ePPc+9/rdswXMcG6MYsrDDLwgeIG9QYY+n6vWqPdgbaU7DpgutKiA9ju3IY/5Hat1c9zkXQNhuXIcYowURduJpiaQ8nP66QCnCAlOMKBcuVm48T85zIoV/O+Mn1m7YzJ0Esp64d9JwtIeNxDwZrv0x9O7gnue0eDTB+kscOHWKn9Iy8P6aevyEVqQNV7xG4qWG03ZBwtbHe8rJOPk1zGeKMaO1NsDAqyyUqOiebiLjaTCaw8TnhjrCoGj3hmddRu/qN3Hm5oq7iaE610fs8Ykjk9/E8bcAR4jFA3TaiYUjkKmVB2ef0Lhv0rOaho1+/CIVE0bho8pHJlhqa+2Gm0fHJJQ50jpfPCjbF+YNqGGkg5OaET4tkD4FR7smv7Gpz2zXx62Sr9N+Q8DWIwxkNZDZjlTlZ4CwiCTfGXjqKAmiDGfWSQD/tpjEyCX21fIxD7BFHIaTRbcxn2RWBXIZ6ZSYDZkX4it2sGGSf6BBQ9CWzMVJY+al6/pfP44obBVlSDQmiEPnLDAOl9UYj8ou0ERZ4JykNRR1pixLyK3lMbM6M2K8BzgBnPHIOMaSU8xt3MOWduLqFpOU0giUgedRs4kDpD9/EU7UUAtOrHCD/e/s4c8pxjUo1WNEb5csGAQDbtyTGmCuiJNT93/VPeSicIsmtvXacq0zlBGdr0CPkBAAD//5VYk3s=" + +// Set accessor to a real function. +func init() { + compressedBytePointsFn = func() string { + return compressedBytePoints + } +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go new file mode 100644 index 000000000000..6d6d669f198d --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go @@ -0,0 +1,1310 @@ +// Copyright (c) 2015-2024 The Decred developers +// Copyright 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +import ( + "encoding/hex" + "math/bits" +) + +// References: +// [SECG]: Recommended Elliptic Curve Domain Parameters +// https://www.secg.org/sec2-v2.pdf +// +// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone) +// +// [BRID]: On Binary Representations of Integers with Digits -1, 0, 1 +// (Prodinger, Helmut) +// +// [STWS]: Secure-TWS: Authenticating Node to Multi-user Communication in +// Shared Sensor Networks (Oliveira, Leonardo B. et al) + +// All group operations are performed using Jacobian coordinates. For a given +// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) +// where x = x1/z1^2 and y = y1/z1^3. + +// hexToFieldVal converts the passed hex string into a FieldVal and will panic +// if there is an error. This is only provided for the hard-coded constants so +// errors in the source code can be detected. It will only (and must only) be +// called with hard-coded values. +func hexToFieldVal(s string) *FieldVal { + b, err := hex.DecodeString(s) + if err != nil { + panic("invalid hex in source file: " + s) + } + var f FieldVal + if overflow := f.SetByteSlice(b); overflow { + panic("hex in source file overflows mod P: " + s) + } + return &f +} + +// hexToModNScalar converts the passed hex string into a ModNScalar and will +// panic if there is an error. This is only provided for the hard-coded +// constants so errors in the source code can be detected. It will only (and +// must only) be called with hard-coded values. 
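// Illustrative note (editorial, not part of the upstream comment): the leading
// '-' handling in this helper is what allows the endomorphism constants below,
// such as endoNegLambda and endoNegB2, to be written directly as negated hex
// strings. For example, hexToModNScalar("-2") decodes the value 2 and then
// returns its additive inverse modulo the group order N, i.e. N - 2.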
+func hexToModNScalar(s string) *ModNScalar { + var isNegative bool + if len(s) > 0 && s[0] == '-' { + isNegative = true + s = s[1:] + } + if len(s)%2 != 0 { + s = "0" + s + } + b, err := hex.DecodeString(s) + if err != nil { + panic("invalid hex in source file: " + s) + } + var scalar ModNScalar + if overflow := scalar.SetByteSlice(b); overflow { + panic("hex in source file overflows mod N scalar: " + s) + } + if isNegative { + scalar.Negate() + } + return &scalar +} + +var ( + // The following constants are used to accelerate scalar point + // multiplication through the use of the endomorphism: + // + // φ(Q) ⟼ λ*Q = (β*Q.x mod p, Q.y) + // + // See the code in the deriveEndomorphismParams function in genprecomps.go + // for details on their derivation. + // + // Additionally, see the scalar multiplication function in this file for + // details on how they are used. + endoNegLambda = hexToModNScalar("-5363ad4cc05c30e0a5261c028812645a122e22ea20816678df02967c1b23bd72") + endoBeta = hexToFieldVal("7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee") + endoNegB1 = hexToModNScalar("e4437ed6010e88286f547fa90abfe4c3") + endoNegB2 = hexToModNScalar("-3086d221a7d46bcde86c90e49284eb15") + endoZ1 = hexToModNScalar("3086d221a7d46bcde86c90e49284eb153daa8a1471e8ca7f") + endoZ2 = hexToModNScalar("e4437ed6010e88286f547fa90abfe4c4221208ac9df506c6") + + // Alternatively, the following parameters are valid as well, however, + // benchmarks show them to be about 2% slower in practice. + // endoNegLambda = hexToModNScalar("-ac9c52b33fa3cf1f5ad9e3fd77ed9ba4a880b9fc8ec739c2e0cfc810b51283ce") + // endoBeta = hexToFieldVal("851695d49a83f8ef919bb86153cbcb16630fb68aed0a766a3ec693d68e6afa40") + // endoNegB1 = hexToModNScalar("3086d221a7d46bcde86c90e49284eb15") + // endoNegB2 = hexToModNScalar("-114ca50f7a8e2f3f657c1108d9d44cfd8") + // endoZ1 = hexToModNScalar("114ca50f7a8e2f3f657c1108d9d44cfd95fbc92c10fddd145") + // endoZ2 = hexToModNScalar("3086d221a7d46bcde86c90e49284eb153daa8a1471e8ca7f") +) + +// JacobianPoint is an element of the group formed by the secp256k1 curve in +// Jacobian projective coordinates and thus represents a point on the curve. +type JacobianPoint struct { + // The X coordinate in Jacobian projective coordinates. The affine point is + // X/z^2. + X FieldVal + + // The Y coordinate in Jacobian projective coordinates. The affine point is + // Y/z^3. + Y FieldVal + + // The Z coordinate in Jacobian projective coordinates. + Z FieldVal +} + +// MakeJacobianPoint returns a Jacobian point with the provided X, Y, and Z +// coordinates. +func MakeJacobianPoint(x, y, z *FieldVal) JacobianPoint { + var p JacobianPoint + p.X.Set(x) + p.Y.Set(y) + p.Z.Set(z) + return p +} + +// Set sets the Jacobian point to the provided point. +func (p *JacobianPoint) Set(other *JacobianPoint) { + p.X.Set(&other.X) + p.Y.Set(&other.Y) + p.Z.Set(&other.Z) +} + +// ToAffine reduces the Z value of the existing point to 1 effectively +// making it an affine coordinate in constant time. The point will be +// normalized. +func (p *JacobianPoint) ToAffine() { + // Inversions are expensive and both point addition and point doubling + // are faster when working with points that have a z value of one. So, + // if the point needs to be converted to affine, go ahead and normalize + // the point itself at the same time as the calculation is the same. 
+ var zInv, tempZ FieldVal + zInv.Set(&p.Z).Inverse() // zInv = Z^-1 + tempZ.SquareVal(&zInv) // tempZ = Z^-2 + p.X.Mul(&tempZ) // X = X/Z^2 (mag: 1) + p.Y.Mul(tempZ.Mul(&zInv)) // Y = Y/Z^3 (mag: 1) + p.Z.SetInt(1) // Z = 1 (mag: 1) + + // Normalize the x and y values. + p.X.Normalize() + p.Y.Normalize() +} + +// EquivalentNonConst returns whether or not two Jacobian points represent the +// same affine point in *non-constant* time. +func (p *JacobianPoint) EquivalentNonConst(other *JacobianPoint) bool { + // Since the point at infinity is the identity element for the group, note + // that P = P + ∞ trivially implies that P - P = ∞. + // + // Use that fact to determine if the points represent the same affine point. + var result JacobianPoint + result.Set(p) + result.Y.Normalize().Negate(1).Normalize() + AddNonConst(&result, other, &result) + return (result.X.IsZero() && result.Y.IsZero()) || result.Z.IsZero() +} + +// addZ1AndZ2EqualsOne adds two Jacobian points that are already known to have +// z values of 1 and stores the result in the provided result param. That is to +// say result = p1 + p2. It performs faster addition than the generic add +// routine since less arithmetic is needed due to the ability to avoid the z +// value multiplications. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func addZ1AndZ2EqualsOne(p1, p2, result *JacobianPoint) { + // To compute the point addition efficiently, this implementation splits + // the equation into intermediate elements which are used to minimize + // the number of field multiplications using the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl + // + // In particular it performs the calculations using the following: + // H = X2-X1, HH = H^2, I = 4*HH, J = H*I, r = 2*(Y2-Y1), V = X1*I + // X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*Y1*J, Z3 = 2*H + // + // This results in a cost of 4 field multiplications, 2 field squarings, + // 6 field additions, and 5 integer multiplications. + x1, y1 := &p1.X, &p1.Y + x2, y2 := &p2.X, &p2.Y + x3, y3, z3 := &result.X, &result.Y, &result.Z + + // When the x coordinates are the same for two points on the curve, the + // y coordinates either must be the same, in which case it is point + // doubling, or they are opposite and the result is the point at + // infinity per the group law for elliptic curve cryptography. + if x1.Equals(x2) { + if y1.Equals(y2) { + // Since x1 == x2 and y1 == y2, point doubling must be + // done, otherwise the addition would end up dividing + // by zero. + DoubleNonConst(p1, result) + return + } + + // Since x1 == x2 and y1 == -y2, the sum is the point at + // infinity per the group law. + x3.SetInt(0) + y3.SetInt(0) + z3.SetInt(0) + return + } + + // Calculate X3, Y3, and Z3 according to the intermediate elements + // breakdown above. 
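// A note on the "(mag: N)" annotations used in the arithmetic below and in the
// other add/double routines (editorial, hedged): the FieldVal type in this
// package tracks a worst-case "magnitude" for each value instead of fully
// reducing after every operation, and values must be normalized (magnitude 1)
// before comparisons or serialization. The annotations record the maximum
// magnitude after each step so a reader can check that every intermediate
// value stays within the limits the field methods accept.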
+ var h, i, j, r, v FieldVal + var negJ, neg2V, negX3 FieldVal + h.Set(x1).Negate(1).Add(x2) // H = X2-X1 (mag: 3) + i.SquareVal(&h).MulInt(4) // I = 4*H^2 (mag: 4) + j.Mul2(&h, &i) // J = H*I (mag: 1) + r.Set(y1).Negate(1).Add(y2).MulInt(2) // r = 2*(Y2-Y1) (mag: 6) + v.Mul2(x1, &i) // V = X1*I (mag: 1) + negJ.Set(&j).Negate(1) // negJ = -J (mag: 2) + neg2V.Set(&v).MulInt(2).Negate(2) // neg2V = -(2*V) (mag: 3) + x3.Set(&r).Square().Add(&negJ).Add(&neg2V) // X3 = r^2-J-2*V (mag: 6) + negX3.Set(x3).Negate(6) // negX3 = -X3 (mag: 7) + j.Mul(y1).MulInt(2).Negate(2) // J = -(2*Y1*J) (mag: 3) + y3.Set(&v).Add(&negX3).Mul(&r).Add(&j) // Y3 = r*(V-X3)-2*Y1*J (mag: 4) + z3.Set(&h).MulInt(2) // Z3 = 2*H (mag: 6) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// addZ1EqualsZ2 adds two Jacobian points that are already known to have the +// same z value and stores the result in the provided result param. That is to +// say result = p1 + p2. It performs faster addition than the generic add +// routine since less arithmetic is needed due to the known equivalence. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func addZ1EqualsZ2(p1, p2, result *JacobianPoint) { + // To compute the point addition efficiently, this implementation splits + // the equation into intermediate elements which are used to minimize + // the number of field multiplications using a slightly modified version + // of the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-zadd-2007-m + // + // In particular it performs the calculations using the following: + // A = X2-X1, B = A^2, C=Y2-Y1, D = C^2, E = X1*B, F = X2*B + // X3 = D-E-F, Y3 = C*(E-X3)-Y1*(F-E), Z3 = Z1*A + // + // This results in a cost of 5 field multiplications, 2 field squarings, + // 9 field additions, and 0 integer multiplications. + x1, y1, z1 := &p1.X, &p1.Y, &p1.Z + x2, y2 := &p2.X, &p2.Y + x3, y3, z3 := &result.X, &result.Y, &result.Z + + // When the x coordinates are the same for two points on the curve, the + // y coordinates either must be the same, in which case it is point + // doubling, or they are opposite and the result is the point at + // infinity per the group law for elliptic curve cryptography. + if x1.Equals(x2) { + if y1.Equals(y2) { + // Since x1 == x2 and y1 == y2, point doubling must be + // done, otherwise the addition would end up dividing + // by zero. + DoubleNonConst(p1, result) + return + } + + // Since x1 == x2 and y1 == -y2, the sum is the point at + // infinity per the group law. + x3.SetInt(0) + y3.SetInt(0) + z3.SetInt(0) + return + } + + // Calculate X3, Y3, and Z3 according to the intermediate elements + // breakdown above. 
+ var a, b, c, d, e, f FieldVal + var negX1, negY1, negE, negX3 FieldVal + negX1.Set(x1).Negate(1) // negX1 = -X1 (mag: 2) + negY1.Set(y1).Negate(1) // negY1 = -Y1 (mag: 2) + a.Set(&negX1).Add(x2) // A = X2-X1 (mag: 3) + b.SquareVal(&a) // B = A^2 (mag: 1) + c.Set(&negY1).Add(y2) // C = Y2-Y1 (mag: 3) + d.SquareVal(&c) // D = C^2 (mag: 1) + e.Mul2(x1, &b) // E = X1*B (mag: 1) + negE.Set(&e).Negate(1) // negE = -E (mag: 2) + f.Mul2(x2, &b) // F = X2*B (mag: 1) + x3.Add2(&e, &f).Negate(2).Add(&d) // X3 = D-E-F (mag: 4) + negX3.Set(x3).Negate(4) // negX3 = -X3 (mag: 5) + y3.Set(y1).Mul(f.Add(&negE)).Negate(1) // Y3 = -(Y1*(F-E)) (mag: 2) + y3.Add(e.Add(&negX3).Mul(&c)) // Y3 = C*(E-X3)+Y3 (mag: 3) + z3.Mul2(z1, &a) // Z3 = Z1*A (mag: 1) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// addZ2EqualsOne adds two Jacobian points when the second point is already +// known to have a z value of 1 (and the z value for the first point is not 1) +// and stores the result in the provided result param. That is to say result = +// p1 + p2. It performs faster addition than the generic add routine since +// less arithmetic is needed due to the ability to avoid multiplications by the +// second point's z value. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func addZ2EqualsOne(p1, p2, result *JacobianPoint) { + // To compute the point addition efficiently, this implementation splits + // the equation into intermediate elements which are used to minimize + // the number of field multiplications using the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl + // + // In particular it performs the calculations using the following: + // Z1Z1 = Z1^2, U2 = X2*Z1Z1, S2 = Y2*Z1*Z1Z1, H = U2-X1, HH = H^2, + // I = 4*HH, J = H*I, r = 2*(S2-Y1), V = X1*I + // X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*Y1*J, Z3 = (Z1+H)^2-Z1Z1-HH + // + // This results in a cost of 7 field multiplications, 4 field squarings, + // 9 field additions, and 4 integer multiplications. + x1, y1, z1 := &p1.X, &p1.Y, &p1.Z + x2, y2 := &p2.X, &p2.Y + x3, y3, z3 := &result.X, &result.Y, &result.Z + + // When the x coordinates are the same for two points on the curve, the + // y coordinates either must be the same, in which case it is point + // doubling, or they are opposite and the result is the point at + // infinity per the group law for elliptic curve cryptography. Since + // any number of Jacobian coordinates can represent the same affine + // point, the x and y values need to be converted to like terms. Due to + // the assumption made for this function that the second point has a z + // value of 1 (z2=1), the first point is already "converted". + var z1z1, u2, s2 FieldVal + z1z1.SquareVal(z1) // Z1Z1 = Z1^2 (mag: 1) + u2.Set(x2).Mul(&z1z1).Normalize() // U2 = X2*Z1Z1 (mag: 1) + s2.Set(y2).Mul(&z1z1).Mul(z1).Normalize() // S2 = Y2*Z1*Z1Z1 (mag: 1) + if x1.Equals(&u2) { + if y1.Equals(&s2) { + // Since x1 == x2 and y1 == y2, point doubling must be + // done, otherwise the addition would end up dividing + // by zero. + DoubleNonConst(p1, result) + return + } + + // Since x1 == x2 and y1 == -y2, the sum is the point at + // infinity per the group law. + x3.SetInt(0) + y3.SetInt(0) + z3.SetInt(0) + return + } + + // Calculate X3, Y3, and Z3 according to the intermediate elements + // breakdown above. 
+ var h, hh, i, j, r, rr, v FieldVal + var negX1, negY1, negX3 FieldVal + negX1.Set(x1).Negate(1) // negX1 = -X1 (mag: 2) + h.Add2(&u2, &negX1) // H = U2-X1 (mag: 3) + hh.SquareVal(&h) // HH = H^2 (mag: 1) + i.Set(&hh).MulInt(4) // I = 4 * HH (mag: 4) + j.Mul2(&h, &i) // J = H*I (mag: 1) + negY1.Set(y1).Negate(1) // negY1 = -Y1 (mag: 2) + r.Set(&s2).Add(&negY1).MulInt(2) // r = 2*(S2-Y1) (mag: 6) + rr.SquareVal(&r) // rr = r^2 (mag: 1) + v.Mul2(x1, &i) // V = X1*I (mag: 1) + x3.Set(&v).MulInt(2).Add(&j).Negate(3) // X3 = -(J+2*V) (mag: 4) + x3.Add(&rr) // X3 = r^2+X3 (mag: 5) + negX3.Set(x3).Negate(5) // negX3 = -X3 (mag: 6) + y3.Set(y1).Mul(&j).MulInt(2).Negate(2) // Y3 = -(2*Y1*J) (mag: 3) + y3.Add(v.Add(&negX3).Mul(&r)) // Y3 = r*(V-X3)+Y3 (mag: 4) + z3.Add2(z1, &h).Square() // Z3 = (Z1+H)^2 (mag: 1) + z3.Add(z1z1.Add(&hh).Negate(2)) // Z3 = Z3-(Z1Z1+HH) (mag: 4) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// addGeneric adds two Jacobian points without any assumptions about the z +// values of the two points and stores the result in the provided result param. +// That is to say result = p1 + p2. It is the slowest of the add routines due +// to requiring the most arithmetic. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func addGeneric(p1, p2, result *JacobianPoint) { + // To compute the point addition efficiently, this implementation splits + // the equation into intermediate elements which are used to minimize + // the number of field multiplications using the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + // + // In particular it performs the calculations using the following: + // Z1Z1 = Z1^2, Z2Z2 = Z2^2, U1 = X1*Z2Z2, U2 = X2*Z1Z1, S1 = Y1*Z2*Z2Z2 + // S2 = Y2*Z1*Z1Z1, H = U2-U1, I = (2*H)^2, J = H*I, r = 2*(S2-S1) + // V = U1*I + // X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*S1*J, Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2)*H + // + // This results in a cost of 11 field multiplications, 5 field squarings, + // 9 field additions, and 4 integer multiplications. + x1, y1, z1 := &p1.X, &p1.Y, &p1.Z + x2, y2, z2 := &p2.X, &p2.Y, &p2.Z + x3, y3, z3 := &result.X, &result.Y, &result.Z + + // When the x coordinates are the same for two points on the curve, the + // y coordinates either must be the same, in which case it is point + // doubling, or they are opposite and the result is the point at + // infinity. Since any number of Jacobian coordinates can represent the + // same affine point, the x and y values need to be converted to like + // terms. + var z1z1, z2z2, u1, u2, s1, s2 FieldVal + z1z1.SquareVal(z1) // Z1Z1 = Z1^2 (mag: 1) + z2z2.SquareVal(z2) // Z2Z2 = Z2^2 (mag: 1) + u1.Set(x1).Mul(&z2z2).Normalize() // U1 = X1*Z2Z2 (mag: 1) + u2.Set(x2).Mul(&z1z1).Normalize() // U2 = X2*Z1Z1 (mag: 1) + s1.Set(y1).Mul(&z2z2).Mul(z2).Normalize() // S1 = Y1*Z2*Z2Z2 (mag: 1) + s2.Set(y2).Mul(&z1z1).Mul(z1).Normalize() // S2 = Y2*Z1*Z1Z1 (mag: 1) + if u1.Equals(&u2) { + if s1.Equals(&s2) { + // Since x1 == x2 and y1 == y2, point doubling must be + // done, otherwise the addition would end up dividing + // by zero. + DoubleNonConst(p1, result) + return + } + + // Since x1 == x2 and y1 == -y2, the sum is the point at + // infinity per the group law. + x3.SetInt(0) + y3.SetInt(0) + z3.SetInt(0) + return + } + + // Calculate X3, Y3, and Z3 according to the intermediate elements + // breakdown above. 
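// For comparison (an editorial summary of the costs already stated in each
// routine's comments): addZ1AndZ2EqualsOne needs 4 field multiplications and
// 2 squarings, addZ1EqualsZ2 needs 5 and 2, addZ2EqualsOne needs 7 and 4, and
// this generic routine needs 11 and 5, which is why AddNonConst below
// dispatches to the cheapest routine whose assumptions about the z values hold.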
+ var h, i, j, r, rr, v FieldVal + var negU1, negS1, negX3 FieldVal + negU1.Set(&u1).Negate(1) // negU1 = -U1 (mag: 2) + h.Add2(&u2, &negU1) // H = U2-U1 (mag: 3) + i.Set(&h).MulInt(2).Square() // I = (2*H)^2 (mag: 1) + j.Mul2(&h, &i) // J = H*I (mag: 1) + negS1.Set(&s1).Negate(1) // negS1 = -S1 (mag: 2) + r.Set(&s2).Add(&negS1).MulInt(2) // r = 2*(S2-S1) (mag: 6) + rr.SquareVal(&r) // rr = r^2 (mag: 1) + v.Mul2(&u1, &i) // V = U1*I (mag: 1) + x3.Set(&v).MulInt(2).Add(&j).Negate(3) // X3 = -(J+2*V) (mag: 4) + x3.Add(&rr) // X3 = r^2+X3 (mag: 5) + negX3.Set(x3).Negate(5) // negX3 = -X3 (mag: 6) + y3.Mul2(&s1, &j).MulInt(2).Negate(2) // Y3 = -(2*S1*J) (mag: 3) + y3.Add(v.Add(&negX3).Mul(&r)) // Y3 = r*(V-X3)+Y3 (mag: 4) + z3.Add2(z1, z2).Square() // Z3 = (Z1+Z2)^2 (mag: 1) + z3.Add(z1z1.Add(&z2z2).Negate(2)) // Z3 = Z3-(Z1Z1+Z2Z2) (mag: 4) + z3.Mul(&h) // Z3 = Z3*H (mag: 1) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// AddNonConst adds the passed Jacobian points together and stores the result in +// the provided result param in *non-constant* time. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func AddNonConst(p1, p2, result *JacobianPoint) { + // The point at infinity is the identity according to the group law for + // elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P. + if (p1.X.IsZero() && p1.Y.IsZero()) || p1.Z.IsZero() { + result.Set(p2) + return + } + if (p2.X.IsZero() && p2.Y.IsZero()) || p2.Z.IsZero() { + result.Set(p1) + return + } + + // Faster point addition can be achieved when certain assumptions are + // met. For example, when both points have the same z value, arithmetic + // on the z values can be avoided. This section thus checks for these + // conditions and calls an appropriate add function which is accelerated + // by using those assumptions. + isZ1One := p1.Z.IsOne() + isZ2One := p2.Z.IsOne() + switch { + case isZ1One && isZ2One: + addZ1AndZ2EqualsOne(p1, p2, result) + return + case p1.Z.Equals(&p2.Z): + addZ1EqualsZ2(p1, p2, result) + return + case isZ2One: + addZ2EqualsOne(p1, p2, result) + return + } + + // None of the above assumptions are true, so fall back to generic + // point addition. + addGeneric(p1, p2, result) +} + +// doubleZ1EqualsOne performs point doubling on the passed Jacobian point when +// the point is already known to have a z value of 1 and stores the result in +// the provided result param. That is to say result = 2*p. It performs faster +// point doubling than the generic routine since less arithmetic is needed due +// to the ability to avoid multiplication by the z value. +// +// NOTE: The resulting point will be normalized. +func doubleZ1EqualsOne(p, result *JacobianPoint) { + // This function uses the assumptions that z1 is 1, thus the point + // doubling formulas reduce to: + // + // X3 = (3*X1^2)^2 - 8*X1*Y1^2 + // Y3 = (3*X1^2)*(4*X1*Y1^2 - X3) - 8*Y1^4 + // Z3 = 2*Y1 + // + // To compute the above efficiently, this implementation splits the + // equation into intermediate elements which are used to minimize the + // number of field multiplications in favor of field squarings which + // are roughly 35% faster than field multiplications with the current + // implementation at the time this was written. 
+ // + // This uses a slightly modified version of the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl + // + // In particular it performs the calculations using the following: + // A = X1^2, B = Y1^2, C = B^2, D = 2*((X1+B)^2-A-C) + // E = 3*A, F = E^2, X3 = F-2*D, Y3 = E*(D-X3)-8*C + // Z3 = 2*Y1 + // + // This results in a cost of 1 field multiplication, 5 field squarings, + // 6 field additions, and 5 integer multiplications. + x1, y1 := &p.X, &p.Y + x3, y3, z3 := &result.X, &result.Y, &result.Z + var a, b, c, d, e, f FieldVal + z3.Set(y1).MulInt(2) // Z3 = 2*Y1 (mag: 2) + a.SquareVal(x1) // A = X1^2 (mag: 1) + b.SquareVal(y1) // B = Y1^2 (mag: 1) + c.SquareVal(&b) // C = B^2 (mag: 1) + b.Add(x1).Square() // B = (X1+B)^2 (mag: 1) + d.Set(&a).Add(&c).Negate(2) // D = -(A+C) (mag: 3) + d.Add(&b).MulInt(2) // D = 2*(B+D)(mag: 8) + e.Set(&a).MulInt(3) // E = 3*A (mag: 3) + f.SquareVal(&e) // F = E^2 (mag: 1) + x3.Set(&d).MulInt(2).Negate(16) // X3 = -(2*D) (mag: 17) + x3.Add(&f) // X3 = F+X3 (mag: 18) + f.Set(x3).Negate(18).Add(&d).Normalize() // F = D-X3 (mag: 1) + y3.Set(&c).MulInt(8).Negate(8) // Y3 = -(8*C) (mag: 9) + y3.Add(f.Mul(&e)) // Y3 = E*F+Y3 (mag: 10) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// doubleGeneric performs point doubling on the passed Jacobian point without +// any assumptions about the z value and stores the result in the provided +// result param. That is to say result = 2*p. It is the slowest of the point +// doubling routines due to requiring the most arithmetic. +// +// NOTE: The resulting point will be normalized. +func doubleGeneric(p, result *JacobianPoint) { + // Point doubling formula for Jacobian coordinates for the secp256k1 + // curve: + // + // X3 = (3*X1^2)^2 - 8*X1*Y1^2 + // Y3 = (3*X1^2)*(4*X1*Y1^2 - X3) - 8*Y1^4 + // Z3 = 2*Y1*Z1 + // + // To compute the above efficiently, this implementation splits the + // equation into intermediate elements which are used to minimize the + // number of field multiplications in favor of field squarings which + // are roughly 35% faster than field multiplications with the current + // implementation at the time this was written. + // + // This uses a slightly modified version of the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + // + // In particular it performs the calculations using the following: + // A = X1^2, B = Y1^2, C = B^2, D = 2*((X1+B)^2-A-C) + // E = 3*A, F = E^2, X3 = F-2*D, Y3 = E*(D-X3)-8*C + // Z3 = 2*Y1*Z1 + // + // This results in a cost of 1 field multiplication, 5 field squarings, + // 6 field additions, and 5 integer multiplications. 
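// As a sanity check on the doubling formulas above (editorial; standard
// elliptic curve math rather than anything specific to this file): in affine
// coordinates with a = 0, the tangent slope is λ = 3*X1^2 / (2*Y1) and
// doubling gives X3 = λ^2 - 2*X1 and Y3 = λ*(X1 - X3) - Y1. The Jacobian
// formulas are the same expressions with the division deferred into Z3, e.g.
// ((3*X1^2)^2 - 8*X1*Y1^2) / (2*Y1)^2 is exactly λ^2 - 2*X1 when Z1 = 1.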
+ x1, y1, z1 := &p.X, &p.Y, &p.Z + x3, y3, z3 := &result.X, &result.Y, &result.Z + var a, b, c, d, e, f FieldVal + z3.Mul2(y1, z1).MulInt(2) // Z3 = 2*Y1*Z1 (mag: 2) + a.SquareVal(x1) // A = X1^2 (mag: 1) + b.SquareVal(y1) // B = Y1^2 (mag: 1) + c.SquareVal(&b) // C = B^2 (mag: 1) + b.Add(x1).Square() // B = (X1+B)^2 (mag: 1) + d.Set(&a).Add(&c).Negate(2) // D = -(A+C) (mag: 3) + d.Add(&b).MulInt(2) // D = 2*(B+D)(mag: 8) + e.Set(&a).MulInt(3) // E = 3*A (mag: 3) + f.SquareVal(&e) // F = E^2 (mag: 1) + x3.Set(&d).MulInt(2).Negate(16) // X3 = -(2*D) (mag: 17) + x3.Add(&f) // X3 = F+X3 (mag: 18) + f.Set(x3).Negate(18).Add(&d).Normalize() // F = D-X3 (mag: 1) + y3.Set(&c).MulInt(8).Negate(8) // Y3 = -(8*C) (mag: 9) + y3.Add(f.Mul(&e)) // Y3 = E*F+Y3 (mag: 10) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// DoubleNonConst doubles the passed Jacobian point and stores the result in the +// provided result parameter in *non-constant* time. +// +// NOTE: The point must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func DoubleNonConst(p, result *JacobianPoint) { + // Doubling the point at infinity is still infinity. + if p.Y.IsZero() || p.Z.IsZero() { + result.X.SetInt(0) + result.Y.SetInt(0) + result.Z.SetInt(0) + return + } + + // Slightly faster point doubling can be achieved when the z value is 1 + // by avoiding the multiplication on the z value. This section calls + // a point doubling function which is accelerated by using that + // assumption when possible. + if p.Z.IsOne() { + doubleZ1EqualsOne(p, result) + return + } + + // Fall back to generic point doubling which works with arbitrary z + // values. + doubleGeneric(p, result) +} + +// mulAdd64 multiplies the two passed base 2^64 digits together, adds the given +// value to the result, and returns the 128-bit result via a (hi, lo) tuple +// where the upper half of the bits are returned in hi and the lower half in lo. +func mulAdd64(digit1, digit2, m uint64) (hi, lo uint64) { + // Note the carry on the final add is safe to discard because the maximum + // possible value is: + // (2^64 - 1)(2^64 - 1) + (2^64 - 1) = 2^128 - 2^64 + // and: + // 2^128 - 2^64 < 2^128. + var c uint64 + hi, lo = bits.Mul64(digit1, digit2) + lo, c = bits.Add64(lo, m, 0) + hi, _ = bits.Add64(hi, 0, c) + return hi, lo +} + +// mulAdd64Carry multiplies the two passed base 2^64 digits together, adds both +// the given value and carry to the result, and returns the 128-bit result via a +// (hi, lo) tuple where the upper half of the bits are returned in hi and the +// lower half in lo. +func mulAdd64Carry(digit1, digit2, m, c uint64) (hi, lo uint64) { + // Note the carry on the high order add is safe to discard because the + // maximum possible value is: + // (2^64 - 1)(2^64 - 1) + 2*(2^64 - 1) = 2^128 - 1 + // and: + // 2^128 - 1 < 2^128. + var c2 uint64 + hi, lo = mulAdd64(digit1, digit2, m) + lo, c2 = bits.Add64(lo, c, 0) + hi, _ = bits.Add64(hi, 0, c2) + return hi, lo +} + +// mul512Rsh320Round computes the full 512-bit product of the two given scalars, +// right shifts the result by 320 bits, rounds to the nearest integer, and +// returns the result in constant time. +// +// Note that despite the inputs and output being mod n scalars, the 512-bit +// product is NOT reduced mod N prior to the right shift. 
This is intentional +// because it is used for replacing division with multiplication and thus the +// intermediate results must be done via a field extension to a larger field. +func mul512Rsh320Round(n1, n2 *ModNScalar) ModNScalar { + // Convert n1 and n2 to base 2^64 digits. + n1Digit0 := uint64(n1.n[0]) | uint64(n1.n[1])<<32 + n1Digit1 := uint64(n1.n[2]) | uint64(n1.n[3])<<32 + n1Digit2 := uint64(n1.n[4]) | uint64(n1.n[5])<<32 + n1Digit3 := uint64(n1.n[6]) | uint64(n1.n[7])<<32 + n2Digit0 := uint64(n2.n[0]) | uint64(n2.n[1])<<32 + n2Digit1 := uint64(n2.n[2]) | uint64(n2.n[3])<<32 + n2Digit2 := uint64(n2.n[4]) | uint64(n2.n[5])<<32 + n2Digit3 := uint64(n2.n[6]) | uint64(n2.n[7])<<32 + + // Compute the full 512-bit product n1*n2. + var r0, r1, r2, r3, r4, r5, r6, r7, c uint64 + + // Terms resulting from the product of the first digit of the second number + // by all digits of the first number. + // + // Note that r0 is ignored because it is not needed to compute the higher + // terms and it is shifted out below anyway. + c, _ = bits.Mul64(n2Digit0, n1Digit0) + c, r1 = mulAdd64(n2Digit0, n1Digit1, c) + c, r2 = mulAdd64(n2Digit0, n1Digit2, c) + r4, r3 = mulAdd64(n2Digit0, n1Digit3, c) + + // Terms resulting from the product of the second digit of the second number + // by all digits of the first number. + // + // Note that r1 is ignored because it is no longer needed to compute the + // higher terms and it is shifted out below anyway. + c, _ = mulAdd64(n2Digit1, n1Digit0, r1) + c, r2 = mulAdd64Carry(n2Digit1, n1Digit1, r2, c) + c, r3 = mulAdd64Carry(n2Digit1, n1Digit2, r3, c) + r5, r4 = mulAdd64Carry(n2Digit1, n1Digit3, r4, c) + + // Terms resulting from the product of the third digit of the second number + // by all digits of the first number. + // + // Note that r2 is ignored because it is no longer needed to compute the + // higher terms and it is shifted out below anyway. + c, _ = mulAdd64(n2Digit2, n1Digit0, r2) + c, r3 = mulAdd64Carry(n2Digit2, n1Digit1, r3, c) + c, r4 = mulAdd64Carry(n2Digit2, n1Digit2, r4, c) + r6, r5 = mulAdd64Carry(n2Digit2, n1Digit3, r5, c) + + // Terms resulting from the product of the fourth digit of the second number + // by all digits of the first number. + // + // Note that r3 is ignored because it is no longer needed to compute the + // higher terms and it is shifted out below anyway. + c, _ = mulAdd64(n2Digit3, n1Digit0, r3) + c, r4 = mulAdd64Carry(n2Digit3, n1Digit1, r4, c) + c, r5 = mulAdd64Carry(n2Digit3, n1Digit2, r5, c) + r7, r6 = mulAdd64Carry(n2Digit3, n1Digit3, r6, c) + + // At this point the upper 256 bits of the full 512-bit product n1*n2 are in + // r4..r7 (recall the low order results were discarded as noted above). + // + // Right shift the result 320 bits. Note that the MSB of r4 determines + // whether or not to round because it is the final bit that is shifted out. + // + // Also, notice that r3..r7 would also ordinarily be set to 0 as well for + // the full shift, but that is skipped since they are no longer used as + // their values are known to be zero. + roundBit := r4 >> 63 + r2, r1, r0 = r7, r6, r5 + + // Conditionally add 1 depending on the round bit in constant time. + r0, c = bits.Add64(r0, roundBit, 0) + r1, c = bits.Add64(r1, 0, c) + r2, r3 = bits.Add64(r2, 0, c) + + // Finally, convert the result to a mod n scalar. + // + // No modular reduction is needed because the result is guaranteed to be + // less than the group order given the group order is > 2^255 and the + // maximum possible value of the result is 2^192. 
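// (Editorial note on the bound above: the product of two 256-bit scalars is at
// most 512 bits, so after discarding the low 320 bits at most 512 - 320 = 192
// bits remain. Those bits live in r0..r2, and r3 can only ever hold the carry
// out of the rounding addition.)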
+ var result ModNScalar + result.n[0] = uint32(r0) + result.n[1] = uint32(r0 >> 32) + result.n[2] = uint32(r1) + result.n[3] = uint32(r1 >> 32) + result.n[4] = uint32(r2) + result.n[5] = uint32(r2 >> 32) + result.n[6] = uint32(r3) + result.n[7] = uint32(r3 >> 32) + return result +} + +// splitK returns two scalars (k1 and k2) that are a balanced length-two +// representation of the provided scalar such that k ≡ k1 + k2*λ (mod N), where +// N is the secp256k1 group order. +func splitK(k *ModNScalar) (ModNScalar, ModNScalar) { + // The ultimate goal is to decompose k into two scalars that are around + // half the bit length of k such that the following equation is satisfied: + // + // k1 + k2*λ ≡ k (mod n) + // + // The strategy used here is based on algorithm 3.74 from [GECC] with a few + // modifications to make use of the more efficient mod n scalar type, avoid + // some costly long divisions, and minimize the number of calculations. + // + // Start by defining a function that takes a vector v = <a,b> ∈ ℤ⨯ℤ: + // + // f(v) = a + bλ (mod n) + // + // Then, find two vectors, v1 = <a1,b1>, and v2 = <a2,b2> in ℤ⨯ℤ such that: + // 1) v1 and v2 are linearly independent + // 2) f(v1) = f(v2) = 0 + // 3) v1 and v2 have small Euclidean norm + // + // The vectors that satisfy these properties are found via the Euclidean + // algorithm and are precomputed since both n and λ are fixed values for the + // secp256k1 curve. See genprecomps.go for derivation details. + // + // Next, consider k as a vector <k, 0> in ℚ⨯ℚ and by linear algebra write: + // + // <k, 0> = g1*v1 + g2*v2, where g1, g2 ∈ ℚ + // + // Note that, per above, the components of vector v1 are a1 and b1 while the + // components of vector v2 are a2 and b2. Given the vectors v1 and v2 were + // generated such that a1*b2 - a2*b1 = n, solving the equation for g1 and g2 + // yields: + // + // g1 = b2*k / n + // g2 = -b1*k / n + // + // Observe: + // <k, 0> = g1*v1 + g2*v2 + // = (b2*k/n)*<a1,b1> + (-b1*k/n)*<a2,b2> | substitute + // = <a1*b2*k/n, b1*b2*k/n> + <-a2*b1*k/n, -b2*b1*k/n> | scalar mul + // = <a1*b2*k/n - a2*b1*k/n, 0> | vector add + // = <[a1*b2*k - a2*b1*k]/n, 0> | simplify + // = <k*[a1*b2 - a2*b1]/n, 0> | factor out k + // = <k*n/n, 0> | substitute + // = <k, 0> | simplify + // + // Now, consider an integer-valued vector v: + // + // v = c1*v1 + c2*v2, where c1, c2 ∈ ℤ (mod n) + // + // Since vectors v1 and v2 are linearly independent and were generated such + // that f(v1) = f(v2) = 0, all possible scalars c1 and c2 also produce a + // vector v such that f(v) = 0. + // + // In other words, c1 and c2 can be any integers and the resulting + // decomposition will still satisfy the required equation. However, since + // the goal is to produce a balanced decomposition that provides a + // performance advantage by minimizing max(k1, k2), c1 and c2 need to be + // integers close to g1 and g2, respectively, so the resulting vector v is + // an integer-valued vector that is close to <k, 0>. + // + // Finally, consider the vector u: + // + // u = <k, 0> - v + // + // It follows that f(u) = k and thus the two components of vector u satisfy + // the required equation: + // + // k1 + k2*λ ≡ k (mod n) + // + // Choosing c1 and c2: + // ------------------- + // + // As mentioned above, c1 and c2 need to be integers close to g1 and g2, + // respectively.
The algorithm in [GECC] chooses the following values: + // + // c1 = round(g1) = round(b2*k / n) + // c2 = round(g2) = round(-b1*k / n) + // + // However, as section 3.4.2 of [STWS] notes, the aforementioned approach + // requires costly long divisions that can be avoided by precomputing + // rounded estimates as follows: + // + // t = bitlen(n) + 1 + // z1 = round(2^t * b2 / n) + // z2 = round(2^t * -b1 / n) + // + // Then, use those precomputed estimates to perform a multiplication by k + // along with a floored division by 2^t, which is a simple right shift by t: + // + // c1 = floor(k * z1 / 2^t) = (k * z1) >> t + // c2 = floor(k * z2 / 2^t) = (k * z2) >> t + // + // Finally, round up if the last bit discarded in the right shift by t is set by + // adding 1. + // + // As a further optimization, rather than setting t = bitlen(n) + 1 = 257 as + // stated by [STWS], this implementation uses a higher precision estimate of + // t = bitlen(n) + 64 = 320 because it allows simplification of the shifts + // in the internal calculations that are done via uint64s and also allows + // the use of floor in the precomputations. + // + // Thus, the calculations this implementation uses are: + // + // z1 = floor(b2<<320 / n) | precomputed + // z2 = floor((-b1)<<320 / n) | precomputed + // c1 = ((k * z1) >> 320) + (((k * z1) >> 319) & 1) + // c2 = ((k * z2) >> 320) + (((k * z2) >> 319) & 1) + // + // Putting it all together: + // ------------------------ + // + // Calculate the following vectors using the values discussed above: + // + // v = c1*v1 + c2*v2 + // u = <k, 0> - v + // + // The two components of the resulting vector v are: + // va = c1*a1 + c2*a2 + // vb = c1*b1 + c2*b2 + // + // Thus, the two components of the resulting vector u are: + // k1 = k - va + // k2 = 0 - vb = -vb + // + // As some final optimizations: + // + // 1) Note that k1 + k2*λ ≡ k (mod n) means that k1 ≡ k - k2*λ (mod n). + // Therefore, the computation of va can be avoided to save two + // field multiplications and a field addition. + // + // 2) Since k1 ≡ k - k2*λ ≡ k + k2*(-λ), an additional field negation is + // saved by storing and using the negative version of λ. + // + // 3) Since k2 ≡ -vb ≡ -(c1*b1 + c2*b2) ≡ c1*(-b1) + c2*(-b2), one more + // field negation is saved by storing and using the negative versions of + // b1 and b2. + // + // k2 = c1*(-b1) + c2*(-b2) + // k1 = k + k2*(-λ) + var k1, k2 ModNScalar + c1 := mul512Rsh320Round(k, endoZ1) + c2 := mul512Rsh320Round(k, endoZ2) + k2.Add2(c1.Mul(endoNegB1), c2.Mul(endoNegB2)) + k1.Mul2(&k2, endoNegLambda).Add(k) + return k1, k2 +} + +// nafScalar represents a positive integer up to a maximum value of 2^256 - 1 +// encoded in non-adjacent form. +// +// NAF is a signed-digit representation where each digit can be +1, 0, or -1. +// +// In order to efficiently encode that information, this type uses two arrays, a +// "positive" array where set bits represent the +1 signed digits and a +// "negative" array where set bits represent the -1 signed digits. 0 is +// represented by neither array having a bit set in that position. +// +// The Pos and Neg methods return the aforementioned positive and negative +// arrays, respectively. +type nafScalar struct { + // pos houses the positive portion of the representation. An additional + // byte is required for the positive portion because the NAF encoding can be + // up to 1 bit longer than the normal binary encoding of the value. + // + // neg houses the negative portion of the representation.
Even though the + // additional byte is not required for the negative portion, since it can + // never exceed the length of the normal binary encoding of the value, + // keeping the same length for positive and negative portions simplifies + // working with the representation and allows extra conditional branches to + // be avoided. + // + // start and end specify the starting and ending index to use within the pos + // and neg arrays, respectively. This allows fixed size arrays to be used + // versus needing to dynamically allocate space on the heap. + // + // NOTE: The fields are defined in the order that they are to minimize the + // padding on 32-bit and 64-bit platforms. + pos [33]byte + start, end uint8 + neg [33]byte +} + +// Pos returns the bytes of the encoded value with bits set in the positions +// that represent a signed digit of +1. +func (s *nafScalar) Pos() []byte { + return s.pos[s.start:s.end] +} + +// Neg returns the bytes of the encoded value with bits set in the positions +// that represent a signed digit of -1. +func (s *nafScalar) Neg() []byte { + return s.neg[s.start:s.end] +} + +// naf takes a positive integer up to a maximum value of 2^256 - 1 and returns +// its non-adjacent form (NAF), which is a unique signed-digit representation +// such that no two consecutive digits are nonzero. See the documentation for +// the returned type for details on how the representation is encoded +// efficiently and how to interpret it +// +// NAF is useful in that it has the fewest nonzero digits of any signed digit +// representation, only 1/3rd of its digits are nonzero on average, and at least +// half of the digits will be 0. +// +// The aforementioned properties are particularly beneficial for optimizing +// elliptic curve point multiplication because they effectively minimize the +// number of required point additions in exchange for needing to perform a mix +// of fewer point additions and subtractions and possibly one additional point +// doubling. This is an excellent tradeoff because subtraction of points has +// the same computational complexity as addition of points and point doubling is +// faster than both. +func naf(k []byte) nafScalar { + // Strip leading zero bytes. + for len(k) > 0 && k[0] == 0x00 { + k = k[1:] + } + + // The non-adjacent form (NAF) of a positive integer k is an expression + // k = ∑_(i=0, l-1) k_i * 2^i where k_i ∈ {0,±1}, k_(l-1) != 0, and no two + // consecutive digits k_i are nonzero. + // + // The traditional method of computing the NAF of a positive integer is + // given by algorithm 3.30 in [GECC]. It consists of repeatedly dividing k + // by 2 and choosing the remainder so that the quotient (k−r)/2 is even + // which ensures the next NAF digit is 0. This requires log_2(k) steps. + // + // However, in [BRID], Prodinger notes that a closed form expression for the + // NAF representation is the bitwise difference 3k/2 - k/2. This is more + // efficient as it can be computed in O(1) versus the O(log(n)) of the + // traditional approach. + // + // The following code makes use of that formula to compute the NAF more + // efficiently. + // + // To understand the logic here, observe that the only way the NAF has a + // nonzero digit at a given bit is when either 3k/2 or k/2 has a bit set in + // that position, but not both. In other words, the result of a bitwise + // xor. This can be seen simply by considering that when the bits are the + // same, the subtraction is either 0-0 or 1-1, both of which are 0. 
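+ //
+ // Editor's note (illustrative example, not part of the upstream comments):
+ // for k = 7, k/2 = 0b0011 and 3k/2 = 0b1010, so their xor is 0b1001, which
+ // marks the two nonzero NAF digit positions (2^3 and 2^0) and recovers the
+ // expected NAF of 7 = 8 - 1.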
+ // + // Further, observe that the "+1" digits in the result are contributed by + // 3k/2 while the "-1" digits are from k/2. So, they can be determined by + // taking the bitwise and of each respective value with the result of the + // xor which identifies which bits are nonzero. + // + // Using that information, this loops backwards from the least significant + // byte to the most significant byte while performing the aforementioned + // calculations by propagating the potential carry and high order bit from + // the next word during the right shift. + kLen := len(k) + var result nafScalar + var carry uint8 + for byteNum := kLen - 1; byteNum >= 0; byteNum-- { + // Calculate k/2. Notice the carry from the previous word is added and + // the low order bit from the next word is shifted in accordingly. + kc := uint16(k[byteNum]) + uint16(carry) + var nextWord uint8 + if byteNum > 0 { + nextWord = k[byteNum-1] + } + halfK := kc>>1 | uint16(nextWord<<7) + + // Calculate 3k/2 and determine the non-zero digits in the result. + threeHalfK := kc + halfK + nonZeroResultDigits := threeHalfK ^ halfK + + // Determine the signed digits {0, ±1}. + result.pos[byteNum+1] = uint8(threeHalfK & nonZeroResultDigits) + result.neg[byteNum+1] = uint8(halfK & nonZeroResultDigits) + + // Propagate the potential carry from the 3k/2 calculation. + carry = uint8(threeHalfK >> 8) + } + result.pos[0] = carry + + // Set the starting and ending positions within the fixed size arrays to + // identify the bytes that are actually used. This is important since the + // encoding is big endian and thus trailing zero bytes changes its value. + result.start = 1 - carry + result.end = uint8(kLen + 1) + return result +} + +// ScalarMultNonConst multiplies k*P where k is a scalar modulo the curve order +// and P is a point in Jacobian projective coordinates and stores the result in +// the provided Jacobian point. +// +// NOTE: The point must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func ScalarMultNonConst(k *ModNScalar, point, result *JacobianPoint) { + // ------------------------------------------------------------------------- + // This makes use of the following efficiently-computable endomorphism to + // accelerate the computation: + // + // φ(P) ⟼ λ*P = (β*P.x mod p, P.y) + // + // In other words, there is a special scalar λ that every point on the + // elliptic curve can be multiplied by that will result in the same point as + // performing a single field multiplication of the point's X coordinate by + // the special value β. + // + // This is useful because scalar point multiplication is significantly more + // expensive than a single field multiplication given the former involves a + // series of point doublings and additions which themselves consist of a + // combination of several field multiplications, squarings, and additions. 
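+ //
+ // Editor's note (illustrative, not from the upstream comments): in the code
+ // below, φ(P) is obtained with a single field multiplication of the X
+ // coordinate by β (p2.X.Mul(endoBeta)), whereas computing λ*P as a generic
+ // scalar multiplication would require thousands of field operations.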
+ // + // So, the idea behind making use of the endomorphism is thus to decompose + // the scalar into two scalars that are each about half the bit length of + // the original scalar such that: + // + // k ≡ k1 + k2*λ (mod n) + // + // This in turn allows the scalar point multiplication to be performed as a + // sum of two smaller half-length multiplications as follows: + // + // k*P = (k1 + k2*λ)*P + // = k1*P + k2*λ*P + // = k1*P + k2*φ(P) + // + // Thus, a speedup is achieved so long as it's faster to decompose the + // scalar, compute φ(P), and perform a simultaneous multiply of the + // half-length point multiplications than it is to compute a full width + // point multiplication. + // + // In practice, benchmarks show the current implementation provides a + // speedup of around 30-35% versus not using the endomorphism. + // + // See section 3.5 in [GECC] for a more rigorous treatment. + // ------------------------------------------------------------------------- + + // Per above, the main equation here to remember is: + // k*P = k1*P + k2*φ(P) + // + // p1 below is P in the equation while p2 is φ(P) in the equation. + // + // NOTE: φ(x,y) = (β*x,y). The Jacobian z coordinates are the same, so this + // math goes through. + // + // Also, calculate -p1 and -p2 for use in the NAF optimization. + p1, p1Neg := new(JacobianPoint), new(JacobianPoint) + p1.Set(point) + p1Neg.Set(p1) + p1Neg.Y.Negate(1).Normalize() + p2, p2Neg := new(JacobianPoint), new(JacobianPoint) + p2.Set(p1) + p2.X.Mul(endoBeta).Normalize() + p2Neg.Set(p2) + p2Neg.Y.Negate(1).Normalize() + + // Decompose k into k1 and k2 such that k = k1 + k2*λ (mod n) where k1 and + // k2 are around half the bit length of k in order to halve the number of EC + // operations. + // + // Notice that this also flips the sign of the scalars and points as needed + // to minimize the bit lengths of the scalars k1 and k2. + // + // This is done because the scalars are operating modulo the group order + // which means that when they would otherwise be a small negative magnitude + // they will instead be a large positive magnitude. Since the goal is for + // the scalars to have a small magnitude to achieve a performance boost, use + // their negation when they are greater than the half order of the group and + // flip the positive and negative values of the corresponding point that + // will be multiplied by to compensate. + // + // In other words, transform the calc when k1 is over the half order to: + // k1*P = -k1*-P + // + // Similarly, transform the calc when k2 is over the half order to: + // k2*φ(P) = -k2*-φ(P) + k1, k2 := splitK(k) + if k1.IsOverHalfOrder() { + k1.Negate() + p1, p1Neg = p1Neg, p1 + } + if k2.IsOverHalfOrder() { + k2.Negate() + p2, p2Neg = p2Neg, p2 + } + + // Convert k1 and k2 into their NAF representations since NAF has a lot more + // zeros overall on average which minimizes the number of required point + // additions in exchange for a mix of fewer point additions and subtractions + // at the cost of one additional point doubling. + // + // This is an excellent tradeoff because subtraction of points has the same + // computational complexity as addition of points and point doubling is + // faster than both. + // + // Concretely, on average, 1/2 of all bits will be non-zero with the normal + // binary representation whereas only 1/3rd of the bits will be non-zero + // with NAF. + // + // The Pos version of the bytes contain the +1s and the Neg versions contain + // the -1s. 
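+ //
+ // Editor's note: a minimal usage sketch (not upstream code), assuming a
+ // caller-supplied 32-byte scalar kBytes and a parsed public key pubKey:
+ //
+ //	var k ModNScalar
+ //	k.SetByteSlice(kBytes)           // reduced modulo the group order
+ //	var p, kp JacobianPoint
+ //	pubKey.AsJacobian(&p)            // affine input, so already normalized
+ //	ScalarMultNonConst(&k, &p, &kp)  // kp = k*P = k1*P + k2*φ(P) internally
+ //	kp.ToAffine()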
+ k1Bytes, k2Bytes := k1.Bytes(), k2.Bytes() + k1NAF, k2NAF := naf(k1Bytes[:]), naf(k2Bytes[:]) + k1PosNAF, k1NegNAF := k1NAF.Pos(), k1NAF.Neg() + k2PosNAF, k2NegNAF := k2NAF.Pos(), k2NAF.Neg() + k1Len, k2Len := len(k1PosNAF), len(k2PosNAF) + + // Add left-to-right using the NAF optimization. See algorithm 3.77 from + // [GECC]. + // + // Point Q = ∞ (point at infinity). + var q JacobianPoint + m := k1Len + if m < k2Len { + m = k2Len + } + for i := 0; i < m; i++ { + // Since k1 and k2 are potentially different lengths and the calculation + // is being done left to right, pad the front of the shorter one with + // 0s. + var k1BytePos, k1ByteNeg, k2BytePos, k2ByteNeg byte + if i >= m-k1Len { + k1BytePos, k1ByteNeg = k1PosNAF[i-(m-k1Len)], k1NegNAF[i-(m-k1Len)] + } + if i >= m-k2Len { + k2BytePos, k2ByteNeg = k2PosNAF[i-(m-k2Len)], k2NegNAF[i-(m-k2Len)] + } + + for mask := uint8(1 << 7); mask > 0; mask >>= 1 { + // Q = 2 * Q + DoubleNonConst(&q, &q) + + // Add or subtract the first point based on the signed digit of the + // NAF representation of k1 at this bit position. + // + // +1: Q = Q + p1 + // -1: Q = Q - p1 + // 0: Q = Q (no change) + if k1BytePos&mask == mask { + AddNonConst(&q, p1, &q) + } else if k1ByteNeg&mask == mask { + AddNonConst(&q, p1Neg, &q) + } + + // Add or subtract the second point based on the signed digit of the + // NAF representation of k2 at this bit position. + // + // +1: Q = Q + p2 + // -1: Q = Q - p2 + // 0: Q = Q (no change) + if k2BytePos&mask == mask { + AddNonConst(&q, p2, &q) + } else if k2ByteNeg&mask == mask { + AddNonConst(&q, p2Neg, &q) + } + } + } + + result.Set(&q) +} + +// ScalarBaseMultNonConst multiplies k*G where k is a scalar modulo the curve +// order and G is the base point of the group and stores the result in the +// provided Jacobian point. +// +// NOTE: The resulting point will be normalized. +func ScalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) { + scalarBaseMultNonConst(k, result) +} + +// jacobianG is the secp256k1 base point converted to Jacobian coordinates and +// is defined here to avoid repeatedly converting it. +var jacobianG = func() JacobianPoint { + var G JacobianPoint + bigAffineToJacobian(curveParams.Gx, curveParams.Gy, &G) + return G +}() + +// scalarBaseMultNonConstSlow computes k*G through ScalarMultNonConst. +func scalarBaseMultNonConstSlow(k *ModNScalar, result *JacobianPoint) { + ScalarMultNonConst(k, &jacobianG, result) +} + +// scalarBaseMultNonConstFast computes k*G through the precomputed lookup +// tables. +func scalarBaseMultNonConstFast(k *ModNScalar, result *JacobianPoint) { + bytePoints := s256BytePoints() + + // Start with the point at infinity. + result.X.Zero() + result.Y.Zero() + result.Z.Zero() + + // bytePoints has all 256 byte points for each 8-bit window. The strategy + // is to add up the byte points. This is best understood by expressing k in + // base-256 which it already sort of is. Each "digit" in the 8-bit window + // can be looked up using bytePoints and added together. + kb := k.Bytes() + for i := 0; i < len(kb); i++ { + pt := &bytePoints[i][kb[i]] + AddNonConst(result, pt, result) + } +} + +// isOnCurve returns whether or not the affine point (x,y) is on the curve. 
+func isOnCurve(fx, fy *FieldVal) bool { + // Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7 + y2 := new(FieldVal).SquareVal(fy).Normalize() + result := new(FieldVal).SquareVal(fx).Mul(fx).AddInt(7).Normalize() + return y2.Equals(result) +} + +// DecompressY attempts to calculate the Y coordinate for the given X coordinate +// such that the result pair is a point on the secp256k1 curve. It adjusts Y +// based on the desired oddness and returns whether or not it was successful +// since not all X coordinates are valid. +// +// The magnitude of the provided X coordinate field value must be a max of 8 for +// a correct result. The resulting Y field value will have a magnitude of 1. +// +// Preconditions: +// - The input field value MUST have a max magnitude of 8 +// Output Normalized: Yes if the func returns true, no otherwise +// Output Max Magnitude: 1 +func DecompressY(x *FieldVal, odd bool, resultY *FieldVal) bool { + // The curve equation for secp256k1 is: y^2 = x^3 + 7. Thus + // y = +-sqrt(x^3 + 7). + // + // The x coordinate must be invalid if there is no square root for the + // calculated rhs because it means the X coordinate is not for a point on + // the curve. + x3PlusB := new(FieldVal).SquareVal(x).Mul(x).AddInt(7) + if hasSqrt := resultY.SquareRootVal(x3PlusB); !hasSqrt { + return false + } + if resultY.Normalize().IsOdd() != odd { + resultY.Negate(1).Normalize() + } + return true +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_embedded.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_embedded.go new file mode 100644 index 000000000000..16288318c1e4 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_embedded.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +//go:build tinygo + +package secp256k1 + +// This file contains the variants suitable for +// memory or storage constrained environments. + +func scalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) { + scalarBaseMultNonConstSlow(k, result) +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_precompute.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_precompute.go new file mode 100644 index 000000000000..cf84f770edd0 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve_precompute.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +//go:build !tinygo + +package secp256k1 + +// This file contains the variants that don't fit in +// memory or storage constrained environments. + +func scalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) { + scalarBaseMultNonConstFast(k, result) +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go new file mode 100644 index 000000000000..ac01e2343ca2 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go @@ -0,0 +1,59 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package secp256k1 implements optimized secp256k1 elliptic curve operations in +pure Go. 
+ +This package provides an optimized pure Go implementation of elliptic curve +cryptography operations over the secp256k1 curve as well as data structures and +functions for working with public and private secp256k1 keys. See +https://www.secg.org/sec2-v2.pdf for details on the standard. + +In addition, sub packages are provided to produce, verify, parse, and serialize +ECDSA signatures and EC-Schnorr-DCRv0 (a custom Schnorr-based signature scheme +specific to Decred) signatures. See the README.md files in the relevant sub +packages for more details about those aspects. + +An overview of the features provided by this package are as follows: + + - Private key generation, serialization, and parsing + - Public key generation, serialization and parsing per ANSI X9.62-1998 + - Parses uncompressed, compressed, and hybrid public keys + - Serializes uncompressed and compressed public keys + - Specialized types for performing optimized and constant time field operations + - FieldVal type for working modulo the secp256k1 field prime + - ModNScalar type for working modulo the secp256k1 group order + - Elliptic curve operations in Jacobian projective coordinates + - Point addition + - Point doubling + - Scalar multiplication with an arbitrary point + - Scalar multiplication with the base point (group generator) + - Point decompression from a given x coordinate + - Nonce generation via RFC6979 with support for extra data and version + information that can be used to prevent nonce reuse between signing + algorithms + +It also provides an implementation of the Go standard library crypto/elliptic +Curve interface via the S256 function so that it may be used with other packages +in the standard library such as crypto/tls, crypto/x509, and crypto/ecdsa. +However, in the case of ECDSA, it is highly recommended to use the ecdsa sub +package of this package instead since it is optimized specifically for secp256k1 +and is significantly faster as a result. + +Although this package was primarily written for dcrd, it has intentionally been +designed so it can be used as a standalone package for any projects needing to +use optimized secp256k1 elliptic curve cryptography. + +Finally, a comprehensive suite of tests is provided to provide a high level of +quality assurance. + +# Use of secp256k1 in Decred + +At the time of this writing, the primary public key cryptography in widespread +use on the Decred network used to secure coins is based on elliptic curves +defined by the secp256k1 domain parameters. +*/ +package secp256k1 diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go new file mode 100644 index 000000000000..96869a3cd908 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go @@ -0,0 +1,21 @@ +// Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015-2023 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// GenerateSharedSecret generates a shared secret based on a private key and a +// public key using Diffie-Hellman key exchange (ECDH) (RFC 5903). +// RFC5903 Section 9 states we should only return x. +// +// It is recommended to securely hash the result before using as a cryptographic +// key. 
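+//
+// For example (editor's illustration, not part of the upstream API), a caller
+// might hash the shared secret with crypto/sha256 before using it as a key,
+// where privKey and pubKey are placeholders for the local private key and the
+// peer's public key:
+//
+//	secret := GenerateSharedSecret(privKey, pubKey)
+//	key := sha256.Sum256(secret)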
+func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte { + var point, result JacobianPoint + pubkey.AsJacobian(&point) + ScalarMultNonConst(&privkey.Key, &point, &result) + result.ToAffine() + xBytes := result.X.Bytes() + return xBytes[:] +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go new file mode 100644 index 000000000000..a3a45af31785 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go @@ -0,0 +1,255 @@ +// Copyright 2020-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// References: +// [SECG]: Recommended Elliptic Curve Domain Parameters +// https://www.secg.org/sec2-v2.pdf +// +// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone) + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "math/big" +) + +// CurveParams contains the parameters for the secp256k1 curve. +type CurveParams struct { + // P is the prime used in the secp256k1 field. + P *big.Int + + // N is the order of the secp256k1 curve group generated by the base point. + N *big.Int + + // Gx and Gy are the x and y coordinate of the base point, respectively. + Gx, Gy *big.Int + + // BitSize is the size of the underlying secp256k1 field in bits. + BitSize int + + // H is the cofactor of the secp256k1 curve. + H int + + // ByteSize is simply the bit size / 8 and is provided for convenience + // since it is calculated repeatedly. + ByteSize int +} + +// Curve parameters taken from [SECG] section 2.4.1. +var curveParams = CurveParams{ + P: fromHex("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"), + N: fromHex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"), + Gx: fromHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"), + Gy: fromHex("483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"), + BitSize: 256, + H: 1, + ByteSize: 256 / 8, +} + +// Params returns the secp256k1 curve parameters for convenience. +func Params() *CurveParams { + return &curveParams +} + +// KoblitzCurve provides an implementation for secp256k1 that fits the ECC Curve +// interface from crypto/elliptic. +type KoblitzCurve struct { + *elliptic.CurveParams +} + +// bigAffineToJacobian takes an affine point (x, y) as big integers and converts +// it to Jacobian point with Z=1. +func bigAffineToJacobian(x, y *big.Int, result *JacobianPoint) { + result.X.SetByteSlice(x.Bytes()) + result.Y.SetByteSlice(y.Bytes()) + result.Z.SetInt(1) +} + +// jacobianToBigAffine takes a Jacobian point (x, y, z) as field values and +// converts it to an affine point as big integers. +func jacobianToBigAffine(point *JacobianPoint) (*big.Int, *big.Int) { + point.ToAffine() + + // Convert the field values for the now affine point to big.Ints. + x3, y3 := new(big.Int), new(big.Int) + x3.SetBytes(point.X.Bytes()[:]) + y3.SetBytes(point.Y.Bytes()[:]) + return x3, y3 +} + +// Params returns the parameters for the curve. +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) Params() *elliptic.CurveParams { + return curve.CurveParams +} + +// IsOnCurve returns whether or not the affine point (x,y) is on the curve. +// +// This is part of the elliptic.Curve interface implementation. This function +// differs from the crypto/elliptic algorithm since a = 0 not -3. 
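+//
+// For instance (editor's note), the base point from the [SECG] domain
+// parameters satisfies Gy^2 = Gx^3 + 7 (mod P), so
+// S256().IsOnCurve(Params().Gx, Params().Gy) reports true, whereas flipping a
+// single bit of either coordinate would generally make it report false.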
+func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool { + // Convert big ints to a Jacobian point for faster arithmetic. + var point JacobianPoint + bigAffineToJacobian(x, y, &point) + return isOnCurve(&point.X, &point.Y) +} + +// Add returns the sum of (x1,y1) and (x2,y2). +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + // The point at infinity is the identity according to the group law for + // elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P. + if x1.Sign() == 0 && y1.Sign() == 0 { + return x2, y2 + } + if x2.Sign() == 0 && y2.Sign() == 0 { + return x1, y1 + } + + // Convert the affine coordinates from big integers to Jacobian points, + // do the point addition in Jacobian projective space, and convert the + // Jacobian point back to affine big.Ints. + var p1, p2, result JacobianPoint + bigAffineToJacobian(x1, y1, &p1) + bigAffineToJacobian(x2, y2, &p2) + AddNonConst(&p1, &p2, &result) + return jacobianToBigAffine(&result) +} + +// Double returns 2*(x1,y1). +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { + if y1.Sign() == 0 { + return new(big.Int), new(big.Int) + } + + // Convert the affine coordinates from big integers to Jacobian points, + // do the point doubling in Jacobian projective space, and convert the + // Jacobian point back to affine big.Ints. + var point, result JacobianPoint + bigAffineToJacobian(x1, y1, &point) + DoubleNonConst(&point, &result) + return jacobianToBigAffine(&result) +} + +// moduloReduce reduces k from more than 32 bytes to 32 bytes and under. This +// is done by doing a simple modulo curve.N. We can do this since G^N = 1 and +// thus any other valid point on the elliptic curve has the same order. +func moduloReduce(k []byte) []byte { + // Since the order of G is curve.N, we can use a much smaller number by + // doing modulo curve.N + if len(k) > curveParams.ByteSize { + tmpK := new(big.Int).SetBytes(k) + tmpK.Mod(tmpK, curveParams.N) + return tmpK.Bytes() + } + + return k +} + +// ScalarMult returns k*(bx, by) where k is a big endian integer. +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) ScalarMult(bx, by *big.Int, k []byte) (*big.Int, *big.Int) { + // Convert the affine coordinates from big integers to Jacobian points, + // do the multiplication in Jacobian projective space, and convert the + // Jacobian point back to affine big.Ints. + var kModN ModNScalar + kModN.SetByteSlice(moduloReduce(k)) + var point, result JacobianPoint + bigAffineToJacobian(bx, by, &point) + ScalarMultNonConst(&kModN, &point, &result) + return jacobianToBigAffine(&result) +} + +// ScalarBaseMult returns k*G where G is the base point of the group and k is a +// big endian integer. +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + // Perform the multiplication and convert the Jacobian point back to affine + // big.Ints. + var kModN ModNScalar + kModN.SetByteSlice(moduloReduce(k)) + var result JacobianPoint + ScalarBaseMultNonConst(&kModN, &result) + return jacobianToBigAffine(&result) +} + +// X returns the x coordinate of the public key. +func (p *PublicKey) X() *big.Int { + return new(big.Int).SetBytes(p.x.Bytes()[:]) +} + +// Y returns the y coordinate of the public key. 
+func (p *PublicKey) Y() *big.Int { + return new(big.Int).SetBytes(p.y.Bytes()[:]) +} + +// ToECDSA returns the public key as a *ecdsa.PublicKey. +func (p *PublicKey) ToECDSA() *ecdsa.PublicKey { + return &ecdsa.PublicKey{ + Curve: S256(), + X: p.X(), + Y: p.Y(), + } +} + +// ToECDSA returns the private key as a *ecdsa.PrivateKey. +func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey { + var privKeyBytes [PrivKeyBytesLen]byte + p.Key.PutBytes(&privKeyBytes) + var result JacobianPoint + ScalarBaseMultNonConst(&p.Key, &result) + x, y := jacobianToBigAffine(&result) + newPrivKey := &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: S256(), + X: x, + Y: y, + }, + D: new(big.Int).SetBytes(privKeyBytes[:]), + } + zeroArray32(&privKeyBytes) + return newPrivKey +} + +// fromHex converts the passed hex string into a big integer pointer and will +// panic is there is an error. This is only provided for the hard-coded +// constants so errors in the source code can bet detected. It will only (and +// must only) be called for initialization purposes. +func fromHex(s string) *big.Int { + if s == "" { + return big.NewInt(0) + } + r, ok := new(big.Int).SetString(s, 16) + if !ok { + panic("invalid hex in source file: " + s) + } + return r +} + +// secp256k1 is a global instance of the KoblitzCurve implementation which in +// turn embeds and implements elliptic.CurveParams. +var secp256k1 = &KoblitzCurve{ + CurveParams: &elliptic.CurveParams{ + P: curveParams.P, + N: curveParams.N, + B: fromHex("0000000000000000000000000000000000000000000000000000000000000007"), + Gx: curveParams.Gx, + Gy: curveParams.Gy, + BitSize: curveParams.BitSize, + Name: "secp256k1", + }, +} + +// S256 returns an elliptic.Curve which implements secp256k1. +func S256() *KoblitzCurve { + return secp256k1 +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go new file mode 100644 index 000000000000..ac8c45127e43 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go @@ -0,0 +1,67 @@ +// Copyright (c) 2020 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// ErrorKind identifies a kind of error. It has full support for errors.Is and +// errors.As, so the caller can directly check against an error kind when +// determining the reason for an error. +type ErrorKind string + +// These constants are used to identify a specific RuleError. +const ( + // ErrPubKeyInvalidLen indicates that the length of a serialized public + // key is not one of the allowed lengths. + ErrPubKeyInvalidLen = ErrorKind("ErrPubKeyInvalidLen") + + // ErrPubKeyInvalidFormat indicates an attempt was made to parse a public + // key that does not specify one of the supported formats. + ErrPubKeyInvalidFormat = ErrorKind("ErrPubKeyInvalidFormat") + + // ErrPubKeyXTooBig indicates that the x coordinate for a public key + // is greater than or equal to the prime of the field underlying the group. + ErrPubKeyXTooBig = ErrorKind("ErrPubKeyXTooBig") + + // ErrPubKeyYTooBig indicates that the y coordinate for a public key is + // greater than or equal to the prime of the field underlying the group. + ErrPubKeyYTooBig = ErrorKind("ErrPubKeyYTooBig") + + // ErrPubKeyNotOnCurve indicates that a public key is not a point on the + // secp256k1 curve. 
+ ErrPubKeyNotOnCurve = ErrorKind("ErrPubKeyNotOnCurve") + + // ErrPubKeyMismatchedOddness indicates that a hybrid public key specified + // an oddness of the y coordinate that does not match the actual oddness of + // the provided y coordinate. + ErrPubKeyMismatchedOddness = ErrorKind("ErrPubKeyMismatchedOddness") +) + +// Error satisfies the error interface and prints human-readable errors. +func (e ErrorKind) Error() string { + return string(e) +} + +// Error identifies an error related to public key cryptography using a +// sec256k1 curve. It has full support for errors.Is and errors.As, so the +// caller can ascertain the specific reason for the error by checking +// the underlying error. +type Error struct { + Err error + Description string +} + +// Error satisfies the error interface and prints human-readable errors. +func (e Error) Error() string { + return e.Description +} + +// Unwrap returns the underlying wrapped error. +func (e Error) Unwrap() error { + return e.Err +} + +// makeError creates an Error given a set of arguments. +func makeError(kind ErrorKind, desc string) Error { + return Error{Err: kind, Description: desc} +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go new file mode 100644 index 000000000000..f979bb2efe11 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go @@ -0,0 +1,1696 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2024 The Decred developers +// Copyright (c) 2013-2024 Dave Collins +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// References: +// [HAC]: Handbook of Applied Cryptography Menezes, van Oorschot, Vanstone. +// http://cacr.uwaterloo.ca/hac/ + +// All elliptic curve operations for secp256k1 are done in a finite field +// characterized by a 256-bit prime. Given this precision is larger than the +// biggest available native type, obviously some form of bignum math is needed. +// This package implements specialized fixed-precision field arithmetic rather +// than relying on an arbitrary-precision arithmetic package such as math/big +// for dealing with the field math since the size is known. As a result, rather +// large performance gains are achieved by taking advantage of many +// optimizations not available to arbitrary-precision arithmetic and generic +// modular arithmetic algorithms. +// +// There are various ways to internally represent each finite field element. +// For example, the most obvious representation would be to use an array of 4 +// uint64s (64 bits * 4 = 256 bits). However, that representation suffers from +// a couple of issues. First, there is no native Go type large enough to handle +// the intermediate results while adding or multiplying two 64-bit numbers, and +// second there is no space left for overflows when performing the intermediate +// arithmetic between each array element which would lead to expensive carry +// propagation. +// +// Given the above, this implementation represents the field elements as +// 10 uint32s with each word (array entry) treated as base 2^26. 
This was +// chosen for the following reasons: +// 1) Most systems at the current time are 64-bit (or at least have 64-bit +// registers available for specialized purposes such as MMX) so the +// intermediate results can typically be done using a native register (and +// using uint64s to avoid the need for additional half-word arithmetic) +// 2) In order to allow addition of the internal words without having to +// propagate the carry, the max normalized value for each register must +// be less than the number of bits available in the register +// 3) Since we're dealing with 32-bit values, 64-bits of overflow is a +// reasonable choice for #2 +// 4) Given the need for 256-bits of precision and the properties stated in #1, +// #2, and #3, the representation which best accommodates this is 10 uint32s +// with base 2^26 (26 bits * 10 = 260 bits, so the final word only needs 22 +// bits) which leaves the desired 64 bits (32 * 10 = 320, 320 - 256 = 64) for +// overflow +// +// Since it is so important that the field arithmetic is extremely fast for high +// performance crypto, this type does not perform any validation where it +// ordinarily would. See the documentation for FieldVal for more details. + +import ( + "encoding/hex" +) + +// Constants used to make the code more readable. +const ( + twoBitsMask = 0x3 + fourBitsMask = 0xf + sixBitsMask = 0x3f + eightBitsMask = 0xff +) + +// Constants related to the field representation. +const ( + // fieldWords is the number of words used to internally represent the + // 256-bit value. + fieldWords = 10 + + // fieldBase is the exponent used to form the numeric base of each word. + // 2^(fieldBase*i) where i is the word position. + fieldBase = 26 + + // fieldBaseMask is the mask for the bits in each word needed to + // represent the numeric base of each word (except the most significant + // word). + fieldBaseMask = (1 << fieldBase) - 1 + + // fieldMSBBits is the number of bits in the most significant word used + // to represent the value. + fieldMSBBits = 256 - (fieldBase * (fieldWords - 1)) + + // fieldMSBMask is the mask for the bits in the most significant word + // needed to represent the value. + fieldMSBMask = (1 << fieldMSBBits) - 1 + + // These fields provide convenient access to each of the words of the + // secp256k1 prime in the internal field representation to improve code + // readability. + fieldPrimeWordZero = 0x03fffc2f + fieldPrimeWordOne = 0x03ffffbf + fieldPrimeWordTwo = 0x03ffffff + fieldPrimeWordThree = 0x03ffffff + fieldPrimeWordFour = 0x03ffffff + fieldPrimeWordFive = 0x03ffffff + fieldPrimeWordSix = 0x03ffffff + fieldPrimeWordSeven = 0x03ffffff + fieldPrimeWordEight = 0x03ffffff + fieldPrimeWordNine = 0x003fffff +) + +// FieldVal implements optimized fixed-precision arithmetic over the +// secp256k1 finite field. This means all arithmetic is performed modulo +// +// 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f. +// +// WARNING: Since it is so important for the field arithmetic to be extremely +// fast for high performance crypto, this type does not perform any validation +// of documented preconditions where it ordinarily would. As a result, it is +// IMPERATIVE for callers to understand some key concepts that are described +// below and ensure the methods are called with the necessary preconditions that +// each method is documented with. 
For example, some methods only give the +// correct result if the field value is normalized and others require the field +// values involved to have a maximum magnitude and THERE ARE NO EXPLICIT CHECKS +// TO ENSURE THOSE PRECONDITIONS ARE SATISFIED. This does, unfortunately, make +// the type more difficult to use correctly and while I typically prefer to +// ensure all state and input is valid for most code, this is a bit of an +// exception because those extra checks really add up in what ends up being +// critical hot paths. +// +// The first key concept when working with this type is normalization. In order +// to avoid the need to propagate a ton of carries, the internal representation +// provides additional overflow bits for each word of the overall 256-bit value. +// This means that there are multiple internal representations for the same +// value and, as a result, any methods that rely on comparison of the value, +// such as equality and oddness determination, require the caller to provide a +// normalized value. +// +// The second key concept when working with this type is magnitude. As +// previously mentioned, the internal representation provides additional +// overflow bits which means that the more math operations that are performed on +// the field value between normalizations, the more those overflow bits +// accumulate. The magnitude is effectively that maximum possible number of +// those overflow bits that could possibly be required as a result of a given +// operation. Since there are only a limited number of overflow bits available, +// this implies that the max possible magnitude MUST be tracked by the caller +// and the caller MUST normalize the field value if a given operation would +// cause the magnitude of the result to exceed the max allowed value. +// +// IMPORTANT: The max allowed magnitude of a field value is 32. +type FieldVal struct { + // Each 256-bit value is represented as 10 32-bit integers in base 2^26. + // This provides 6 bits of overflow in each word (10 bits in the most + // significant word) for a total of 64 bits of overflow (9*6 + 10 = 64). It + // only implements the arithmetic needed for elliptic curve operations. + // + // The following depicts the internal representation: + // ----------------------------------------------------------------- + // | n[9] | n[8] | ... | n[0] | + // | 32 bits available | 32 bits available | ... | 32 bits available | + // | 22 bits for value | 26 bits for value | ... | 26 bits for value | + // | 10 bits overflow | 6 bits overflow | ... | 6 bits overflow | + // | Mult: 2^(26*9) | Mult: 2^(26*8) | ... | Mult: 2^(26*0) | + // ----------------------------------------------------------------- + // + // For example, consider the number 2^49 + 1. It would be represented as: + // n[0] = 1 + // n[1] = 2^23 + // n[2..9] = 0 + // + // The full 256-bit value is then calculated by looping i from 9..0 and + // doing sum(n[i] * 2^(26i)) like so: + // n[9] * 2^(26*9) = 0 * 2^234 = 0 + // n[8] * 2^(26*8) = 0 * 2^208 = 0 + // ... + // n[1] * 2^(26*1) = 2^23 * 2^26 = 2^49 + // n[0] * 2^(26*0) = 1 * 2^0 = 1 + // Sum: 0 + 0 + ... + 2^49 + 1 = 2^49 + 1 + n [10]uint32 +} + +// String returns the field value as a normalized human-readable hex string. +// +// Preconditions: None +// Output Normalized: Field is not modified -- same as input value +// Output Max Magnitude: Field is not modified -- same as input value +func (f FieldVal) String() string { + // f is a copy, so it's safe to normalize it without mutating the original. 
+ f.Normalize() + return hex.EncodeToString(f.Bytes()[:]) +} + +// Zero sets the field value to zero in constant time. A newly created field +// value is already set to zero. This function can be useful to clear an +// existing field value for reuse. +// +// Preconditions: None +// Output Normalized: Yes +// Output Max Magnitude: 1 +func (f *FieldVal) Zero() { + f.n[0] = 0 + f.n[1] = 0 + f.n[2] = 0 + f.n[3] = 0 + f.n[4] = 0 + f.n[5] = 0 + f.n[6] = 0 + f.n[7] = 0 + f.n[8] = 0 + f.n[9] = 0 +} + +// Set sets the field value equal to the passed value in constant time. The +// normalization and magnitude of the two fields will be identical. +// +// The field value is returned to support chaining. This enables syntax like: +// f := new(FieldVal).Set(f2).Add(1) so that f = f2 + 1 where f2 is not +// modified. +// +// Preconditions: None +// Output Normalized: Same as input value +// Output Max Magnitude: Same as input value +func (f *FieldVal) Set(val *FieldVal) *FieldVal { + *f = *val + return f +} + +// SetInt sets the field value to the passed integer in constant time. This is +// a convenience function since it is fairly common to perform some arithmetic +// with small native integers. +// +// The field value is returned to support chaining. This enables syntax such +// as f := new(FieldVal).SetInt(2).Mul(f2) so that f = 2 * f2. +// +// Preconditions: None +// Output Normalized: Yes +// Output Max Magnitude: 1 +func (f *FieldVal) SetInt(ui uint16) *FieldVal { + f.Zero() + f.n[0] = uint32(ui) + return f +} + +// SetBytes packs the passed 32-byte big-endian value into the internal field +// value representation in constant time. SetBytes interprets the provided +// array as a 256-bit big-endian unsigned integer, packs it into the internal +// field value representation, and returns either 1 if it is greater than or +// equal to the field prime (aka it overflowed) or 0 otherwise in constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. +// +// Preconditions: None +// Output Normalized: Yes if no overflow, no otherwise +// Output Max Magnitude: 1 +func (f *FieldVal) SetBytes(b *[32]byte) uint32 { + // Pack the 256 total bits across the 10 uint32 words with a max of + // 26-bits per word. This could be done with a couple of for loops, + // but this unrolled version is significantly faster. Benchmarks show + // this is about 34 times faster than the variant which uses loops. 
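+ //
+ // Editor's note (illustrative example): packing the 32-byte big-endian
+ // encoding of 2^49 + 1 (all zero bytes except b[31] = 0x01 and b[25] = 0x02)
+ // yields n[0] = 1 and n[1] = 2^23 with every other word zero, matching the
+ // example in the FieldVal documentation above.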
+ f.n[0] = uint32(b[31]) | uint32(b[30])<<8 | uint32(b[29])<<16 | + (uint32(b[28])&twoBitsMask)<<24 + f.n[1] = uint32(b[28])>>2 | uint32(b[27])<<6 | uint32(b[26])<<14 | + (uint32(b[25])&fourBitsMask)<<22 + f.n[2] = uint32(b[25])>>4 | uint32(b[24])<<4 | uint32(b[23])<<12 | + (uint32(b[22])&sixBitsMask)<<20 + f.n[3] = uint32(b[22])>>6 | uint32(b[21])<<2 | uint32(b[20])<<10 | + uint32(b[19])<<18 + f.n[4] = uint32(b[18]) | uint32(b[17])<<8 | uint32(b[16])<<16 | + (uint32(b[15])&twoBitsMask)<<24 + f.n[5] = uint32(b[15])>>2 | uint32(b[14])<<6 | uint32(b[13])<<14 | + (uint32(b[12])&fourBitsMask)<<22 + f.n[6] = uint32(b[12])>>4 | uint32(b[11])<<4 | uint32(b[10])<<12 | + (uint32(b[9])&sixBitsMask)<<20 + f.n[7] = uint32(b[9])>>6 | uint32(b[8])<<2 | uint32(b[7])<<10 | + uint32(b[6])<<18 + f.n[8] = uint32(b[5]) | uint32(b[4])<<8 | uint32(b[3])<<16 | + (uint32(b[2])&twoBitsMask)<<24 + f.n[9] = uint32(b[2])>>2 | uint32(b[1])<<6 | uint32(b[0])<<14 + + // The intuition here is that the field value is greater than the prime if + // one of the higher individual words is greater than corresponding word of + // the prime and all higher words in the field value are equal to their + // corresponding word of the prime. Since this type is modulo the prime, + // being equal is also an overflow back to 0. + // + // Note that because the input is 32 bytes and it was just packed into the + // field representation, the only words that can possibly be greater are + // zero and one, because ceil(log_2(2^256 - 1 - P)) = 33 bits max and the + // internal field representation encodes 26 bits with each word. + // + // Thus, there is no need to test if the upper words of the field value + // exceeds them, hence, only equality is checked for them. + highWordsEq := constantTimeEq(f.n[9], fieldPrimeWordNine) + highWordsEq &= constantTimeEq(f.n[8], fieldPrimeWordEight) + highWordsEq &= constantTimeEq(f.n[7], fieldPrimeWordSeven) + highWordsEq &= constantTimeEq(f.n[6], fieldPrimeWordSix) + highWordsEq &= constantTimeEq(f.n[5], fieldPrimeWordFive) + highWordsEq &= constantTimeEq(f.n[4], fieldPrimeWordFour) + highWordsEq &= constantTimeEq(f.n[3], fieldPrimeWordThree) + highWordsEq &= constantTimeEq(f.n[2], fieldPrimeWordTwo) + overflow := highWordsEq & constantTimeGreater(f.n[1], fieldPrimeWordOne) + highWordsEq &= constantTimeEq(f.n[1], fieldPrimeWordOne) + overflow |= highWordsEq & constantTimeGreaterOrEq(f.n[0], fieldPrimeWordZero) + + return overflow +} + +// SetByteSlice interprets the provided slice as a 256-bit big-endian unsigned +// integer (meaning it is truncated to the first 32 bytes), packs it into the +// internal field value representation, and returns whether or not the resulting +// truncated 256-bit integer is greater than or equal to the field prime (aka it +// overflowed) in constant time. +// +// Note that since passing a slice with more than 32 bytes is truncated, it is +// possible that the truncated value is less than the field prime and hence it +// will not be reported as having overflowed in that case. It is up to the +// caller to decide whether it needs to provide numbers of the appropriate size +// or it if is acceptable to use this function with the described truncation and +// overflow behavior. 
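+//
+// For example (editor's illustration), a 33-byte slice consisting of 0x01
+// followed by 32 zero bytes represents 2^256, but only its leading 32 bytes
+// are used, so the packed value is 2^248 and no overflow is reported.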
+// +// Preconditions: None +// Output Normalized: Yes if no overflow, no otherwise +// Output Max Magnitude: 1 +func (f *FieldVal) SetByteSlice(b []byte) bool { + var b32 [32]byte + b = b[:constantTimeMin(uint32(len(b)), 32)] + copy(b32[:], b32[:32-len(b)]) + copy(b32[32-len(b):], b) + result := f.SetBytes(&b32) + zeroArray32(&b32) + return result != 0 +} + +// Normalize normalizes the internal field words into the desired range and +// performs fast modular reduction over the secp256k1 prime by making use of the +// special form of the prime in constant time. +// +// Preconditions: None +// Output Normalized: Yes +// Output Max Magnitude: 1 +func (f *FieldVal) Normalize() *FieldVal { + // The field representation leaves 6 bits of overflow in each word so + // intermediate calculations can be performed without needing to + // propagate the carry to each higher word during the calculations. In + // order to normalize, we need to "compact" the full 256-bit value to + // the right while propagating any carries through to the high order + // word. + // + // Since this field is doing arithmetic modulo the secp256k1 prime, we + // also need to perform modular reduction over the prime. + // + // Per [HAC] section 14.3.4: Reduction method of moduli of special form, + // when the modulus is of the special form m = b^t - c, highly efficient + // reduction can be achieved. + // + // The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits + // this criteria. + // + // 4294968273 in field representation (base 2^26) is: + // n[0] = 977 + // n[1] = 64 + // That is to say (2^26 * 64) + 977 = 4294968273 + // + // The algorithm presented in the referenced section typically repeats + // until the quotient is zero. However, due to our field representation + // we already know to within one reduction how many times we would need + // to repeat as it's the uppermost bits of the high order word. Thus we + // can simply multiply the magnitude by the field representation of the + // prime and do a single iteration. After this step there might be an + // additional carry to bit 256 (bit 22 of the high order word). + t9 := f.n[9] + m := t9 >> fieldMSBBits + t9 &= fieldMSBMask + t0 := f.n[0] + m*977 + t1 := (t0 >> fieldBase) + f.n[1] + (m << 6) + t0 &= fieldBaseMask + t2 := (t1 >> fieldBase) + f.n[2] + t1 &= fieldBaseMask + t3 := (t2 >> fieldBase) + f.n[3] + t2 &= fieldBaseMask + t4 := (t3 >> fieldBase) + f.n[4] + t3 &= fieldBaseMask + t5 := (t4 >> fieldBase) + f.n[5] + t4 &= fieldBaseMask + t6 := (t5 >> fieldBase) + f.n[6] + t5 &= fieldBaseMask + t7 := (t6 >> fieldBase) + f.n[7] + t6 &= fieldBaseMask + t8 := (t7 >> fieldBase) + f.n[8] + t7 &= fieldBaseMask + t9 = (t8 >> fieldBase) + t9 + t8 &= fieldBaseMask + + // At this point, the magnitude is guaranteed to be one, however, the + // value could still be greater than the prime if there was either a + // carry through to bit 256 (bit 22 of the higher order word) or the + // value is greater than or equal to the field characteristic. The + // following determines if either or these conditions are true and does + // the final reduction in constant time. + // + // Also note that 'm' will be zero when neither of the aforementioned + // conditions are true and the value will not be changed when 'm' is zero. 
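+ //
+ // Editor's note (illustrative example): if the words currently encode the
+ // prime P itself, the first pass above leaves them unchanged, the check
+ // below detects the >= P case, and the final result is zero, as expected
+ // since P ≡ 0 (mod P).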
+ m = constantTimeEq(t9, fieldMSBMask) + m &= constantTimeEq(t8&t7&t6&t5&t4&t3&t2, fieldBaseMask) + m &= constantTimeGreater(t1+64+((t0+977)>>fieldBase), fieldBaseMask) + m |= t9 >> fieldMSBBits + t0 += m * 977 + t1 = (t0 >> fieldBase) + t1 + (m << 6) + t0 &= fieldBaseMask + t2 = (t1 >> fieldBase) + t2 + t1 &= fieldBaseMask + t3 = (t2 >> fieldBase) + t3 + t2 &= fieldBaseMask + t4 = (t3 >> fieldBase) + t4 + t3 &= fieldBaseMask + t5 = (t4 >> fieldBase) + t5 + t4 &= fieldBaseMask + t6 = (t5 >> fieldBase) + t6 + t5 &= fieldBaseMask + t7 = (t6 >> fieldBase) + t7 + t6 &= fieldBaseMask + t8 = (t7 >> fieldBase) + t8 + t7 &= fieldBaseMask + t9 = (t8 >> fieldBase) + t9 + t8 &= fieldBaseMask + t9 &= fieldMSBMask // Remove potential multiple of 2^256. + + // Finally, set the normalized and reduced words. + f.n[0] = t0 + f.n[1] = t1 + f.n[2] = t2 + f.n[3] = t3 + f.n[4] = t4 + f.n[5] = t5 + f.n[6] = t6 + f.n[7] = t7 + f.n[8] = t8 + f.n[9] = t9 + return f +} + +// PutBytesUnchecked unpacks the field value to a 32-byte big-endian value +// directly into the passed byte slice in constant time. The target slice must +// have at least 32 bytes available or it will panic. +// +// There is a similar function, PutBytes, which unpacks the field value into a +// 32-byte array directly. This version is provided since it can be useful +// to write directly into part of a larger buffer without needing a separate +// allocation. +// +// Preconditions: +// - The field value MUST be normalized +// - The target slice MUST have at least 32 bytes available +func (f *FieldVal) PutBytesUnchecked(b []byte) { + // Unpack the 256 total bits from the 10 uint32 words with a max of + // 26-bits per word. This could be done with a couple of for loops, + // but this unrolled version is a bit faster. Benchmarks show this is + // about 10 times faster than the variant which uses loops. 
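+ //
+ // Editor's note: a minimal usage sketch (not upstream code) of writing into
+ // part of a larger buffer, e.g. filling the coordinates of a 65-byte
+ // uncompressed public key serialization, where buf, x, and y are
+ // caller-supplied and x and y are normalized field values:
+ //
+ //	buf[0] = 0x04 // uncompressed format prefix
+ //	x.PutBytesUnchecked(buf[1:33])
+ //	y.PutBytesUnchecked(buf[33:65])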
+ b[31] = byte(f.n[0] & eightBitsMask) + b[30] = byte((f.n[0] >> 8) & eightBitsMask) + b[29] = byte((f.n[0] >> 16) & eightBitsMask) + b[28] = byte((f.n[0]>>24)&twoBitsMask | (f.n[1]&sixBitsMask)<<2) + b[27] = byte((f.n[1] >> 6) & eightBitsMask) + b[26] = byte((f.n[1] >> 14) & eightBitsMask) + b[25] = byte((f.n[1]>>22)&fourBitsMask | (f.n[2]&fourBitsMask)<<4) + b[24] = byte((f.n[2] >> 4) & eightBitsMask) + b[23] = byte((f.n[2] >> 12) & eightBitsMask) + b[22] = byte((f.n[2]>>20)&sixBitsMask | (f.n[3]&twoBitsMask)<<6) + b[21] = byte((f.n[3] >> 2) & eightBitsMask) + b[20] = byte((f.n[3] >> 10) & eightBitsMask) + b[19] = byte((f.n[3] >> 18) & eightBitsMask) + b[18] = byte(f.n[4] & eightBitsMask) + b[17] = byte((f.n[4] >> 8) & eightBitsMask) + b[16] = byte((f.n[4] >> 16) & eightBitsMask) + b[15] = byte((f.n[4]>>24)&twoBitsMask | (f.n[5]&sixBitsMask)<<2) + b[14] = byte((f.n[5] >> 6) & eightBitsMask) + b[13] = byte((f.n[5] >> 14) & eightBitsMask) + b[12] = byte((f.n[5]>>22)&fourBitsMask | (f.n[6]&fourBitsMask)<<4) + b[11] = byte((f.n[6] >> 4) & eightBitsMask) + b[10] = byte((f.n[6] >> 12) & eightBitsMask) + b[9] = byte((f.n[6]>>20)&sixBitsMask | (f.n[7]&twoBitsMask)<<6) + b[8] = byte((f.n[7] >> 2) & eightBitsMask) + b[7] = byte((f.n[7] >> 10) & eightBitsMask) + b[6] = byte((f.n[7] >> 18) & eightBitsMask) + b[5] = byte(f.n[8] & eightBitsMask) + b[4] = byte((f.n[8] >> 8) & eightBitsMask) + b[3] = byte((f.n[8] >> 16) & eightBitsMask) + b[2] = byte((f.n[8]>>24)&twoBitsMask | (f.n[9]&sixBitsMask)<<2) + b[1] = byte((f.n[9] >> 6) & eightBitsMask) + b[0] = byte((f.n[9] >> 14) & eightBitsMask) +} + +// PutBytes unpacks the field value to a 32-byte big-endian value using the +// passed byte array in constant time. +// +// There is a similar function, PutBytesUnchecked, which unpacks the field value +// into a slice that must have at least 32 bytes available. This version is +// provided since it can be useful to write directly into an array that is type +// checked. +// +// Alternatively, there is also Bytes, which unpacks the field value into a new +// array and returns that which can sometimes be more ergonomic in applications +// that aren't concerned about an additional copy. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) PutBytes(b *[32]byte) { + f.PutBytesUnchecked(b[:]) +} + +// Bytes unpacks the field value to a 32-byte big-endian value in constant time. +// +// See PutBytes and PutBytesUnchecked for variants that allow an array or slice +// to be passed which can be useful to cut down on the number of allocations by +// allowing the caller to reuse a buffer or write directly into part of a larger +// buffer. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) Bytes() *[32]byte { + b := new([32]byte) + f.PutBytesUnchecked(b[:]) + return b +} + +// IsZeroBit returns 1 when the field value is equal to zero or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. See IsZero for the version that returns +// a bool. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsZeroBit() uint32 { + // The value can only be zero if no bits are set in any of the words. + // This is a constant time implementation. 
+ bits := f.n[0] | f.n[1] | f.n[2] | f.n[3] | f.n[4] | + f.n[5] | f.n[6] | f.n[7] | f.n[8] | f.n[9] + + return constantTimeEq(bits, 0) +} + +// IsZero returns whether or not the field value is equal to zero in constant +// time. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsZero() bool { + // The value can only be zero if no bits are set in any of the words. + // This is a constant time implementation. + bits := f.n[0] | f.n[1] | f.n[2] | f.n[3] | f.n[4] | + f.n[5] | f.n[6] | f.n[7] | f.n[8] | f.n[9] + + return bits == 0 +} + +// IsOneBit returns 1 when the field value is equal to one or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. See IsOne for the version that returns a +// bool. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsOneBit() uint32 { + // The value can only be one if the single lowest significant bit is set in + // the first word and no other bits are set in any of the other words. + // This is a constant time implementation. + bits := (f.n[0] ^ 1) | f.n[1] | f.n[2] | f.n[3] | f.n[4] | f.n[5] | + f.n[6] | f.n[7] | f.n[8] | f.n[9] + + return constantTimeEq(bits, 0) +} + +// IsOne returns whether or not the field value is equal to one in constant +// time. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsOne() bool { + // The value can only be one if the single lowest significant bit is set in + // the first word and no other bits are set in any of the other words. + // This is a constant time implementation. + bits := (f.n[0] ^ 1) | f.n[1] | f.n[2] | f.n[3] | f.n[4] | f.n[5] | + f.n[6] | f.n[7] | f.n[8] | f.n[9] + + return bits == 0 +} + +// IsOddBit returns 1 when the field value is an odd number or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. See IsOdd for the version that returns a +// bool. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsOddBit() uint32 { + // Only odd numbers have the bottom bit set. + return f.n[0] & 1 +} + +// IsOdd returns whether or not the field value is an odd number in constant +// time. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsOdd() bool { + // Only odd numbers have the bottom bit set. + return f.n[0]&1 == 1 +} + +// Equals returns whether or not the two field values are the same in constant +// time. +// +// Preconditions: +// - Both field values being compared MUST be normalized +func (f *FieldVal) Equals(val *FieldVal) bool { + // Xor only sets bits when they are different, so the two field values + // can only be the same if no bits are set after xoring each word. + // This is a constant time implementation. + bits := (f.n[0] ^ val.n[0]) | (f.n[1] ^ val.n[1]) | (f.n[2] ^ val.n[2]) | + (f.n[3] ^ val.n[3]) | (f.n[4] ^ val.n[4]) | (f.n[5] ^ val.n[5]) | + (f.n[6] ^ val.n[6]) | (f.n[7] ^ val.n[7]) | (f.n[8] ^ val.n[8]) | + (f.n[9] ^ val.n[9]) + + return bits == 0 +} + +// NegateVal negates the passed value and stores the result in f in constant +// time. The caller must provide the maximum magnitude of the passed value for +// a correct result. 
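+//
+// For example (editor's note), a freshly normalized value has magnitude 1 and
+// can be negated with f.NegateVal(v, 1), while the un-normalized sum of two
+// magnitude-1 values has magnitude 2 and would require f.NegateVal(v, 2).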
+// +// The field value is returned to support chaining. This enables syntax like: +// f.NegateVal(f2).AddInt(1) so that f = -f2 + 1. +// +// Preconditions: +// - The max magnitude MUST be 31 +// Output Normalized: No +// Output Max Magnitude: Input magnitude + 1 +func (f *FieldVal) NegateVal(val *FieldVal, magnitude uint32) *FieldVal { + // Negation in the field is just the prime minus the value. However, + // in order to allow negation against a field value without having to + // normalize/reduce it first, multiply by the magnitude (that is how + // "far" away it is from the normalized value) to adjust. Also, since + // negating a value pushes it one more order of magnitude away from the + // normalized range, add 1 to compensate. + // + // For some intuition here, imagine you're performing mod 12 arithmetic + // (picture a clock) and you are negating the number 7. So you start at + // 12 (which is of course 0 under mod 12) and count backwards (left on + // the clock) 7 times to arrive at 5. Notice this is just 12-7 = 5. + // Now, assume you're starting with 19, which is a number that is + // already larger than the modulus and congruent to 7 (mod 12). When a + // value is already in the desired range, its magnitude is 1. Since 19 + // is an additional "step", its magnitude (mod 12) is 2. Since any + // multiple of the modulus is congruent to zero (mod m), the answer can + // be shortcut by simply multiplying the magnitude by the modulus and + // subtracting. Keeping with the example, this would be (2*12)-19 = 5. + f.n[0] = (magnitude+1)*fieldPrimeWordZero - val.n[0] + f.n[1] = (magnitude+1)*fieldPrimeWordOne - val.n[1] + f.n[2] = (magnitude+1)*fieldBaseMask - val.n[2] + f.n[3] = (magnitude+1)*fieldBaseMask - val.n[3] + f.n[4] = (magnitude+1)*fieldBaseMask - val.n[4] + f.n[5] = (magnitude+1)*fieldBaseMask - val.n[5] + f.n[6] = (magnitude+1)*fieldBaseMask - val.n[6] + f.n[7] = (magnitude+1)*fieldBaseMask - val.n[7] + f.n[8] = (magnitude+1)*fieldBaseMask - val.n[8] + f.n[9] = (magnitude+1)*fieldMSBMask - val.n[9] + + return f +} + +// Negate negates the field value in constant time. The existing field value is +// modified. The caller must provide the maximum magnitude of the field value +// for a correct result. +// +// The field value is returned to support chaining. This enables syntax like: +// f.Negate().AddInt(1) so that f = -f + 1. +// +// Preconditions: +// - The max magnitude MUST be 31 +// Output Normalized: No +// Output Max Magnitude: Input magnitude + 1 +func (f *FieldVal) Negate(magnitude uint32) *FieldVal { + return f.NegateVal(f, magnitude) +} + +// AddInt adds the passed integer to the existing field value and stores the +// result in f in constant time. This is a convenience function since it is +// fairly common to perform some arithmetic with small native integers. +// +// The field value is returned to support chaining. This enables syntax like: +// f.AddInt(1).Add(f2) so that f = f + 1 + f2. +// +// Preconditions: +// - The field value MUST have a max magnitude of 31 +// - The integer MUST be a max of 32767 +// Output Normalized: No +// Output Max Magnitude: Existing field magnitude + 1 +func (f *FieldVal) AddInt(ui uint16) *FieldVal { + // Since the field representation intentionally provides overflow bits, + // it's ok to use carryless addition as the carry bit is safely part of + // the word and will be normalized out. + f.n[0] += uint32(ui) + + return f +} + +// Add adds the passed value to the existing field value and stores the result +// in f in constant time. 
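+//
+// Because the words are added without carry propagation, the magnitude of the
+// result is the sum of the operand magnitudes, which is what the precondition
+// below bounds so that no individual word can overflow a uint32.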
+// +// The field value is returned to support chaining. This enables syntax like: +// f.Add(f2).AddInt(1) so that f = f + f2 + 1. +// +// Preconditions: +// - The sum of the magnitudes of the two field values MUST be a max of 32 +// Output Normalized: No +// Output Max Magnitude: Sum of the magnitude of the two individual field values +func (f *FieldVal) Add(val *FieldVal) *FieldVal { + // Since the field representation intentionally provides overflow bits, + // it's ok to use carryless addition as the carry bit is safely part of + // each word and will be normalized out. This could obviously be done + // in a loop, but the unrolled version is faster. + f.n[0] += val.n[0] + f.n[1] += val.n[1] + f.n[2] += val.n[2] + f.n[3] += val.n[3] + f.n[4] += val.n[4] + f.n[5] += val.n[5] + f.n[6] += val.n[6] + f.n[7] += val.n[7] + f.n[8] += val.n[8] + f.n[9] += val.n[9] + + return f +} + +// Add2 adds the passed two field values together and stores the result in f in +// constant time. +// +// The field value is returned to support chaining. This enables syntax like: +// f3.Add2(f, f2).AddInt(1) so that f3 = f + f2 + 1. +// +// Preconditions: +// - The sum of the magnitudes of the two field values MUST be a max of 32 +// Output Normalized: No +// Output Max Magnitude: Sum of the magnitude of the two field values +func (f *FieldVal) Add2(val *FieldVal, val2 *FieldVal) *FieldVal { + // Since the field representation intentionally provides overflow bits, + // it's ok to use carryless addition as the carry bit is safely part of + // each word and will be normalized out. This could obviously be done + // in a loop, but the unrolled version is faster. + f.n[0] = val.n[0] + val2.n[0] + f.n[1] = val.n[1] + val2.n[1] + f.n[2] = val.n[2] + val2.n[2] + f.n[3] = val.n[3] + val2.n[3] + f.n[4] = val.n[4] + val2.n[4] + f.n[5] = val.n[5] + val2.n[5] + f.n[6] = val.n[6] + val2.n[6] + f.n[7] = val.n[7] + val2.n[7] + f.n[8] = val.n[8] + val2.n[8] + f.n[9] = val.n[9] + val2.n[9] + + return f +} + +// MulInt multiplies the field value by the passed int and stores the result in +// f in constant time. Note that this function can overflow if multiplying the +// value by any of the individual words exceeds a max uint32. Therefore it is +// important that the caller ensures no overflows will occur before using this +// function. +// +// The field value is returned to support chaining. This enables syntax like: +// f.MulInt(2).Add(f2) so that f = 2 * f + f2. +// +// Preconditions: +// - The field value magnitude multiplied by given val MUST be a max of 32 +// Output Normalized: No +// Output Max Magnitude: Existing field magnitude times the provided integer val +func (f *FieldVal) MulInt(val uint8) *FieldVal { + // Since each word of the field representation can hold up to + // 32 - fieldBase extra bits which will be normalized out, it's safe + // to multiply each word without using a larger type or carry + // propagation so long as the values won't overflow a uint32. This + // could obviously be done in a loop, but the unrolled version is + // faster. + ui := uint32(val) + f.n[0] *= ui + f.n[1] *= ui + f.n[2] *= ui + f.n[3] *= ui + f.n[4] *= ui + f.n[5] *= ui + f.n[6] *= ui + f.n[7] *= ui + f.n[8] *= ui + f.n[9] *= ui + + return f +} + +// Mul multiplies the passed value to the existing field value and stores the +// result in f in constant time. Note that this function can overflow if +// multiplying any of the individual words exceeds a max uint32. 
In practice, +// this means the magnitude of either value involved in the multiplication must +// be a max of 8. +// +// The field value is returned to support chaining. This enables syntax like: +// f.Mul(f2).AddInt(1) so that f = (f * f2) + 1. +// +// Preconditions: +// - Both field values MUST have a max magnitude of 8 +// Output Normalized: No +// Output Max Magnitude: 1 +func (f *FieldVal) Mul(val *FieldVal) *FieldVal { + return f.Mul2(f, val) +} + +// Mul2 multiplies the passed two field values together and stores the result in +// f in constant time. Note that this function can overflow if multiplying any +// of the individual words exceeds a max uint32. In practice, this means the +// magnitude of either value involved in the multiplication must be a max of 8. +// +// The field value is returned to support chaining. This enables syntax like: +// f3.Mul2(f, f2).AddInt(1) so that f3 = (f * f2) + 1. +// +// Preconditions: +// - Both input field values MUST have a max magnitude of 8 +// Output Normalized: No +// Output Max Magnitude: 1 +func (f *FieldVal) Mul2(val *FieldVal, val2 *FieldVal) *FieldVal { + // This could be done with a couple of for loops and an array to store + // the intermediate terms, but this unrolled version is significantly + // faster. + + // Terms for 2^(fieldBase*0). + m := uint64(val.n[0]) * uint64(val2.n[0]) + t0 := m & fieldBaseMask + + // Terms for 2^(fieldBase*1). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[1]) + + uint64(val.n[1])*uint64(val2.n[0]) + t1 := m & fieldBaseMask + + // Terms for 2^(fieldBase*2). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[2]) + + uint64(val.n[1])*uint64(val2.n[1]) + + uint64(val.n[2])*uint64(val2.n[0]) + t2 := m & fieldBaseMask + + // Terms for 2^(fieldBase*3). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[3]) + + uint64(val.n[1])*uint64(val2.n[2]) + + uint64(val.n[2])*uint64(val2.n[1]) + + uint64(val.n[3])*uint64(val2.n[0]) + t3 := m & fieldBaseMask + + // Terms for 2^(fieldBase*4). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[4]) + + uint64(val.n[1])*uint64(val2.n[3]) + + uint64(val.n[2])*uint64(val2.n[2]) + + uint64(val.n[3])*uint64(val2.n[1]) + + uint64(val.n[4])*uint64(val2.n[0]) + t4 := m & fieldBaseMask + + // Terms for 2^(fieldBase*5). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[5]) + + uint64(val.n[1])*uint64(val2.n[4]) + + uint64(val.n[2])*uint64(val2.n[3]) + + uint64(val.n[3])*uint64(val2.n[2]) + + uint64(val.n[4])*uint64(val2.n[1]) + + uint64(val.n[5])*uint64(val2.n[0]) + t5 := m & fieldBaseMask + + // Terms for 2^(fieldBase*6). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[6]) + + uint64(val.n[1])*uint64(val2.n[5]) + + uint64(val.n[2])*uint64(val2.n[4]) + + uint64(val.n[3])*uint64(val2.n[3]) + + uint64(val.n[4])*uint64(val2.n[2]) + + uint64(val.n[5])*uint64(val2.n[1]) + + uint64(val.n[6])*uint64(val2.n[0]) + t6 := m & fieldBaseMask + + // Terms for 2^(fieldBase*7). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[7]) + + uint64(val.n[1])*uint64(val2.n[6]) + + uint64(val.n[2])*uint64(val2.n[5]) + + uint64(val.n[3])*uint64(val2.n[4]) + + uint64(val.n[4])*uint64(val2.n[3]) + + uint64(val.n[5])*uint64(val2.n[2]) + + uint64(val.n[6])*uint64(val2.n[1]) + + uint64(val.n[7])*uint64(val2.n[0]) + t7 := m & fieldBaseMask + + // Terms for 2^(fieldBase*8). 
+ m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[8]) + + uint64(val.n[1])*uint64(val2.n[7]) + + uint64(val.n[2])*uint64(val2.n[6]) + + uint64(val.n[3])*uint64(val2.n[5]) + + uint64(val.n[4])*uint64(val2.n[4]) + + uint64(val.n[5])*uint64(val2.n[3]) + + uint64(val.n[6])*uint64(val2.n[2]) + + uint64(val.n[7])*uint64(val2.n[1]) + + uint64(val.n[8])*uint64(val2.n[0]) + t8 := m & fieldBaseMask + + // Terms for 2^(fieldBase*9). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[9]) + + uint64(val.n[1])*uint64(val2.n[8]) + + uint64(val.n[2])*uint64(val2.n[7]) + + uint64(val.n[3])*uint64(val2.n[6]) + + uint64(val.n[4])*uint64(val2.n[5]) + + uint64(val.n[5])*uint64(val2.n[4]) + + uint64(val.n[6])*uint64(val2.n[3]) + + uint64(val.n[7])*uint64(val2.n[2]) + + uint64(val.n[8])*uint64(val2.n[1]) + + uint64(val.n[9])*uint64(val2.n[0]) + t9 := m & fieldBaseMask + + // Terms for 2^(fieldBase*10). + m = (m >> fieldBase) + + uint64(val.n[1])*uint64(val2.n[9]) + + uint64(val.n[2])*uint64(val2.n[8]) + + uint64(val.n[3])*uint64(val2.n[7]) + + uint64(val.n[4])*uint64(val2.n[6]) + + uint64(val.n[5])*uint64(val2.n[5]) + + uint64(val.n[6])*uint64(val2.n[4]) + + uint64(val.n[7])*uint64(val2.n[3]) + + uint64(val.n[8])*uint64(val2.n[2]) + + uint64(val.n[9])*uint64(val2.n[1]) + t10 := m & fieldBaseMask + + // Terms for 2^(fieldBase*11). + m = (m >> fieldBase) + + uint64(val.n[2])*uint64(val2.n[9]) + + uint64(val.n[3])*uint64(val2.n[8]) + + uint64(val.n[4])*uint64(val2.n[7]) + + uint64(val.n[5])*uint64(val2.n[6]) + + uint64(val.n[6])*uint64(val2.n[5]) + + uint64(val.n[7])*uint64(val2.n[4]) + + uint64(val.n[8])*uint64(val2.n[3]) + + uint64(val.n[9])*uint64(val2.n[2]) + t11 := m & fieldBaseMask + + // Terms for 2^(fieldBase*12). + m = (m >> fieldBase) + + uint64(val.n[3])*uint64(val2.n[9]) + + uint64(val.n[4])*uint64(val2.n[8]) + + uint64(val.n[5])*uint64(val2.n[7]) + + uint64(val.n[6])*uint64(val2.n[6]) + + uint64(val.n[7])*uint64(val2.n[5]) + + uint64(val.n[8])*uint64(val2.n[4]) + + uint64(val.n[9])*uint64(val2.n[3]) + t12 := m & fieldBaseMask + + // Terms for 2^(fieldBase*13). + m = (m >> fieldBase) + + uint64(val.n[4])*uint64(val2.n[9]) + + uint64(val.n[5])*uint64(val2.n[8]) + + uint64(val.n[6])*uint64(val2.n[7]) + + uint64(val.n[7])*uint64(val2.n[6]) + + uint64(val.n[8])*uint64(val2.n[5]) + + uint64(val.n[9])*uint64(val2.n[4]) + t13 := m & fieldBaseMask + + // Terms for 2^(fieldBase*14). + m = (m >> fieldBase) + + uint64(val.n[5])*uint64(val2.n[9]) + + uint64(val.n[6])*uint64(val2.n[8]) + + uint64(val.n[7])*uint64(val2.n[7]) + + uint64(val.n[8])*uint64(val2.n[6]) + + uint64(val.n[9])*uint64(val2.n[5]) + t14 := m & fieldBaseMask + + // Terms for 2^(fieldBase*15). + m = (m >> fieldBase) + + uint64(val.n[6])*uint64(val2.n[9]) + + uint64(val.n[7])*uint64(val2.n[8]) + + uint64(val.n[8])*uint64(val2.n[7]) + + uint64(val.n[9])*uint64(val2.n[6]) + t15 := m & fieldBaseMask + + // Terms for 2^(fieldBase*16). + m = (m >> fieldBase) + + uint64(val.n[7])*uint64(val2.n[9]) + + uint64(val.n[8])*uint64(val2.n[8]) + + uint64(val.n[9])*uint64(val2.n[7]) + t16 := m & fieldBaseMask + + // Terms for 2^(fieldBase*17). + m = (m >> fieldBase) + + uint64(val.n[8])*uint64(val2.n[9]) + + uint64(val.n[9])*uint64(val2.n[8]) + t17 := m & fieldBaseMask + + // Terms for 2^(fieldBase*18). + m = (m >> fieldBase) + uint64(val.n[9])*uint64(val2.n[9]) + t18 := m & fieldBaseMask + + // What's left is for 2^(fieldBase*19). + t19 := m >> fieldBase + + // At this point, all of the terms are grouped into their respective + // base. 
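+ // In other words, the full product is currently expressed as the sum of
+ // t_i * 2^(26*i) for i = 0 through 19 and still needs to be folded back into
+ // the ten-word representation below 2^256.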
+ // + // Per [HAC] section 14.3.4: Reduction method of moduli of special form, + // when the modulus is of the special form m = b^t - c, highly efficient + // reduction can be achieved per the provided algorithm. + // + // The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits + // this criteria. + // + // 4294968273 in field representation (base 2^26) is: + // n[0] = 977 + // n[1] = 64 + // That is to say (2^26 * 64) + 977 = 4294968273 + // + // Since each word is in base 26, the upper terms (t10 and up) start + // at 260 bits (versus the final desired range of 256 bits), so the + // field representation of 'c' from above needs to be adjusted for the + // extra 4 bits by multiplying it by 2^4 = 16. 4294968273 * 16 = + // 68719492368. Thus, the adjusted field representation of 'c' is: + // n[0] = 977 * 16 = 15632 + // n[1] = 64 * 16 = 1024 + // That is to say (2^26 * 1024) + 15632 = 68719492368 + // + // To reduce the final term, t19, the entire 'c' value is needed instead + // of only n[0] because there are no more terms left to handle n[1]. + // This means there might be some magnitude left in the upper bits that + // is handled below. + m = t0 + t10*15632 + t0 = m & fieldBaseMask + m = (m >> fieldBase) + t1 + t10*1024 + t11*15632 + t1 = m & fieldBaseMask + m = (m >> fieldBase) + t2 + t11*1024 + t12*15632 + t2 = m & fieldBaseMask + m = (m >> fieldBase) + t3 + t12*1024 + t13*15632 + t3 = m & fieldBaseMask + m = (m >> fieldBase) + t4 + t13*1024 + t14*15632 + t4 = m & fieldBaseMask + m = (m >> fieldBase) + t5 + t14*1024 + t15*15632 + t5 = m & fieldBaseMask + m = (m >> fieldBase) + t6 + t15*1024 + t16*15632 + t6 = m & fieldBaseMask + m = (m >> fieldBase) + t7 + t16*1024 + t17*15632 + t7 = m & fieldBaseMask + m = (m >> fieldBase) + t8 + t17*1024 + t18*15632 + t8 = m & fieldBaseMask + m = (m >> fieldBase) + t9 + t18*1024 + t19*68719492368 + t9 = m & fieldMSBMask + m >>= fieldMSBBits + + // At this point, if the magnitude is greater than 0, the overall value + // is greater than the max possible 256-bit value. In particular, it is + // "how many times larger" than the max value it is. + // + // The algorithm presented in [HAC] section 14.3.4 repeats until the + // quotient is zero. However, due to the above, we already know at + // least how many times we would need to repeat as it's the value + // currently in m. Thus we can simply multiply the magnitude by the + // field representation of the prime and do a single iteration. Notice + // that nothing will be changed when the magnitude is zero, so we could + // skip this in that case, however always running regardless allows it + // to run in constant time. The final result will be in the range + // 0 <= result <= prime + (2^64 - c), so it is guaranteed to have a + // magnitude of 1, but it is denormalized. + d := t0 + m*977 + f.n[0] = uint32(d & fieldBaseMask) + d = (d >> fieldBase) + t1 + m*64 + f.n[1] = uint32(d & fieldBaseMask) + f.n[2] = uint32((d >> fieldBase) + t2) + f.n[3] = uint32(t3) + f.n[4] = uint32(t4) + f.n[5] = uint32(t5) + f.n[6] = uint32(t6) + f.n[7] = uint32(t7) + f.n[8] = uint32(t8) + f.n[9] = uint32(t9) + + return f +} + +// SquareRootVal either calculates the square root of the passed value when it +// exists or the square root of the negation of the value when it does not exist +// and stores the result in f in constant time. The return flag is true when +// the calculated square root is for the passed value itself and false when it +// is for its negation. 
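+//
+// A typical use is point decompression: the right-hand side of the curve
+// equation y^2 = x^3 + 7 can be computed for a candidate X coordinate and
+// passed to this function to recover Y, with the return value indicating
+// whether such a Y actually exists.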
+// +// Note that this function can overflow if multiplying any of the individual +// words exceeds a max uint32. In practice, this means the magnitude of the +// field must be a max of 8 to prevent overflow. The magnitude of the result +// will be 1. +// +// Preconditions: +// - The input field value MUST have a max magnitude of 8 +// Output Normalized: No +// Output Max Magnitude: 1 +func (f *FieldVal) SquareRootVal(val *FieldVal) bool { + // This uses the Tonelli-Shanks method for calculating the square root of + // the value when it exists. The key principles of the method follow. + // + // Fermat's little theorem states that for a nonzero number 'a' and prime + // 'p', a^(p-1) ≡ 1 (mod p). + // + // Further, Euler's criterion states that an integer 'a' has a square root + // (aka is a quadratic residue) modulo a prime if a^((p-1)/2) ≡ 1 (mod p) + // and, conversely, when it does NOT have a square root (aka 'a' is a + // non-residue) a^((p-1)/2) ≡ -1 (mod p). + // + // This can be seen by considering that Fermat's little theorem can be + // written as (a^((p-1)/2) - 1)(a^((p-1)/2) + 1) ≡ 0 (mod p). Therefore, + // one of the two factors must be 0. Then, when a ≡ x^2 (aka 'a' is a + // quadratic residue), (x^2)^((p-1)/2) ≡ x^(p-1) ≡ 1 (mod p) which implies + // the first factor must be zero. Finally, per Lagrange's theorem, the + // non-residues are the only remaining possible solutions and thus must make + // the second factor zero to satisfy Fermat's little theorem implying that + // a^((p-1)/2) ≡ -1 (mod p) for that case. + // + // The Tonelli-Shanks method uses these facts along with factoring out + // powers of two to solve a congruence that results in either the solution + // when the square root exists or the square root of the negation of the + // value when it does not. In the case of primes that are ≡ 3 (mod 4), the + // possible solutions are r = ±a^((p+1)/4) (mod p). Therefore, either r^2 ≡ + // a (mod p) is true in which case ±r are the two solutions, or r^2 ≡ -a + // (mod p) in which case 'a' is a non-residue and there are no solutions. + // + // The secp256k1 prime is ≡ 3 (mod 4), so this result applies. + // + // In other words, calculate a^((p+1)/4) and then square it and check it + // against the original value to determine if it is actually the square + // root. + // + // In order to efficiently compute a^((p+1)/4), (p+1)/4 needs to be split + // into a sequence of squares and multiplications that minimizes the number + // of multiplications needed (since they are more costly than squarings). + // + // The secp256k1 prime + 1 / 4 is 2^254 - 2^30 - 244. In binary, that is: + // + // 00111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 10111111 11111111 11111111 00001100 + // + // Notice that can be broken up into three windows of consecutive 1s (in + // order of least to most significant) as: + // + // 6-bit window with two bits set (bits 4, 5, 6, 7 unset) + // 23-bit window with 22 bits set (bit 30 unset) + // 223-bit window with all 223 bits set + // + // Thus, the groups of 1 bits in each window forms the set: + // S = {2, 22, 223}. + // + // The strategy is to calculate a^(2^n - 1) for each grouping via an + // addition chain with a sliding window. 
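+ // For example, a^(2^22 - 1) is produced from a^(2^11 - 1) by squaring it
+ // eleven times and multiplying by a^(2^11 - 1) once more, which is how the
+ // a22 value is constructed below.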
+ // + // The addition chain used is (credits to Peter Dettman): + // (0,0),(1,0),(2,2),(3,2),(4,1),(5,5),(6,6),(7,7),(8,8),(9,7),(10,2) + // => 2^1 2^[2] 2^3 2^6 2^9 2^11 2^[22] 2^44 2^88 2^176 2^220 2^[223] + // + // This has a cost of 254 field squarings and 13 field multiplications. + var a, a2, a3, a6, a9, a11, a22, a44, a88, a176, a220, a223 FieldVal + a.Set(val) + a2.SquareVal(&a).Mul(&a) // a2 = a^(2^2 - 1) + a3.SquareVal(&a2).Mul(&a) // a3 = a^(2^3 - 1) + a6.SquareVal(&a3).Square().Square() // a6 = a^(2^6 - 2^3) + a6.Mul(&a3) // a6 = a^(2^6 - 1) + a9.SquareVal(&a6).Square().Square() // a9 = a^(2^9 - 2^3) + a9.Mul(&a3) // a9 = a^(2^9 - 1) + a11.SquareVal(&a9).Square() // a11 = a^(2^11 - 2^2) + a11.Mul(&a2) // a11 = a^(2^11 - 1) + a22.SquareVal(&a11).Square().Square().Square().Square() // a22 = a^(2^16 - 2^5) + a22.Square().Square().Square().Square().Square() // a22 = a^(2^21 - 2^10) + a22.Square() // a22 = a^(2^22 - 2^11) + a22.Mul(&a11) // a22 = a^(2^22 - 1) + a44.SquareVal(&a22).Square().Square().Square().Square() // a44 = a^(2^27 - 2^5) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^32 - 2^10) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^37 - 2^15) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^42 - 2^20) + a44.Square().Square() // a44 = a^(2^44 - 2^22) + a44.Mul(&a22) // a44 = a^(2^44 - 1) + a88.SquareVal(&a44).Square().Square().Square().Square() // a88 = a^(2^49 - 2^5) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^54 - 2^10) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^59 - 2^15) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^64 - 2^20) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^69 - 2^25) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^74 - 2^30) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^79 - 2^35) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^84 - 2^40) + a88.Square().Square().Square().Square() // a88 = a^(2^88 - 2^44) + a88.Mul(&a44) // a88 = a^(2^88 - 1) + a176.SquareVal(&a88).Square().Square().Square().Square() // a176 = a^(2^93 - 2^5) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^98 - 2^10) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^103 - 2^15) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^108 - 2^20) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^113 - 2^25) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^118 - 2^30) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^123 - 2^35) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^128 - 2^40) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^133 - 2^45) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^138 - 2^50) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^143 - 2^55) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^148 - 2^60) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^153 - 2^65) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^158 - 2^70) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^163 - 2^75) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^168 - 2^80) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^173 - 2^85) + a176.Square().Square().Square() // a176 = a^(2^176 - 2^88) + a176.Mul(&a88) // a176 = a^(2^176 - 1) + 
a220.SquareVal(&a176).Square().Square().Square().Square() // a220 = a^(2^181 - 2^5)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^186 - 2^10)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^191 - 2^15)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^196 - 2^20)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^201 - 2^25)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^206 - 2^30)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^211 - 2^35)
+ a220.Square().Square().Square().Square().Square() // a220 = a^(2^216 - 2^40)
+ a220.Square().Square().Square().Square() // a220 = a^(2^220 - 2^44)
+ a220.Mul(&a44) // a220 = a^(2^220 - 1)
+ a223.SquareVal(&a220).Square().Square() // a223 = a^(2^223 - 2^3)
+ a223.Mul(&a3) // a223 = a^(2^223 - 1)
+
+ f.SquareVal(&a223).Square().Square().Square().Square() // f = a^(2^228 - 2^5)
+ f.Square().Square().Square().Square().Square() // f = a^(2^233 - 2^10)
+ f.Square().Square().Square().Square().Square() // f = a^(2^238 - 2^15)
+ f.Square().Square().Square().Square().Square() // f = a^(2^243 - 2^20)
+ f.Square().Square().Square() // f = a^(2^246 - 2^23)
+ f.Mul(&a22) // f = a^(2^246 - 2^22 - 1)
+ f.Square().Square().Square().Square().Square() // f = a^(2^251 - 2^27 - 2^5)
+ f.Square() // f = a^(2^252 - 2^28 - 2^6)
+ f.Mul(&a2) // f = a^(2^252 - 2^28 - 2^6 + 2^1 + 1)
+ f.Square().Square() // f = a^(2^254 - 2^30 - 2^8 + 2^3 + 2^2)
+ // = a^(2^254 - 2^30 - 244)
+ // = a^((p+1)/4)
+
+ // Ensure the calculated result is actually the square root by squaring it
+ // and checking against the original value.
+ var sqr FieldVal
+ return sqr.SquareVal(f).Normalize().Equals(val.Normalize())
+}
+
+// Square squares the field value in constant time. The existing field value is
+// modified. Note that this function can overflow if multiplying any of the
+// individual words exceeds a max uint32. In practice, this means the magnitude
+// of the field must be a max of 8 to prevent overflow.
+//
+// The field value is returned to support chaining. This enables syntax like:
+// f.Square().Mul(f2) so that f = f^2 * f2.
+//
+// Preconditions:
+// - The field value MUST have a max magnitude of 8
+// Output Normalized: No
+// Output Max Magnitude: 1
+func (f *FieldVal) Square() *FieldVal {
+ return f.SquareVal(f)
+}
+
+// SquareVal squares the passed value and stores the result in f in constant
+// time. Note that this function can overflow if multiplying any of the
+// individual words exceeds a max uint32. In practice, this means the magnitude
+// of the field being squared must be a max of 8 to prevent overflow.
+//
+// The field value is returned to support chaining. This enables syntax like:
+// f3.SquareVal(f).Mul(f) so that f3 = f^2 * f = f^3.
+//
+// Preconditions:
+// - The input field value MUST have a max magnitude of 8
+// Output Normalized: No
+// Output Max Magnitude: 1
+func (f *FieldVal) SquareVal(val *FieldVal) *FieldVal {
+ // This could be done with a couple of for loops and an array to store
+ // the intermediate terms, but this unrolled version is significantly
+ // faster.
+
+ // Terms for 2^(fieldBase*0).
+ m := uint64(val.n[0]) * uint64(val.n[0])
+ t0 := m & fieldBaseMask
+
+ // Terms for 2^(fieldBase*1).
+ m = (m >> fieldBase) + 2*uint64(val.n[0])*uint64(val.n[1])
+ t1 := m & fieldBaseMask
+
+ // Terms for 2^(fieldBase*2).
+ m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[2]) + + uint64(val.n[1])*uint64(val.n[1]) + t2 := m & fieldBaseMask + + // Terms for 2^(fieldBase*3). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[3]) + + 2*uint64(val.n[1])*uint64(val.n[2]) + t3 := m & fieldBaseMask + + // Terms for 2^(fieldBase*4). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[4]) + + 2*uint64(val.n[1])*uint64(val.n[3]) + + uint64(val.n[2])*uint64(val.n[2]) + t4 := m & fieldBaseMask + + // Terms for 2^(fieldBase*5). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[5]) + + 2*uint64(val.n[1])*uint64(val.n[4]) + + 2*uint64(val.n[2])*uint64(val.n[3]) + t5 := m & fieldBaseMask + + // Terms for 2^(fieldBase*6). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[6]) + + 2*uint64(val.n[1])*uint64(val.n[5]) + + 2*uint64(val.n[2])*uint64(val.n[4]) + + uint64(val.n[3])*uint64(val.n[3]) + t6 := m & fieldBaseMask + + // Terms for 2^(fieldBase*7). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[7]) + + 2*uint64(val.n[1])*uint64(val.n[6]) + + 2*uint64(val.n[2])*uint64(val.n[5]) + + 2*uint64(val.n[3])*uint64(val.n[4]) + t7 := m & fieldBaseMask + + // Terms for 2^(fieldBase*8). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[8]) + + 2*uint64(val.n[1])*uint64(val.n[7]) + + 2*uint64(val.n[2])*uint64(val.n[6]) + + 2*uint64(val.n[3])*uint64(val.n[5]) + + uint64(val.n[4])*uint64(val.n[4]) + t8 := m & fieldBaseMask + + // Terms for 2^(fieldBase*9). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[9]) + + 2*uint64(val.n[1])*uint64(val.n[8]) + + 2*uint64(val.n[2])*uint64(val.n[7]) + + 2*uint64(val.n[3])*uint64(val.n[6]) + + 2*uint64(val.n[4])*uint64(val.n[5]) + t9 := m & fieldBaseMask + + // Terms for 2^(fieldBase*10). + m = (m >> fieldBase) + + 2*uint64(val.n[1])*uint64(val.n[9]) + + 2*uint64(val.n[2])*uint64(val.n[8]) + + 2*uint64(val.n[3])*uint64(val.n[7]) + + 2*uint64(val.n[4])*uint64(val.n[6]) + + uint64(val.n[5])*uint64(val.n[5]) + t10 := m & fieldBaseMask + + // Terms for 2^(fieldBase*11). + m = (m >> fieldBase) + + 2*uint64(val.n[2])*uint64(val.n[9]) + + 2*uint64(val.n[3])*uint64(val.n[8]) + + 2*uint64(val.n[4])*uint64(val.n[7]) + + 2*uint64(val.n[5])*uint64(val.n[6]) + t11 := m & fieldBaseMask + + // Terms for 2^(fieldBase*12). + m = (m >> fieldBase) + + 2*uint64(val.n[3])*uint64(val.n[9]) + + 2*uint64(val.n[4])*uint64(val.n[8]) + + 2*uint64(val.n[5])*uint64(val.n[7]) + + uint64(val.n[6])*uint64(val.n[6]) + t12 := m & fieldBaseMask + + // Terms for 2^(fieldBase*13). + m = (m >> fieldBase) + + 2*uint64(val.n[4])*uint64(val.n[9]) + + 2*uint64(val.n[5])*uint64(val.n[8]) + + 2*uint64(val.n[6])*uint64(val.n[7]) + t13 := m & fieldBaseMask + + // Terms for 2^(fieldBase*14). + m = (m >> fieldBase) + + 2*uint64(val.n[5])*uint64(val.n[9]) + + 2*uint64(val.n[6])*uint64(val.n[8]) + + uint64(val.n[7])*uint64(val.n[7]) + t14 := m & fieldBaseMask + + // Terms for 2^(fieldBase*15). + m = (m >> fieldBase) + + 2*uint64(val.n[6])*uint64(val.n[9]) + + 2*uint64(val.n[7])*uint64(val.n[8]) + t15 := m & fieldBaseMask + + // Terms for 2^(fieldBase*16). + m = (m >> fieldBase) + + 2*uint64(val.n[7])*uint64(val.n[9]) + + uint64(val.n[8])*uint64(val.n[8]) + t16 := m & fieldBaseMask + + // Terms for 2^(fieldBase*17). + m = (m >> fieldBase) + 2*uint64(val.n[8])*uint64(val.n[9]) + t17 := m & fieldBaseMask + + // Terms for 2^(fieldBase*18). + m = (m >> fieldBase) + uint64(val.n[9])*uint64(val.n[9]) + t18 := m & fieldBaseMask + + // What's left is for 2^(fieldBase*19). 
+ t19 := m >> fieldBase + + // At this point, all of the terms are grouped into their respective + // base. + // + // Per [HAC] section 14.3.4: Reduction method of moduli of special form, + // when the modulus is of the special form m = b^t - c, highly efficient + // reduction can be achieved per the provided algorithm. + // + // The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits + // this criteria. + // + // 4294968273 in field representation (base 2^26) is: + // n[0] = 977 + // n[1] = 64 + // That is to say (2^26 * 64) + 977 = 4294968273 + // + // Since each word is in base 26, the upper terms (t10 and up) start + // at 260 bits (versus the final desired range of 256 bits), so the + // field representation of 'c' from above needs to be adjusted for the + // extra 4 bits by multiplying it by 2^4 = 16. 4294968273 * 16 = + // 68719492368. Thus, the adjusted field representation of 'c' is: + // n[0] = 977 * 16 = 15632 + // n[1] = 64 * 16 = 1024 + // That is to say (2^26 * 1024) + 15632 = 68719492368 + // + // To reduce the final term, t19, the entire 'c' value is needed instead + // of only n[0] because there are no more terms left to handle n[1]. + // This means there might be some magnitude left in the upper bits that + // is handled below. + m = t0 + t10*15632 + t0 = m & fieldBaseMask + m = (m >> fieldBase) + t1 + t10*1024 + t11*15632 + t1 = m & fieldBaseMask + m = (m >> fieldBase) + t2 + t11*1024 + t12*15632 + t2 = m & fieldBaseMask + m = (m >> fieldBase) + t3 + t12*1024 + t13*15632 + t3 = m & fieldBaseMask + m = (m >> fieldBase) + t4 + t13*1024 + t14*15632 + t4 = m & fieldBaseMask + m = (m >> fieldBase) + t5 + t14*1024 + t15*15632 + t5 = m & fieldBaseMask + m = (m >> fieldBase) + t6 + t15*1024 + t16*15632 + t6 = m & fieldBaseMask + m = (m >> fieldBase) + t7 + t16*1024 + t17*15632 + t7 = m & fieldBaseMask + m = (m >> fieldBase) + t8 + t17*1024 + t18*15632 + t8 = m & fieldBaseMask + m = (m >> fieldBase) + t9 + t18*1024 + t19*68719492368 + t9 = m & fieldMSBMask + m >>= fieldMSBBits + + // At this point, if the magnitude is greater than 0, the overall value + // is greater than the max possible 256-bit value. In particular, it is + // "how many times larger" than the max value it is. + // + // The algorithm presented in [HAC] section 14.3.4 repeats until the + // quotient is zero. However, due to the above, we already know at + // least how many times we would need to repeat as it's the value + // currently in m. Thus we can simply multiply the magnitude by the + // field representation of the prime and do a single iteration. Notice + // that nothing will be changed when the magnitude is zero, so we could + // skip this in that case, however always running regardless allows it + // to run in constant time. The final result will be in the range + // 0 <= result <= prime + (2^64 - c), so it is guaranteed to have a + // magnitude of 1, but it is denormalized. + n := t0 + m*977 + f.n[0] = uint32(n & fieldBaseMask) + n = (n >> fieldBase) + t1 + m*64 + f.n[1] = uint32(n & fieldBaseMask) + f.n[2] = uint32((n >> fieldBase) + t2) + f.n[3] = uint32(t3) + f.n[4] = uint32(t4) + f.n[5] = uint32(t5) + f.n[6] = uint32(t6) + f.n[7] = uint32(t7) + f.n[8] = uint32(t8) + f.n[9] = uint32(t9) + + return f +} + +// Inverse finds the modular multiplicative inverse of the field value in +// constant time. The existing field value is modified. +// +// The field value is returned to support chaining. This enables syntax like: +// f.Inverse().Mul(f2) so that f = f^-1 * f2. 
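+//
+// Note that zero has no multiplicative inverse; since 0^(p-2) is still zero,
+// calling this on a zero value simply leaves it at zero.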
+// +// Preconditions: +// - The field value MUST have a max magnitude of 8 +// Output Normalized: No +// Output Max Magnitude: 1 +func (f *FieldVal) Inverse() *FieldVal { + // Fermat's little theorem states that for a nonzero number 'a' and prime + // 'p', a^(p-1) ≡ 1 (mod p). Multiplying both sides of the equation by the + // multiplicative inverse a^-1 yields a^(p-2) ≡ a^-1 (mod p). Thus, a^(p-2) + // is the multiplicative inverse. + // + // In order to efficiently compute a^(p-2), p-2 needs to be split into a + // sequence of squares and multiplications that minimizes the number of + // multiplications needed (since they are more costly than squarings). + // Intermediate results are saved and reused as well. + // + // The secp256k1 prime - 2 is 2^256 - 4294968275. In binary, that is: + // + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111110 + // 11111111 11111111 11111100 00101101 + // + // Notice that can be broken up into five windows of consecutive 1s (in + // order of least to most significant) as: + // + // 2-bit window with 1 bit set (bit 1 unset) + // 3-bit window with 2 bits set (bit 4 unset) + // 5-bit window with 1 bit set (bits 6, 7, 8, 9 unset) + // 23-bit window with 22 bits set (bit 32 unset) + // 223-bit window with all 223 bits set + // + // Thus, the groups of 1 bits in each window forms the set: + // S = {1, 2, 22, 223}. + // + // The strategy is to calculate a^(2^n - 1) for each grouping via an + // addition chain with a sliding window. + // + // The addition chain used is (credits to Peter Dettman): + // (0,0),(1,0),(2,2),(3,2),(4,1),(5,5),(6,6),(7,7),(8,8),(9,7),(10,2) + // => 2^[1] 2^[2] 2^3 2^6 2^9 2^11 2^[22] 2^44 2^88 2^176 2^220 2^[223] + // + // This has a cost of 255 field squarings and 15 field multiplications. 
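+ //
+ // Each intermediate variable below is named for the exponent it holds, with
+ // aN standing for a^(2^N - 1), mirroring the windows described above.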
+ var a, a2, a3, a6, a9, a11, a22, a44, a88, a176, a220, a223 FieldVal + a.Set(f) + a2.SquareVal(&a).Mul(&a) // a2 = a^(2^2 - 1) + a3.SquareVal(&a2).Mul(&a) // a3 = a^(2^3 - 1) + a6.SquareVal(&a3).Square().Square() // a6 = a^(2^6 - 2^3) + a6.Mul(&a3) // a6 = a^(2^6 - 1) + a9.SquareVal(&a6).Square().Square() // a9 = a^(2^9 - 2^3) + a9.Mul(&a3) // a9 = a^(2^9 - 1) + a11.SquareVal(&a9).Square() // a11 = a^(2^11 - 2^2) + a11.Mul(&a2) // a11 = a^(2^11 - 1) + a22.SquareVal(&a11).Square().Square().Square().Square() // a22 = a^(2^16 - 2^5) + a22.Square().Square().Square().Square().Square() // a22 = a^(2^21 - 2^10) + a22.Square() // a22 = a^(2^22 - 2^11) + a22.Mul(&a11) // a22 = a^(2^22 - 1) + a44.SquareVal(&a22).Square().Square().Square().Square() // a44 = a^(2^27 - 2^5) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^32 - 2^10) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^37 - 2^15) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^42 - 2^20) + a44.Square().Square() // a44 = a^(2^44 - 2^22) + a44.Mul(&a22) // a44 = a^(2^44 - 1) + a88.SquareVal(&a44).Square().Square().Square().Square() // a88 = a^(2^49 - 2^5) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^54 - 2^10) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^59 - 2^15) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^64 - 2^20) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^69 - 2^25) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^74 - 2^30) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^79 - 2^35) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^84 - 2^40) + a88.Square().Square().Square().Square() // a88 = a^(2^88 - 2^44) + a88.Mul(&a44) // a88 = a^(2^88 - 1) + a176.SquareVal(&a88).Square().Square().Square().Square() // a176 = a^(2^93 - 2^5) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^98 - 2^10) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^103 - 2^15) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^108 - 2^20) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^113 - 2^25) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^118 - 2^30) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^123 - 2^35) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^128 - 2^40) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^133 - 2^45) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^138 - 2^50) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^143 - 2^55) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^148 - 2^60) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^153 - 2^65) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^158 - 2^70) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^163 - 2^75) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^168 - 2^80) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^173 - 2^85) + a176.Square().Square().Square() // a176 = a^(2^176 - 2^88) + a176.Mul(&a88) // a176 = a^(2^176 - 1) + a220.SquareVal(&a176).Square().Square().Square().Square() // a220 = a^(2^181 - 2^5) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^186 - 2^10) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^191 - 2^15) + a220.Square().Square().Square().Square().Square() 
// a220 = a^(2^196 - 2^20) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^201 - 2^25) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^206 - 2^30) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^211 - 2^35) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^216 - 2^40) + a220.Square().Square().Square().Square() // a220 = a^(2^220 - 2^44) + a220.Mul(&a44) // a220 = a^(2^220 - 1) + a223.SquareVal(&a220).Square().Square() // a223 = a^(2^223 - 2^3) + a223.Mul(&a3) // a223 = a^(2^223 - 1) + + f.SquareVal(&a223).Square().Square().Square().Square() // f = a^(2^228 - 2^5) + f.Square().Square().Square().Square().Square() // f = a^(2^233 - 2^10) + f.Square().Square().Square().Square().Square() // f = a^(2^238 - 2^15) + f.Square().Square().Square().Square().Square() // f = a^(2^243 - 2^20) + f.Square().Square().Square() // f = a^(2^246 - 2^23) + f.Mul(&a22) // f = a^(2^246 - 4194305) + f.Square().Square().Square().Square().Square() // f = a^(2^251 - 134217760) + f.Mul(&a) // f = a^(2^251 - 134217759) + f.Square().Square().Square() // f = a^(2^254 - 1073742072) + f.Mul(&a2) // f = a^(2^254 - 1073742069) + f.Square().Square() // f = a^(2^256 - 4294968276) + return f.Mul(&a) // f = a^(2^256 - 4294968275) = a^(p-2) +} + +// IsGtOrEqPrimeMinusOrder returns whether or not the field value is greater +// than or equal to the field prime minus the secp256k1 group order in constant +// time. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsGtOrEqPrimeMinusOrder() bool { + // The secp256k1 prime is equivalent to 2^256 - 4294968273 and the group + // order is 2^256 - 432420386565659656852420866394968145599. Thus, + // the prime minus the group order is: + // 432420386565659656852420866390673177326 + // + // In hex that is: + // 0x00000000 00000000 00000000 00000001 45512319 50b75fc4 402da172 2fc9baee + // + // Converting that to field representation (base 2^26) is: + // + // n[0] = 0x03c9baee + // n[1] = 0x03685c8b + // n[2] = 0x01fc4402 + // n[3] = 0x006542dd + // n[4] = 0x01455123 + // + // This can be verified with the following test code: + // pMinusN := new(big.Int).Sub(curveParams.P, curveParams.N) + // var fv FieldVal + // fv.SetByteSlice(pMinusN.Bytes()) + // t.Logf("%x", fv.n) + // + // Outputs: [3c9baee 3685c8b 1fc4402 6542dd 1455123 0 0 0 0 0] + const ( + pMinusNWordZero = 0x03c9baee + pMinusNWordOne = 0x03685c8b + pMinusNWordTwo = 0x01fc4402 + pMinusNWordThree = 0x006542dd + pMinusNWordFour = 0x01455123 + pMinusNWordFive = 0x00000000 + pMinusNWordSix = 0x00000000 + pMinusNWordSeven = 0x00000000 + pMinusNWordEight = 0x00000000 + pMinusNWordNine = 0x00000000 + ) + + // The intuition here is that the value is greater than field prime minus + // the group order if one of the higher individual words is greater than the + // corresponding word and all higher words in the value are equal. 
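+ //
+ // Working from the most significant word downward, 'result' latches a 1 once
+ // a strictly greater word is seen while every word above it matched, and
+ // 'highWordsEqual' tracks whether all words examined so far were equal. The
+ // final word uses greater-or-equal so exact equality is also reported.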
+ result := constantTimeGreater(f.n[9], pMinusNWordNine) + highWordsEqual := constantTimeEq(f.n[9], pMinusNWordNine) + result |= highWordsEqual & constantTimeGreater(f.n[8], pMinusNWordEight) + highWordsEqual &= constantTimeEq(f.n[8], pMinusNWordEight) + result |= highWordsEqual & constantTimeGreater(f.n[7], pMinusNWordSeven) + highWordsEqual &= constantTimeEq(f.n[7], pMinusNWordSeven) + result |= highWordsEqual & constantTimeGreater(f.n[6], pMinusNWordSix) + highWordsEqual &= constantTimeEq(f.n[6], pMinusNWordSix) + result |= highWordsEqual & constantTimeGreater(f.n[5], pMinusNWordFive) + highWordsEqual &= constantTimeEq(f.n[5], pMinusNWordFive) + result |= highWordsEqual & constantTimeGreater(f.n[4], pMinusNWordFour) + highWordsEqual &= constantTimeEq(f.n[4], pMinusNWordFour) + result |= highWordsEqual & constantTimeGreater(f.n[3], pMinusNWordThree) + highWordsEqual &= constantTimeEq(f.n[3], pMinusNWordThree) + result |= highWordsEqual & constantTimeGreater(f.n[2], pMinusNWordTwo) + highWordsEqual &= constantTimeEq(f.n[2], pMinusNWordTwo) + result |= highWordsEqual & constantTimeGreater(f.n[1], pMinusNWordOne) + highWordsEqual &= constantTimeEq(f.n[1], pMinusNWordOne) + result |= highWordsEqual & constantTimeGreaterOrEq(f.n[0], pMinusNWordZero) + + return result != 0 +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go new file mode 100644 index 000000000000..91c3d3776933 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go @@ -0,0 +1,91 @@ +// Copyright 2015 The btcsuite developers +// Copyright (c) 2015-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +import ( + "compress/zlib" + "encoding/base64" + "io" + "strings" + "sync" +) + +//go:generate go run genprecomps.go + +// bytePointTable describes a table used to house pre-computed values for +// accelerating scalar base multiplication. +type bytePointTable [32][256]JacobianPoint + +// compressedBytePointsFn is set to a real function by the code generation to +// return the compressed pre-computed values for accelerating scalar base +// multiplication. +var compressedBytePointsFn func() string + +// s256BytePoints houses pre-computed values used to accelerate scalar base +// multiplication such that they are only loaded on first use. +var s256BytePoints = func() func() *bytePointTable { + // mustLoadBytePoints decompresses and deserializes the pre-computed byte + // points used to accelerate scalar base multiplication for the secp256k1 + // curve. + // + // This approach is used since it allows the compile to use significantly + // less ram and be performed much faster than it is with hard-coding the + // final in-memory data structure. At the same time, it is quite fast to + // generate the in-memory data structure on first use with this approach + // versus computing the table. + // + // It will panic on any errors because the data is hard coded and thus any + // errors means something is wrong in the source code. + var data *bytePointTable + mustLoadBytePoints := func() { + // There will be no byte points to load when generating them. + if compressedBytePointsFn == nil { + return + } + bp := compressedBytePointsFn() + + // Decompress the pre-computed table used to accelerate scalar base + // multiplication. 
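+ // The generated data is a base64-encoded, zlib-compressed blob holding the
+ // 32*256 table points serialized as 32-byte X and Y coordinates, so it is
+ // decoded and inflated here before being unpacked below.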
+ decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(bp)) + r, err := zlib.NewReader(decoder) + if err != nil { + panic(err) + } + serialized, err := io.ReadAll(r) + if err != nil { + panic(err) + } + + // Deserialize the precomputed byte points and set the memory table to + // them. + offset := 0 + var bytePoints bytePointTable + for byteNum := 0; byteNum < len(bytePoints); byteNum++ { + // All points in this window. + for i := 0; i < len(bytePoints[byteNum]); i++ { + p := &bytePoints[byteNum][i] + p.X.SetByteSlice(serialized[offset:]) + offset += 32 + p.Y.SetByteSlice(serialized[offset:]) + offset += 32 + p.Z.SetInt(1) + } + } + data = &bytePoints + } + + // Return a closure that initializes the data on first access. This is done + // because the table takes a non-trivial amount of memory and initializing + // it unconditionally would cause anything that imports the package, either + // directly, or indirectly via transitive deps, to use that memory even if + // the caller never accesses any parts of the package that actually needs + // access to it. + var loadBytePointsOnce sync.Once + return func() *bytePointTable { + loadBytePointsOnce.Do(mustLoadBytePoints) + return data + } +}() diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go new file mode 100644 index 000000000000..225016d8de99 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go @@ -0,0 +1,1105 @@ +// Copyright (c) 2020-2024 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +import ( + "encoding/hex" + "math/big" +) + +// References: +// [SECG]: Recommended Elliptic Curve Domain Parameters +// https://www.secg.org/sec2-v2.pdf +// +// [HAC]: Handbook of Applied Cryptography Menezes, van Oorschot, Vanstone. +// http://cacr.uwaterloo.ca/hac/ + +// Many elliptic curve operations require working with scalars in a finite field +// characterized by the order of the group underlying the secp256k1 curve. +// Given this precision is larger than the biggest available native type, +// obviously some form of bignum math is needed. This code implements +// specialized fixed-precision field arithmetic rather than relying on an +// arbitrary-precision arithmetic package such as math/big for dealing with the +// math modulo the group order since the size is known. As a result, rather +// large performance gains are achieved by taking advantage of many +// optimizations not available to arbitrary-precision arithmetic and generic +// modular arithmetic algorithms. +// +// There are various ways to internally represent each element. For example, +// the most obvious representation would be to use an array of 4 uint64s (64 +// bits * 4 = 256 bits). However, that representation suffers from the fact +// that there is no native Go type large enough to handle the intermediate +// results while adding or multiplying two 64-bit numbers. +// +// Given the above, this implementation represents the field elements as 8 +// uint32s with each word (array entry) treated as base 2^32. 
This was chosen +// because most systems at the current time are 64-bit (or at least have 64-bit +// registers available for specialized purposes such as MMX) so the intermediate +// results can typically be done using a native register (and using uint64s to +// avoid the need for additional half-word arithmetic) + +const ( + // These fields provide convenient access to each of the words of the + // secp256k1 curve group order N to improve code readability. + // + // The group order of the curve per [SECG] is: + // 0xffffffff ffffffff ffffffff fffffffe baaedce6 af48a03b bfd25e8c d0364141 + // + // nolint: dupword + orderWordZero uint32 = 0xd0364141 + orderWordOne uint32 = 0xbfd25e8c + orderWordTwo uint32 = 0xaf48a03b + orderWordThree uint32 = 0xbaaedce6 + orderWordFour uint32 = 0xfffffffe + orderWordFive uint32 = 0xffffffff + orderWordSix uint32 = 0xffffffff + orderWordSeven uint32 = 0xffffffff + + // These fields provide convenient access to each of the words of the two's + // complement of the secp256k1 curve group order N to improve code + // readability. + // + // The two's complement of the group order is: + // 0x00000000 00000000 00000000 00000001 45512319 50b75fc4 402da173 2fc9bebf + orderComplementWordZero uint32 = (^orderWordZero) + 1 + orderComplementWordOne uint32 = ^orderWordOne + orderComplementWordTwo uint32 = ^orderWordTwo + orderComplementWordThree uint32 = ^orderWordThree + // orderComplementWordFour uint32 = ^orderWordFour // unused + // orderComplementWordFive uint32 = ^orderWordFive // unused + // orderComplementWordSix uint32 = ^orderWordSix // unused + // orderComplementWordSeven uint32 = ^orderWordSeven // unused + + // These fields provide convenient access to each of the words of the + // secp256k1 curve group order N / 2 to improve code readability and avoid + // the need to recalculate them. + // + // The half order of the secp256k1 curve group is: + // 0x7fffffff ffffffff ffffffff ffffffff 5d576e73 57a4501d dfe92f46 681b20a0 + // + // nolint: dupword + halfOrderWordZero uint32 = 0x681b20a0 + halfOrderWordOne uint32 = 0xdfe92f46 + halfOrderWordTwo uint32 = 0x57a4501d + halfOrderWordThree uint32 = 0x5d576e73 + halfOrderWordFour uint32 = 0xffffffff + halfOrderWordFive uint32 = 0xffffffff + halfOrderWordSix uint32 = 0xffffffff + halfOrderWordSeven uint32 = 0x7fffffff + + // uint32Mask is simply a mask with all bits set for a uint32 and is used to + // improve the readability of the code. + uint32Mask = 0xffffffff +) + +var ( + // zero32 is an array of 32 bytes used for the purposes of zeroing and is + // defined here to avoid extra allocations. + zero32 = [32]byte{} +) + +// ModNScalar implements optimized 256-bit constant-time fixed-precision +// arithmetic over the secp256k1 group order. This means all arithmetic is +// performed modulo: +// +// 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 +// +// It only implements the arithmetic needed for elliptic curve operations, +// however, the operations that are not implemented can typically be worked +// around if absolutely needed. For example, subtraction can be performed by +// adding the negation. +// +// Should it be absolutely necessary, conversion to the standard library +// math/big.Int can be accomplished by using the Bytes method, slicing the +// resulting fixed-size array, and feeding it to big.Int.SetBytes. 
However, +// that should typically be avoided when possible as conversion to big.Ints +// requires allocations, is not constant time, and is slower when working modulo +// the group order. +type ModNScalar struct { + // The scalar is represented as 8 32-bit integers in base 2^32. + // + // The following depicts the internal representation: + // --------------------------------------------------------- + // | n[7] | n[6] | ... | n[0] | + // | 32 bits | 32 bits | ... | 32 bits | + // | Mult: 2^(32*7) | Mult: 2^(32*6) | ... | Mult: 2^(32*0) | + // --------------------------------------------------------- + // + // For example, consider the number 2^87 + 2^42 + 1. It would be + // represented as: + // n[0] = 1 + // n[1] = 2^10 + // n[2] = 2^23 + // n[3..7] = 0 + // + // The full 256-bit value is then calculated by looping i from 7..0 and + // doing sum(n[i] * 2^(32i)) like so: + // n[7] * 2^(32*7) = 0 * 2^224 = 0 + // n[6] * 2^(32*6) = 0 * 2^192 = 0 + // ... + // n[2] * 2^(32*2) = 2^23 * 2^64 = 2^87 + // n[1] * 2^(32*1) = 2^10 * 2^32 = 2^42 + // n[0] * 2^(32*0) = 1 * 2^0 = 1 + // Sum: 0 + 0 + ... + 2^87 + 2^42 + 1 = 2^87 + 2^42 + 1 + n [8]uint32 +} + +// String returns the scalar as a human-readable hex string. +// +// This is NOT constant time. +func (s ModNScalar) String() string { + b := s.Bytes() + return hex.EncodeToString(b[:]) +} + +// Set sets the scalar equal to a copy of the passed one in constant time. +// +// The scalar is returned to support chaining. This enables syntax like: +// s := new(ModNScalar).Set(s2).Add(1) so that s = s2 + 1 where s2 is not +// modified. +func (s *ModNScalar) Set(val *ModNScalar) *ModNScalar { + *s = *val + return s +} + +// Zero sets the scalar to zero in constant time. A newly created scalar is +// already set to zero. This function can be useful to clear an existing scalar +// for reuse. +func (s *ModNScalar) Zero() { + s.n[0] = 0 + s.n[1] = 0 + s.n[2] = 0 + s.n[3] = 0 + s.n[4] = 0 + s.n[5] = 0 + s.n[6] = 0 + s.n[7] = 0 +} + +// IsZeroBit returns 1 when the scalar is equal to zero or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. See IsZero for the version that returns +// a bool. +func (s *ModNScalar) IsZeroBit() uint32 { + // The scalar can only be zero if no bits are set in any of the words. + bits := s.n[0] | s.n[1] | s.n[2] | s.n[3] | s.n[4] | s.n[5] | s.n[6] | s.n[7] + return constantTimeEq(bits, 0) +} + +// IsZero returns whether or not the scalar is equal to zero in constant time. +func (s *ModNScalar) IsZero() bool { + // The scalar can only be zero if no bits are set in any of the words. + bits := s.n[0] | s.n[1] | s.n[2] | s.n[3] | s.n[4] | s.n[5] | s.n[6] | s.n[7] + return bits == 0 +} + +// SetInt sets the scalar to the passed integer in constant time. This is a +// convenience function since it is fairly common to perform some arithmetic +// with small native integers. +// +// The scalar is returned to support chaining. This enables syntax like: +// s := new(ModNScalar).SetInt(2).Mul(s2) so that s = 2 * s2. +func (s *ModNScalar) SetInt(ui uint32) *ModNScalar { + s.Zero() + s.n[0] = ui + return s +} + +// constantTimeEq returns 1 if a == b or 0 otherwise in constant time. +func constantTimeEq(a, b uint32) uint32 { + return uint32((uint64(a^b) - 1) >> 63) +} + +// constantTimeNotEq returns 1 if a != b or 0 otherwise in constant time. 
+func constantTimeNotEq(a, b uint32) uint32 { + return ^uint32((uint64(a^b)-1)>>63) & 1 +} + +// constantTimeLess returns 1 if a < b or 0 otherwise in constant time. +func constantTimeLess(a, b uint32) uint32 { + return uint32((uint64(a) - uint64(b)) >> 63) +} + +// constantTimeLessOrEq returns 1 if a <= b or 0 otherwise in constant time. +func constantTimeLessOrEq(a, b uint32) uint32 { + return uint32((uint64(a) - uint64(b) - 1) >> 63) +} + +// constantTimeGreater returns 1 if a > b or 0 otherwise in constant time. +func constantTimeGreater(a, b uint32) uint32 { + return constantTimeLess(b, a) +} + +// constantTimeGreaterOrEq returns 1 if a >= b or 0 otherwise in constant time. +func constantTimeGreaterOrEq(a, b uint32) uint32 { + return constantTimeLessOrEq(b, a) +} + +// constantTimeMin returns min(a,b) in constant time. +func constantTimeMin(a, b uint32) uint32 { + return b ^ ((a ^ b) & -constantTimeLess(a, b)) +} + +// overflows determines if the current scalar is greater than or equal to the +// group order in constant time and returns 1 if it is or 0 otherwise. +func (s *ModNScalar) overflows() uint32 { + // The intuition here is that the scalar is greater than the group order if + // one of the higher individual words is greater than corresponding word of + // the group order and all higher words in the scalar are equal to their + // corresponding word of the group order. Since this type is modulo the + // group order, being equal is also an overflow back to 0. + // + // Note that the words 5, 6, and 7 are all the max uint32 value, so there is + // no need to test if those individual words of the scalar exceeds them, + // hence, only equality is checked for them. + highWordsEqual := constantTimeEq(s.n[7], orderWordSeven) + highWordsEqual &= constantTimeEq(s.n[6], orderWordSix) + highWordsEqual &= constantTimeEq(s.n[5], orderWordFive) + overflow := highWordsEqual & constantTimeGreater(s.n[4], orderWordFour) + highWordsEqual &= constantTimeEq(s.n[4], orderWordFour) + overflow |= highWordsEqual & constantTimeGreater(s.n[3], orderWordThree) + highWordsEqual &= constantTimeEq(s.n[3], orderWordThree) + overflow |= highWordsEqual & constantTimeGreater(s.n[2], orderWordTwo) + highWordsEqual &= constantTimeEq(s.n[2], orderWordTwo) + overflow |= highWordsEqual & constantTimeGreater(s.n[1], orderWordOne) + highWordsEqual &= constantTimeEq(s.n[1], orderWordOne) + overflow |= highWordsEqual & constantTimeGreaterOrEq(s.n[0], orderWordZero) + + return overflow +} + +// reduce256 reduces the current scalar modulo the group order in accordance +// with the overflows parameter in constant time. The overflows parameter +// specifies whether or not the scalar is known to be greater than the group +// order and MUST either be 1 in the case it is or 0 in the case it is not for a +// correct result. +func (s *ModNScalar) reduce256(overflows uint32) { + // Notice that since s < 2^256 < 2N (where N is the group order), the max + // possible number of reductions required is one. Therefore, in the case a + // reduction is needed, it can be performed with a single subtraction of N. + // Also, recall that subtraction is equivalent to addition by the two's + // complement while ignoring the carry. + // + // When s >= N, the overflows parameter will be 1. Conversely, it will be 0 + // when s < N. Thus multiplying by the overflows parameter will either + // result in 0 or the multiplicand itself. 
+ // + // Combining the above along with the fact that s + 0 = s, the following is + // a constant time implementation that works by either adding 0 or the two's + // complement of N as needed. + // + // The final result will be in the range 0 <= s < N as expected. + overflows64 := uint64(overflows) + c := uint64(s.n[0]) + overflows64*uint64(orderComplementWordZero) + s.n[0] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[1]) + overflows64*uint64(orderComplementWordOne) + s.n[1] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[2]) + overflows64*uint64(orderComplementWordTwo) + s.n[2] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[3]) + overflows64*uint64(orderComplementWordThree) + s.n[3] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[4]) + overflows64 // * 1 + s.n[4] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[5]) // + overflows64 * 0 + s.n[5] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[6]) // + overflows64 * 0 + s.n[6] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[7]) // + overflows64 * 0 + s.n[7] = uint32(c & uint32Mask) +} + +// SetBytes interprets the provided array as a 256-bit big-endian unsigned +// integer, reduces it modulo the group order, sets the scalar to the result, +// and returns either 1 if it was reduced (aka it overflowed) or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. +func (s *ModNScalar) SetBytes(b *[32]byte) uint32 { + // Pack the 256 total bits across the 8 uint32 words. This could be done + // with a for loop, but benchmarks show this unrolled version is about 2 + // times faster than the variant that uses a loop. + s.n[0] = uint32(b[31]) | uint32(b[30])<<8 | uint32(b[29])<<16 | uint32(b[28])<<24 + s.n[1] = uint32(b[27]) | uint32(b[26])<<8 | uint32(b[25])<<16 | uint32(b[24])<<24 + s.n[2] = uint32(b[23]) | uint32(b[22])<<8 | uint32(b[21])<<16 | uint32(b[20])<<24 + s.n[3] = uint32(b[19]) | uint32(b[18])<<8 | uint32(b[17])<<16 | uint32(b[16])<<24 + s.n[4] = uint32(b[15]) | uint32(b[14])<<8 | uint32(b[13])<<16 | uint32(b[12])<<24 + s.n[5] = uint32(b[11]) | uint32(b[10])<<8 | uint32(b[9])<<16 | uint32(b[8])<<24 + s.n[6] = uint32(b[7]) | uint32(b[6])<<8 | uint32(b[5])<<16 | uint32(b[4])<<24 + s.n[7] = uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + + // The value might be >= N, so reduce it as required and return whether or + // not it was reduced. + needsReduce := s.overflows() + s.reduce256(needsReduce) + return needsReduce +} + +// zeroArray32 zeroes the provided 32-byte buffer. +func zeroArray32(b *[32]byte) { + copy(b[:], zero32[:]) +} + +// SetByteSlice interprets the provided slice as a 256-bit big-endian unsigned +// integer (meaning it is truncated to the first 32 bytes), reduces it modulo +// the group order, sets the scalar to the result, and returns whether or not +// the resulting truncated 256-bit integer overflowed in constant time. +// +// Note that since passing a slice with more than 32 bytes is truncated, it is +// possible that the truncated value is less than the order of the curve and +// hence it will not be reported as having overflowed in that case. It is up to +// the caller to decide whether it needs to provide numbers of the appropriate +// size or it is acceptable to use this function with the described truncation +// and overflow behavior. 
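
[Editor's aside] SetBytes above can be cross-checked against math/big. A standalone sketch, assuming the vendored module is imported under its path github.com/decred/dcrd/dcrec/secp256k1/v4: an input of N+5 is reported as overflowed and stored as 5.

package main

import (
	"fmt"
	"math/big"

	secp "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	n, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)

	// N+5 is >= N, so SetBytes must report an overflow (return 1) and the
	// scalar must hold (N+5) mod N = 5.
	v := new(big.Int).Add(n, big.NewInt(5))
	var in [32]byte
	v.FillBytes(in[:])

	var s secp.ModNScalar
	overflowed := s.SetBytes(&in)
	out := s.Bytes()
	fmt.Println(overflowed, new(big.Int).SetBytes(out[:])) // 1 5
}
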
+func (s *ModNScalar) SetByteSlice(b []byte) bool { + var b32 [32]byte + b = b[:constantTimeMin(uint32(len(b)), 32)] + copy(b32[:], b32[:32-len(b)]) + copy(b32[32-len(b):], b) + result := s.SetBytes(&b32) + zeroArray32(&b32) + return result != 0 +} + +// PutBytesUnchecked unpacks the scalar to a 32-byte big-endian value directly +// into the passed byte slice in constant time. The target slice must have at +// least 32 bytes available or it will panic. +// +// There is a similar function, PutBytes, which unpacks the scalar into a +// 32-byte array directly. This version is provided since it can be useful to +// write directly into part of a larger buffer without needing a separate +// allocation. +// +// Preconditions: +// - The target slice MUST have at least 32 bytes available +func (s *ModNScalar) PutBytesUnchecked(b []byte) { + // Unpack the 256 total bits from the 8 uint32 words. This could be done + // with a for loop, but benchmarks show this unrolled version is about 2 + // times faster than the variant which uses a loop. + b[31] = byte(s.n[0]) + b[30] = byte(s.n[0] >> 8) + b[29] = byte(s.n[0] >> 16) + b[28] = byte(s.n[0] >> 24) + b[27] = byte(s.n[1]) + b[26] = byte(s.n[1] >> 8) + b[25] = byte(s.n[1] >> 16) + b[24] = byte(s.n[1] >> 24) + b[23] = byte(s.n[2]) + b[22] = byte(s.n[2] >> 8) + b[21] = byte(s.n[2] >> 16) + b[20] = byte(s.n[2] >> 24) + b[19] = byte(s.n[3]) + b[18] = byte(s.n[3] >> 8) + b[17] = byte(s.n[3] >> 16) + b[16] = byte(s.n[3] >> 24) + b[15] = byte(s.n[4]) + b[14] = byte(s.n[4] >> 8) + b[13] = byte(s.n[4] >> 16) + b[12] = byte(s.n[4] >> 24) + b[11] = byte(s.n[5]) + b[10] = byte(s.n[5] >> 8) + b[9] = byte(s.n[5] >> 16) + b[8] = byte(s.n[5] >> 24) + b[7] = byte(s.n[6]) + b[6] = byte(s.n[6] >> 8) + b[5] = byte(s.n[6] >> 16) + b[4] = byte(s.n[6] >> 24) + b[3] = byte(s.n[7]) + b[2] = byte(s.n[7] >> 8) + b[1] = byte(s.n[7] >> 16) + b[0] = byte(s.n[7] >> 24) +} + +// PutBytes unpacks the scalar to a 32-byte big-endian value using the passed +// byte array in constant time. +// +// There is a similar function, PutBytesUnchecked, which unpacks the scalar into +// a slice that must have at least 32 bytes available. This version is provided +// since it can be useful to write directly into an array that is type checked. +// +// Alternatively, there is also Bytes, which unpacks the scalar into a new array +// and returns that which can sometimes be more ergonomic in applications that +// aren't concerned about an additional copy. +func (s *ModNScalar) PutBytes(b *[32]byte) { + s.PutBytesUnchecked(b[:]) +} + +// Bytes unpacks the scalar to a 32-byte big-endian value in constant time. +// +// See PutBytes and PutBytesUnchecked for variants that allow an array or slice +// to be passed which can be useful to cut down on the number of allocations +// by allowing the caller to reuse a buffer or write directly into part of a +// larger buffer. +func (s *ModNScalar) Bytes() [32]byte { + var b [32]byte + s.PutBytesUnchecked(b[:]) + return b +} + +// IsOdd returns whether or not the scalar is an odd number in constant time. +func (s *ModNScalar) IsOdd() bool { + // Only odd numbers have the bottom bit set. + return s.n[0]&1 == 1 +} + +// Equals returns whether or not the two scalars are the same in constant time. +func (s *ModNScalar) Equals(val *ModNScalar) bool { + // Xor only sets bits when they are different, so the two scalars can only + // be the same if no bits are set after xoring each word. 
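
[Editor's aside] To tie the serialization helpers above to Equals, a hedged standalone sketch (same v4 import path assumed) that packs two scalars into one 64-byte buffer with PutBytesUnchecked and round-trips them with SetByteSlice:

package main

import (
	"fmt"

	secp "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	r := new(secp.ModNScalar).SetInt(1111)
	s := new(secp.ModNScalar).SetInt(2222)

	// Serialize both scalars into one 64-byte buffer without extra copies.
	var sig [64]byte
	r.PutBytesUnchecked(sig[:32])
	s.PutBytesUnchecked(sig[32:])

	// Round-trip them back out and confirm equality.
	var r2, s2 secp.ModNScalar
	r2.SetByteSlice(sig[:32])
	s2.SetByteSlice(sig[32:])
	fmt.Println(r2.Equals(r), s2.Equals(s)) // true true
}
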
+ bits := (s.n[0] ^ val.n[0]) | (s.n[1] ^ val.n[1]) | (s.n[2] ^ val.n[2]) | + (s.n[3] ^ val.n[3]) | (s.n[4] ^ val.n[4]) | (s.n[5] ^ val.n[5]) | + (s.n[6] ^ val.n[6]) | (s.n[7] ^ val.n[7]) + + return bits == 0 +} + +// Add2 adds the passed two scalars together modulo the group order in constant +// time and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s3.Add2(s, s2).AddInt(1) so that s3 = s + s2 + 1. +func (s *ModNScalar) Add2(val1, val2 *ModNScalar) *ModNScalar { + c := uint64(val1.n[0]) + uint64(val2.n[0]) + s.n[0] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[1]) + uint64(val2.n[1]) + s.n[1] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[2]) + uint64(val2.n[2]) + s.n[2] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[3]) + uint64(val2.n[3]) + s.n[3] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[4]) + uint64(val2.n[4]) + s.n[4] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[5]) + uint64(val2.n[5]) + s.n[5] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[6]) + uint64(val2.n[6]) + s.n[6] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[7]) + uint64(val2.n[7]) + s.n[7] = uint32(c & uint32Mask) + + // The result is now 256 bits, but it might still be >= N, so use the + // existing normal reduce method for 256-bit values. + s.reduce256(uint32(c>>32) + s.overflows()) + return s +} + +// Add adds the passed scalar to the existing one modulo the group order in +// constant time and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Add(s2).AddInt(1) so that s = s + s2 + 1. +func (s *ModNScalar) Add(val *ModNScalar) *ModNScalar { + return s.Add2(s, val) +} + +// accumulator96 provides a 96-bit accumulator for use in the intermediate +// calculations requiring more than 64-bits. +type accumulator96 struct { + n [3]uint32 +} + +// Add adds the passed unsigned 64-bit value to the accumulator. +func (a *accumulator96) Add(v uint64) { + low := uint32(v & uint32Mask) + hi := uint32(v >> 32) + a.n[0] += low + hi += constantTimeLess(a.n[0], low) // Carry if overflow in n[0]. + a.n[1] += hi + a.n[2] += constantTimeLess(a.n[1], hi) // Carry if overflow in n[1]. +} + +// Rsh32 right shifts the accumulator by 32 bits. +func (a *accumulator96) Rsh32() { + a.n[0] = a.n[1] + a.n[1] = a.n[2] + a.n[2] = 0 +} + +// reduce385 reduces the 385-bit intermediate result in the passed terms modulo +// the group order in constant time and stores the result in s. +func (s *ModNScalar) reduce385(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12 uint64) { + // At this point, the intermediate result in the passed terms has been + // reduced to fit within 385 bits, so reduce it again using the same method + // described in reduce512. As before, the intermediate result will end up + // being reduced by another 127 bits to 258 bits, thus 9 32-bit terms are + // needed for this iteration. The reduced terms are assigned back to t0 + // through t8. + // + // Note that several of the intermediate calculations require adding 64-bit + // products together which would overflow a uint64, so a 96-bit accumulator + // is used instead until the value is reduced enough to use native uint64s. + + // Terms for 2^(32*0). + var acc accumulator96 + acc.n[0] = uint32(t0) // == acc.Add(t0) because acc is guaranteed to be 0. + acc.Add(t8 * uint64(orderComplementWordZero)) + t0 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*1). 
+ acc.Add(t1) + acc.Add(t8 * uint64(orderComplementWordOne)) + acc.Add(t9 * uint64(orderComplementWordZero)) + t1 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*2). + acc.Add(t2) + acc.Add(t8 * uint64(orderComplementWordTwo)) + acc.Add(t9 * uint64(orderComplementWordOne)) + acc.Add(t10 * uint64(orderComplementWordZero)) + t2 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*3). + acc.Add(t3) + acc.Add(t8 * uint64(orderComplementWordThree)) + acc.Add(t9 * uint64(orderComplementWordTwo)) + acc.Add(t10 * uint64(orderComplementWordOne)) + acc.Add(t11 * uint64(orderComplementWordZero)) + t3 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*4). + acc.Add(t4) + acc.Add(t8) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t9 * uint64(orderComplementWordThree)) + acc.Add(t10 * uint64(orderComplementWordTwo)) + acc.Add(t11 * uint64(orderComplementWordOne)) + acc.Add(t12 * uint64(orderComplementWordZero)) + t4 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*5). + acc.Add(t5) + // acc.Add(t8 * uint64(orderComplementWordFive)) // 0 + acc.Add(t9) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t10 * uint64(orderComplementWordThree)) + acc.Add(t11 * uint64(orderComplementWordTwo)) + acc.Add(t12 * uint64(orderComplementWordOne)) + t5 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*6). + acc.Add(t6) + // acc.Add(t8 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t9 * uint64(orderComplementWordFive)) // 0 + acc.Add(t10) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t11 * uint64(orderComplementWordThree)) + acc.Add(t12 * uint64(orderComplementWordTwo)) + t6 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*7). + acc.Add(t7) + // acc.Add(t8 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t9 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t10 * uint64(orderComplementWordFive)) // 0 + acc.Add(t11) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t12 * uint64(orderComplementWordThree)) + t7 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*8). + // acc.Add(t9 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t10 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t11 * uint64(orderComplementWordFive)) // 0 + acc.Add(t12) // * uint64(orderComplementWordFour) // * 1 + t8 = uint64(acc.n[0]) + // acc.Rsh32() // No need since not used after this. Guaranteed to be 0. + + // NOTE: All of the remaining multiplications for this iteration result in 0 + // as they all involve multiplying by combinations of the fifth, sixth, and + // seventh words of the two's complement of N, which are 0, so skip them. + + // At this point, the result is reduced to fit within 258 bits, so reduce it + // again using a slightly modified version of the same method. The maximum + // value in t8 is 2 at this point and therefore multiplying it by each word + // of the two's complement of N and adding it to a 32-bit term will result + // in a maximum requirement of 33 bits, so it is safe to use native uint64s + // here for the intermediate term carry propagation. + // + // Also, since the maximum value in t8 is 2, this ends up reducing by + // another 2 bits to 256 bits. 
+ c := t0 + t8*uint64(orderComplementWordZero) + s.n[0] = uint32(c & uint32Mask) + c = (c >> 32) + t1 + t8*uint64(orderComplementWordOne) + s.n[1] = uint32(c & uint32Mask) + c = (c >> 32) + t2 + t8*uint64(orderComplementWordTwo) + s.n[2] = uint32(c & uint32Mask) + c = (c >> 32) + t3 + t8*uint64(orderComplementWordThree) + s.n[3] = uint32(c & uint32Mask) + c = (c >> 32) + t4 + t8 // * uint64(orderComplementWordFour) == * 1 + s.n[4] = uint32(c & uint32Mask) + c = (c >> 32) + t5 // + t8*uint64(orderComplementWordFive) == 0 + s.n[5] = uint32(c & uint32Mask) + c = (c >> 32) + t6 // + t8*uint64(orderComplementWordSix) == 0 + s.n[6] = uint32(c & uint32Mask) + c = (c >> 32) + t7 // + t8*uint64(orderComplementWordSeven) == 0 + s.n[7] = uint32(c & uint32Mask) + + // The result is now 256 bits, but it might still be >= N, so use the + // existing normal reduce method for 256-bit values. + s.reduce256(uint32(c>>32) + s.overflows()) +} + +// reduce512 reduces the 512-bit intermediate result in the passed terms modulo +// the group order down to 385 bits in constant time and stores the result in s. +func (s *ModNScalar) reduce512(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15 uint64) { + // At this point, the intermediate result in the passed terms is grouped + // into the respective bases. + // + // Per [HAC] section 14.3.4: Reduction method of moduli of special form, + // when the modulus is of the special form m = b^t - c, where log_2(c) < t, + // highly efficient reduction can be achieved per the provided algorithm. + // + // The secp256k1 group order fits this criteria since it is: + // 2^256 - 432420386565659656852420866394968145599 + // + // Technically the max possible value here is (N-1)^2 since the two scalars + // being multiplied are always mod N. Nevertheless, it is safer to consider + // it to be (2^256-1)^2 = 2^512 - 2^257 + 1 since it is the product of two + // 256-bit values. + // + // The algorithm is to reduce the result modulo the prime by subtracting + // multiples of the group order N. However, in order simplify carry + // propagation, this adds with the two's complement of N to achieve the same + // result. + // + // Since the two's complement of N has 127 leading zero bits, this will end + // up reducing the intermediate result from 512 bits to 385 bits, resulting + // in 13 32-bit terms. The reduced terms are assigned back to t0 through + // t12. + // + // Note that several of the intermediate calculations require adding 64-bit + // products together which would overflow a uint64, so a 96-bit accumulator + // is used instead. + + // Terms for 2^(32*0). + var acc accumulator96 + acc.n[0] = uint32(t0) // == acc.Add(t0) because acc is guaranteed to be 0. + acc.Add(t8 * uint64(orderComplementWordZero)) + t0 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*1). + acc.Add(t1) + acc.Add(t8 * uint64(orderComplementWordOne)) + acc.Add(t9 * uint64(orderComplementWordZero)) + t1 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*2). + acc.Add(t2) + acc.Add(t8 * uint64(orderComplementWordTwo)) + acc.Add(t9 * uint64(orderComplementWordOne)) + acc.Add(t10 * uint64(orderComplementWordZero)) + t2 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*3). + acc.Add(t3) + acc.Add(t8 * uint64(orderComplementWordThree)) + acc.Add(t9 * uint64(orderComplementWordTwo)) + acc.Add(t10 * uint64(orderComplementWordOne)) + acc.Add(t11 * uint64(orderComplementWordZero)) + t3 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*4). 
+ acc.Add(t4) + acc.Add(t8) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t9 * uint64(orderComplementWordThree)) + acc.Add(t10 * uint64(orderComplementWordTwo)) + acc.Add(t11 * uint64(orderComplementWordOne)) + acc.Add(t12 * uint64(orderComplementWordZero)) + t4 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*5). + acc.Add(t5) + // acc.Add(t8 * uint64(orderComplementWordFive)) // 0 + acc.Add(t9) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t10 * uint64(orderComplementWordThree)) + acc.Add(t11 * uint64(orderComplementWordTwo)) + acc.Add(t12 * uint64(orderComplementWordOne)) + acc.Add(t13 * uint64(orderComplementWordZero)) + t5 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*6). + acc.Add(t6) + // acc.Add(t8 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t9 * uint64(orderComplementWordFive)) // 0 + acc.Add(t10) // * uint64(orderComplementWordFour)) // * 1 + acc.Add(t11 * uint64(orderComplementWordThree)) + acc.Add(t12 * uint64(orderComplementWordTwo)) + acc.Add(t13 * uint64(orderComplementWordOne)) + acc.Add(t14 * uint64(orderComplementWordZero)) + t6 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*7). + acc.Add(t7) + // acc.Add(t8 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t9 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t10 * uint64(orderComplementWordFive)) // 0 + acc.Add(t11) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t12 * uint64(orderComplementWordThree)) + acc.Add(t13 * uint64(orderComplementWordTwo)) + acc.Add(t14 * uint64(orderComplementWordOne)) + acc.Add(t15 * uint64(orderComplementWordZero)) + t7 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*8). + // acc.Add(t9 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t10 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t11 * uint64(orderComplementWordFive)) // 0 + acc.Add(t12) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t13 * uint64(orderComplementWordThree)) + acc.Add(t14 * uint64(orderComplementWordTwo)) + acc.Add(t15 * uint64(orderComplementWordOne)) + t8 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*9). + // acc.Add(t10 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t11 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t12 * uint64(orderComplementWordFive)) // 0 + acc.Add(t13) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t14 * uint64(orderComplementWordThree)) + acc.Add(t15 * uint64(orderComplementWordTwo)) + t9 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*10). + // acc.Add(t11 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t12 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t13 * uint64(orderComplementWordFive)) // 0 + acc.Add(t14) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t15 * uint64(orderComplementWordThree)) + t10 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*11). + // acc.Add(t12 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t13 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t14 * uint64(orderComplementWordFive)) // 0 + acc.Add(t15) // * uint64(orderComplementWordFour) // * 1 + t11 = uint64(acc.n[0]) + acc.Rsh32() + + // NOTE: All of the remaining multiplications for this iteration result in 0 + // as they all involve multiplying by combinations of the fifth, sixth, and + // seventh words of the two's complement of N, which are 0, so skip them. + + // Terms for 2^(32*12). + t12 = uint64(acc.n[0]) + // acc.Rsh32() // No need since not used after this. Guaranteed to be 0. 
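
[Editor's aside] The bit-count bookkeeping reduce512 relies on (the two's complement of N has 127 leading zero bits, so one pass shrinks a 512-bit product to 385 bits) can be sanity-checked with math/big. A standalone sketch, not part of the vendored file:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	n, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
	c := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), n) // 2^256 - N
	fmt.Println(c.BitLen()) // 129, i.e. 256-129 = 127 leading zero bits

	// Worst-case 512-bit product split as lo + hi*2^256; one pass replaces
	// hi*2^256 with hi*c, so the value stays congruent mod N but shrinks.
	max256 := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	prod := new(big.Int).Mul(max256, max256) // (2^256-1)^2
	lo := new(big.Int).And(prod, max256)
	hi := new(big.Int).Rsh(prod, 256)
	pass1 := new(big.Int).Add(lo, new(big.Int).Mul(hi, c))
	fmt.Println(prod.BitLen(), pass1.BitLen()) // 512 385
}

The 385-bit figure is why 13 32-bit terms are handed off to reduce385 below.
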
+ + // At this point, the result is reduced to fit within 385 bits, so reduce it + // again using the same method accordingly. + s.reduce385(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12) +} + +// Mul2 multiplies the passed two scalars together modulo the group order in +// constant time and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s3.Mul2(s, s2).AddInt(1) so that s3 = (s * s2) + 1. +func (s *ModNScalar) Mul2(val, val2 *ModNScalar) *ModNScalar { + // This could be done with for loops and an array to store the intermediate + // terms, but this unrolled version is significantly faster. + + // The overall strategy employed here is: + // 1) Calculate the 512-bit product of the two scalars using the standard + // pencil-and-paper method. + // 2) Reduce the result modulo the prime by effectively subtracting + // multiples of the group order N (actually performed by adding multiples + // of the two's complement of N to avoid implementing subtraction). + // 3) Repeat step 2 noting that each iteration reduces the required number + // of bits by 127 because the two's complement of N has 127 leading zero + // bits. + // 4) Once reduced to 256 bits, call the existing reduce method to perform + // a final reduction as needed. + // + // Note that several of the intermediate calculations require adding 64-bit + // products together which would overflow a uint64, so a 96-bit accumulator + // is used instead. + + // Terms for 2^(32*0). + var acc accumulator96 + acc.Add(uint64(val.n[0]) * uint64(val2.n[0])) + t0 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*1). + acc.Add(uint64(val.n[0]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[0])) + t1 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*2). + acc.Add(uint64(val.n[0]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[0])) + t2 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*3). + acc.Add(uint64(val.n[0]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[0])) + t3 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*4). + acc.Add(uint64(val.n[0]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[0])) + t4 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*5). + acc.Add(uint64(val.n[0]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[0])) + t5 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*6). + acc.Add(uint64(val.n[0]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[0])) + t6 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*7). 
+ acc.Add(uint64(val.n[0]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[0])) + t7 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*8). + acc.Add(uint64(val.n[1]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[1])) + t8 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*9). + acc.Add(uint64(val.n[2]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[2])) + t9 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*10). + acc.Add(uint64(val.n[3]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[3])) + t10 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*11). + acc.Add(uint64(val.n[4]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[4])) + t11 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*12). + acc.Add(uint64(val.n[5]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[5])) + t12 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*13). + acc.Add(uint64(val.n[6]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[6])) + t13 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*14). + acc.Add(uint64(val.n[7]) * uint64(val2.n[7])) + t14 := uint64(acc.n[0]) + acc.Rsh32() + + // What's left is for 2^(32*15). + t15 := uint64(acc.n[0]) + // acc.Rsh32() // No need since not used after this. Guaranteed to be 0. + + // At this point, all of the terms are grouped into their respective base + // and occupy up to 512 bits. Reduce the result accordingly. + s.reduce512(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, + t15) + return s +} + +// Mul multiplies the passed scalar with the existing one modulo the group order +// in constant time and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Mul(s2).AddInt(1) so that s = (s * s2) + 1. +func (s *ModNScalar) Mul(val *ModNScalar) *ModNScalar { + return s.Mul2(s, val) +} + +// SquareVal squares the passed scalar modulo the group order in constant time +// and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s3.SquareVal(s).Mul(s) so that s3 = s^2 * s = s^3. 
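
[Editor's aside] The chaining style that these doc comments describe composes naturally with the rest of the API. A small usage sketch, assuming the module is imported under the v4 path; InverseValNonConst (defined further below) is the one call that is not constant time:

package main

import (
	"fmt"

	secp "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	a := new(secp.ModNScalar).SetInt(1234567)
	b := new(secp.ModNScalar).SetInt(89)
	one := new(secp.ModNScalar).SetInt(1)

	// s = a*b + 1, built with the chaining style the doc comments describe.
	s := new(secp.ModNScalar).Mul2(a, b).Add(one)

	// a + (-a) must reduce to zero modulo the group order.
	negA := new(secp.ModNScalar).NegateVal(a)
	sum := new(secp.ModNScalar).Add2(a, negA)

	// s * s^-1 must be 1 (modular inverse, non-constant time).
	inv := new(secp.ModNScalar).InverseValNonConst(s)
	prod := new(secp.ModNScalar).Mul2(s, inv)

	fmt.Println(s.IsZero(), sum.IsZero(), prod.Equals(one)) // false true true
}
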
+func (s *ModNScalar) SquareVal(val *ModNScalar) *ModNScalar { + // This could technically be optimized slightly to take advantage of the + // fact that many of the intermediate calculations in squaring are just + // doubling, however, benchmarking has shown that due to the need to use a + // 96-bit accumulator, any savings are essentially offset by that and + // consequently there is no real difference in performance over just + // multiplying the value by itself to justify the extra code for now. This + // can be revisited in the future if it becomes a bottleneck in practice. + + return s.Mul2(val, val) +} + +// Square squares the scalar modulo the group order in constant time. The +// existing scalar is modified. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Square().Mul(s2) so that s = s^2 * s2. +func (s *ModNScalar) Square() *ModNScalar { + return s.SquareVal(s) +} + +// NegateVal negates the passed scalar modulo the group order and stores the +// result in s in constant time. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.NegateVal(s2).AddInt(1) so that s = -s2 + 1. +func (s *ModNScalar) NegateVal(val *ModNScalar) *ModNScalar { + // Since the scalar is already in the range 0 <= val < N, where N is the + // group order, negation modulo the group order is just the group order + // minus the value. This implies that the result will always be in the + // desired range with the sole exception of 0 because N - 0 = N itself. + // + // Therefore, in order to avoid the need to reduce the result for every + // other case in order to achieve constant time, this creates a mask that is + // all 0s in the case of the scalar being negated is 0 and all 1s otherwise + // and bitwise ands that mask with each word. + // + // Finally, to simplify the carry propagation, this adds the two's + // complement of the scalar to N in order to achieve the same result. + bits := val.n[0] | val.n[1] | val.n[2] | val.n[3] | val.n[4] | val.n[5] | + val.n[6] | val.n[7] + mask := uint64(uint32Mask * constantTimeNotEq(bits, 0)) + c := uint64(orderWordZero) + (uint64(^val.n[0]) + 1) + s.n[0] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordOne) + uint64(^val.n[1]) + s.n[1] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordTwo) + uint64(^val.n[2]) + s.n[2] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordThree) + uint64(^val.n[3]) + s.n[3] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordFour) + uint64(^val.n[4]) + s.n[4] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordFive) + uint64(^val.n[5]) + s.n[5] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordSix) + uint64(^val.n[6]) + s.n[6] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordSeven) + uint64(^val.n[7]) + s.n[7] = uint32(c & mask) + return s +} + +// Negate negates the scalar modulo the group order in constant time. The +// existing scalar is modified. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Negate().AddInt(1) so that s = -s + 1. +func (s *ModNScalar) Negate() *ModNScalar { + return s.NegateVal(s) +} + +// InverseValNonConst finds the modular multiplicative inverse of the passed +// scalar and stores result in s in *non-constant* time. +// +// The scalar is returned to support chaining. This enables syntax like: +// s3.InverseVal(s1).Mul(s2) so that s3 = s1^-1 * s2. +func (s *ModNScalar) InverseValNonConst(val *ModNScalar) *ModNScalar { + // This is making use of big integers for now. 
Ideally it will be replaced + // with an implementation that does not depend on big integers. + valBytes := val.Bytes() + bigVal := new(big.Int).SetBytes(valBytes[:]) + bigVal.ModInverse(bigVal, curveParams.N) + s.SetByteSlice(bigVal.Bytes()) + return s +} + +// InverseNonConst finds the modular multiplicative inverse of the scalar in +// *non-constant* time. The existing scalar is modified. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Inverse().Mul(s2) so that s = s^-1 * s2. +func (s *ModNScalar) InverseNonConst() *ModNScalar { + return s.InverseValNonConst(s) +} + +// IsOverHalfOrder returns whether or not the scalar exceeds the group order +// divided by 2 in constant time. +func (s *ModNScalar) IsOverHalfOrder() bool { + // The intuition here is that the scalar is greater than half of the group + // order if one of the higher individual words is greater than the + // corresponding word of the half group order and all higher words in the + // scalar are equal to their corresponding word of the half group order. + // + // Note that the words 4, 5, and 6 are all the max uint32 value, so there is + // no need to test if those individual words of the scalar exceeds them, + // hence, only equality is checked for them. + result := constantTimeGreater(s.n[7], halfOrderWordSeven) + highWordsEqual := constantTimeEq(s.n[7], halfOrderWordSeven) + highWordsEqual &= constantTimeEq(s.n[6], halfOrderWordSix) + highWordsEqual &= constantTimeEq(s.n[5], halfOrderWordFive) + highWordsEqual &= constantTimeEq(s.n[4], halfOrderWordFour) + result |= highWordsEqual & constantTimeGreater(s.n[3], halfOrderWordThree) + highWordsEqual &= constantTimeEq(s.n[3], halfOrderWordThree) + result |= highWordsEqual & constantTimeGreater(s.n[2], halfOrderWordTwo) + highWordsEqual &= constantTimeEq(s.n[2], halfOrderWordTwo) + result |= highWordsEqual & constantTimeGreater(s.n[1], halfOrderWordOne) + highWordsEqual &= constantTimeEq(s.n[1], halfOrderWordOne) + result |= highWordsEqual & constantTimeGreater(s.n[0], halfOrderWordZero) + + return result != 0 +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go new file mode 100644 index 000000000000..70a75bb81c68 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go @@ -0,0 +1,263 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2024 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +import ( + "bytes" + "crypto/sha256" + "hash" +) + +// References: +// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone) +// +// [ISO/IEC 8825-1]: Information technology — ASN.1 encoding rules: +// Specification of Basic Encoding Rules (BER), Canonical Encoding Rules +// (CER) and Distinguished Encoding Rules (DER) +// +// [SEC1]: Elliptic Curve Cryptography (May 31, 2009, Version 2.0) +// https://www.secg.org/sec1-v2.pdf + +var ( + // singleZero is used during RFC6979 nonce generation. It is provided + // here to avoid the need to create it multiple times. + singleZero = []byte{0x00} + + // zeroInitializer is used during RFC6979 nonce generation. It is provided + // here to avoid the need to create it multiple times. + zeroInitializer = bytes.Repeat([]byte{0x00}, sha256.BlockSize) + + // singleOne is used during RFC6979 nonce generation. It is provided + // here to avoid the need to create it multiple times. 
+ singleOne = []byte{0x01} + + // oneInitializer is used during RFC6979 nonce generation. It is provided + // here to avoid the need to create it multiple times. + oneInitializer = bytes.Repeat([]byte{0x01}, sha256.Size) +) + +// hmacsha256 implements a resettable version of HMAC-SHA256. +type hmacsha256 struct { + inner, outer hash.Hash + ipad, opad [sha256.BlockSize]byte +} + +// Write adds data to the running hash. +func (h *hmacsha256) Write(p []byte) { + h.inner.Write(p) +} + +// initKey initializes the HMAC-SHA256 instance to the provided key. +func (h *hmacsha256) initKey(key []byte) { + // Hash the key if it is too large. + if len(key) > sha256.BlockSize { + h.outer.Write(key) + key = h.outer.Sum(nil) + } + copy(h.ipad[:], key) + copy(h.opad[:], key) + for i := range h.ipad { + h.ipad[i] ^= 0x36 + } + for i := range h.opad { + h.opad[i] ^= 0x5c + } + h.inner.Write(h.ipad[:]) +} + +// ResetKey resets the HMAC-SHA256 to its initial state and then initializes it +// with the provided key. It is equivalent to creating a new instance with the +// provided key without allocating more memory. +func (h *hmacsha256) ResetKey(key []byte) { + h.inner.Reset() + h.outer.Reset() + copy(h.ipad[:], zeroInitializer) + copy(h.opad[:], zeroInitializer) + h.initKey(key) +} + +// Resets the HMAC-SHA256 to its initial state using the current key. +func (h *hmacsha256) Reset() { + h.inner.Reset() + h.inner.Write(h.ipad[:]) +} + +// Sum returns the hash of the written data. +func (h *hmacsha256) Sum() []byte { + h.outer.Reset() + h.outer.Write(h.opad[:]) + h.outer.Write(h.inner.Sum(nil)) + return h.outer.Sum(nil) +} + +// newHMACSHA256 returns a new HMAC-SHA256 hasher using the provided key. +func newHMACSHA256(key []byte) *hmacsha256 { + h := new(hmacsha256) + h.inner = sha256.New() + h.outer = sha256.New() + h.initKey(key) + return h +} + +// NonceRFC6979 generates a nonce deterministically according to RFC 6979 using +// HMAC-SHA256 for the hashing function. It takes a 32-byte hash as an input +// and returns a 32-byte nonce to be used for deterministic signing. The extra +// and version arguments are optional, but allow additional data to be added to +// the input of the HMAC. When provided, the extra data must be 32-bytes and +// version must be 16 bytes or they will be ignored. +// +// Finally, the extraIterations parameter provides a method to produce a stream +// of deterministic nonces to ensure the signing code is able to produce a nonce +// that results in a valid signature in the extremely unlikely event the +// original nonce produced results in an invalid signature (e.g. R == 0). +// Signing code should start with 0 and increment it if necessary. +func NonceRFC6979(privKey []byte, hash []byte, extra []byte, version []byte, extraIterations uint32) *ModNScalar { + // Input to HMAC is the 32-byte private key and the 32-byte hash. In + // addition, it may include the optional 32-byte extra data and 16-byte + // version. Create a fixed-size array to avoid extra allocs and slice it + // properly. + const ( + privKeyLen = 32 + hashLen = 32 + extraLen = 32 + versionLen = 16 + ) + var keyBuf [privKeyLen + hashLen + extraLen + versionLen]byte + + // Truncate rightmost bytes of private key and hash if they are too long and + // leave left padding of zeros when they're too short. + if len(privKey) > privKeyLen { + privKey = privKey[:privKeyLen] + } + if len(hash) > hashLen { + hash = hash[:hashLen] + } + offset := privKeyLen - len(privKey) // Zero left padding if needed. 
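
[Editor's aside] Stepping back from the buffer packing for a moment: NonceRFC6979 is fully deterministic in its inputs, and extraIterations walks further along the candidate stream. A standalone usage sketch with placeholder key material (v4 import path assumed):

package main

import (
	"crypto/sha256"
	"fmt"

	secp "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	priv := sha256.Sum256([]byte("demo private key bytes")) // placeholder 32-byte key
	msgHash := sha256.Sum256([]byte("message"))

	// Same key, hash, and iteration count always yield the same nonce ...
	k1 := secp.NonceRFC6979(priv[:], msgHash[:], nil, nil, 0)
	k2 := secp.NonceRFC6979(priv[:], msgHash[:], nil, nil, 0)
	// ... while bumping extraIterations selects the next valid candidate.
	k3 := secp.NonceRFC6979(priv[:], msgHash[:], nil, nil, 1)

	fmt.Println(k1.Equals(k2), k1.Equals(k3)) // true false
}
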
+ offset += copy(keyBuf[offset:], privKey) + offset += hashLen - len(hash) // Zero left padding if needed. + offset += copy(keyBuf[offset:], hash) + if len(extra) == extraLen { + offset += copy(keyBuf[offset:], extra) + if len(version) == versionLen { + offset += copy(keyBuf[offset:], version) + } + } else if len(version) == versionLen { + // When the version was specified, but not the extra data, leave the + // extra data portion all zero. + offset += privKeyLen + offset += copy(keyBuf[offset:], version) + } + key := keyBuf[:offset] + + // Step B. + // + // V = 0x01 0x01 0x01 ... 0x01 such that the length of V, in bits, is + // equal to 8*ceil(hashLen/8). + // + // Note that since the hash length is a multiple of 8 for the chosen hash + // function in this optimized implementation, the result is just the hash + // length, so avoid the extra calculations. Also, since it isn't modified, + // start with a global value. + v := oneInitializer + + // Step C (Go zeroes all allocated memory). + // + // K = 0x00 0x00 0x00 ... 0x00 such that the length of K, in bits, is + // equal to 8*ceil(hashLen/8). + // + // As above, since the hash length is a multiple of 8 for the chosen hash + // function in this optimized implementation, the result is just the hash + // length, so avoid the extra calculations. + k := zeroInitializer[:hashLen] + + // Step D. + // + // K = HMAC_K(V || 0x00 || int2octets(x) || bits2octets(h1)) + // + // Note that key is the "int2octets(x) || bits2octets(h1)" portion along + // with potential additional data as described by section 3.6 of the RFC. + hasher := newHMACSHA256(k) + hasher.Write(oneInitializer) + hasher.Write(singleZero) + hasher.Write(key) + k = hasher.Sum() + + // Step E. + // + // V = HMAC_K(V) + hasher.ResetKey(k) + hasher.Write(v) + v = hasher.Sum() + + // Step F. + // + // K = HMAC_K(V || 0x01 || int2octets(x) || bits2octets(h1)) + // + // Note that key is the "int2octets(x) || bits2octets(h1)" portion along + // with potential additional data as described by section 3.6 of the RFC. + hasher.Reset() + hasher.Write(v) + hasher.Write(singleOne) + hasher.Write(key) + k = hasher.Sum() + + // Step G. + // + // V = HMAC_K(V) + hasher.ResetKey(k) + hasher.Write(v) + v = hasher.Sum() + + // Step H. + // + // Repeat until the value is nonzero and less than the curve order. + var generated uint32 + for { + // Step H1 and H2. + // + // Set T to the empty sequence. The length of T (in bits) is denoted + // tlen; thus, at that point, tlen = 0. + // + // While tlen < qlen, do the following: + // V = HMAC_K(V) + // T = T || V + // + // Note that because the hash function output is the same length as the + // private key in this optimized implementation, there is no need to + // loop or create an intermediate T. + hasher.Reset() + hasher.Write(v) + v = hasher.Sum() + + // Step H3. + // + // k = bits2int(T) + // If k is within the range [1,q-1], return it. 
+ // + // Otherwise, compute: + // K = HMAC_K(V || 0x00) + // V = HMAC_K(V) + var secret ModNScalar + overflow := secret.SetByteSlice(v) + if !overflow && !secret.IsZero() { + generated++ + if generated > extraIterations { + return &secret + } + } + + // K = HMAC_K(V || 0x00) + hasher.Reset() + hasher.Write(v) + hasher.Write(singleZero) + k = hasher.Sum() + + // V = HMAC_K(V) + hasher.ResetKey(k) + hasher.Write(v) + v = hasher.Sum() + } +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go new file mode 100644 index 000000000000..e6b7be35063e --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go @@ -0,0 +1,111 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2024 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +import ( + cryptorand "crypto/rand" + "io" +) + +// PrivateKey provides facilities for working with secp256k1 private keys within +// this package and includes functionality such as serializing and parsing them +// as well as computing their associated public key. +type PrivateKey struct { + Key ModNScalar +} + +// NewPrivateKey instantiates a new private key from a scalar encoded as a +// big integer. +func NewPrivateKey(key *ModNScalar) *PrivateKey { + return &PrivateKey{Key: *key} +} + +// PrivKeyFromBytes returns a private based on the provided byte slice which is +// interpreted as an unsigned 256-bit big-endian integer in the range [0, N-1], +// where N is the order of the curve. +// +// WARNING: This means passing a slice with more than 32 bytes is truncated and +// that truncated value is reduced modulo N. Further, 0 is not a valid private +// key. It is up to the caller to provide a value in the appropriate range of +// [1, N-1]. Failure to do so will either result in an invalid private key or +// potentially weak private keys that have bias that could be exploited. +// +// This function primarily exists to provide a mechanism for converting +// serialized private keys that are already known to be good. +// +// Typically callers should make use of GeneratePrivateKey or +// GeneratePrivateKeyFromRand when creating private keys since they properly +// handle generation of appropriate values. +func PrivKeyFromBytes(privKeyBytes []byte) *PrivateKey { + var privKey PrivateKey + privKey.Key.SetByteSlice(privKeyBytes) + return &privKey +} + +// generatePrivateKey generates and returns a new private key that is suitable +// for use with secp256k1 using the provided reader as a source of entropy. The +// provided reader must be a source of cryptographically secure randomness to +// avoid weak private keys. +func generatePrivateKey(rand io.Reader) (*PrivateKey, error) { + // The group order is close enough to 2^256 that there is only roughly a 1 + // in 2^128 chance of generating an invalid private key, so this loop will + // virtually never run more than a single iteration in practice. + var key PrivateKey + var b32 [32]byte + for valid := false; !valid; { + if _, err := io.ReadFull(rand, b32[:]); err != nil { + return nil, err + } + + // The private key is only valid when it is in the range [1, N-1], where + // N is the order of the curve. 
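
[Editor's aside] For 32-byte values already known to be in range, PrivKeyFromBytes above and Serialize below round-trip exactly. A standalone sketch with an arbitrary in-range value (v4 import path assumed):

package main

import (
	"bytes"
	"fmt"

	secp "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	// 32 bytes of key material that is well below the group order N.
	raw := bytes.Repeat([]byte{0x42}, 32)

	priv := secp.PrivKeyFromBytes(raw)
	out := priv.Serialize()
	fmt.Println(bytes.Equal(raw, out)) // true: round-trips because raw < N
}
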
+ overflow := key.Key.SetBytes(&b32) + valid = (key.Key.IsZeroBit() | overflow) == 0 + } + zeroArray32(&b32) + + return &key, nil +} + +// GeneratePrivateKey generates and returns a new cryptographically secure +// private key that is suitable for use with secp256k1. +func GeneratePrivateKey() (*PrivateKey, error) { + return generatePrivateKey(cryptorand.Reader) +} + +// GeneratePrivateKeyFromRand generates a private key that is suitable for use +// with secp256k1 using the provided reader as a source of entropy. The +// provided reader must be a source of cryptographically secure randomness, such +// as [crypto/rand.Reader], to avoid weak private keys. +func GeneratePrivateKeyFromRand(rand io.Reader) (*PrivateKey, error) { + return generatePrivateKey(rand) +} + +// PubKey computes and returns the public key corresponding to this private key. +func (p *PrivateKey) PubKey() *PublicKey { + var result JacobianPoint + ScalarBaseMultNonConst(&p.Key, &result) + result.ToAffine() + return NewPublicKey(&result.X, &result.Y) +} + +// Zero manually clears the memory associated with the private key. This can be +// used to explicitly clear key material from memory for enhanced security +// against memory scraping. +func (p *PrivateKey) Zero() { + p.Key.Zero() +} + +// PrivKeyBytesLen defines the length in bytes of a serialized private key. +const PrivKeyBytesLen = 32 + +// Serialize returns the private key as a 256-bit big-endian binary-encoded +// number, padded to a length of 32 bytes. +func (p PrivateKey) Serialize() []byte { + var privKeyBytes [PrivKeyBytesLen]byte + p.Key.PutBytes(&privKeyBytes) + return privKeyBytes[:] +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go new file mode 100644 index 000000000000..2f8815bedf31 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go @@ -0,0 +1,236 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2024 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// References: +// [SEC1] Elliptic Curve Cryptography +// https://www.secg.org/sec1-v2.pdf +// +// [SEC2] Recommended Elliptic Curve Domain Parameters +// https://www.secg.org/sec2-v2.pdf +// +// [ANSI X9.62-1998] Public Key Cryptography For The Financial Services +// Industry: The Elliptic Curve Digital Signature Algorithm (ECDSA) + +import ( + "fmt" +) + +const ( + // PubKeyBytesLenCompressed is the number of bytes of a serialized + // compressed public key. + PubKeyBytesLenCompressed = 33 + + // PubKeyBytesLenUncompressed is the number of bytes of a serialized + // uncompressed public key. + PubKeyBytesLenUncompressed = 65 + + // PubKeyFormatCompressedEven is the identifier prefix byte for a public key + // whose Y coordinate is even when serialized in the compressed format per + // section 2.3.4 of [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.4). + PubKeyFormatCompressedEven byte = 0x02 + + // PubKeyFormatCompressedOdd is the identifier prefix byte for a public key + // whose Y coordinate is odd when serialized in the compressed format per + // section 2.3.4 of [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.4). 
+ PubKeyFormatCompressedOdd byte = 0x03 + + // PubKeyFormatUncompressed is the identifier prefix byte for a public key + // when serialized according in the uncompressed format per section 2.3.3 of + // [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.3). + PubKeyFormatUncompressed byte = 0x04 + + // PubKeyFormatHybridEven is the identifier prefix byte for a public key + // whose Y coordinate is even when serialized according to the hybrid format + // per section 4.3.6 of [ANSI X9.62-1998]. + // + // NOTE: This format makes little sense in practice an therefore this + // package will not produce public keys serialized in this format. However, + // it will parse them since they exist in the wild. + PubKeyFormatHybridEven byte = 0x06 + + // PubKeyFormatHybridOdd is the identifier prefix byte for a public key + // whose Y coordingate is odd when serialized according to the hybrid format + // per section 4.3.6 of [ANSI X9.62-1998]. + // + // NOTE: This format makes little sense in practice an therefore this + // package will not produce public keys serialized in this format. However, + // it will parse them since they exist in the wild. + PubKeyFormatHybridOdd byte = 0x07 +) + +// PublicKey provides facilities for efficiently working with secp256k1 public +// keys within this package and includes functions to serialize in both +// uncompressed and compressed SEC (Standards for Efficient Cryptography) +// formats. +type PublicKey struct { + x FieldVal + y FieldVal +} + +// NewPublicKey instantiates a new public key with the given x and y +// coordinates. +// +// It should be noted that, unlike ParsePubKey, since this accepts arbitrary x +// and y coordinates, it allows creation of public keys that are not valid +// points on the secp256k1 curve. The IsOnCurve method of the returned instance +// can be used to determine validity. +func NewPublicKey(x, y *FieldVal) *PublicKey { + var pubKey PublicKey + pubKey.x.Set(x) + pubKey.y.Set(y) + return &pubKey +} + +// ParsePubKey parses a secp256k1 public key encoded according to the format +// specified by ANSI X9.62-1998, which means it is also compatible with the +// SEC (Standards for Efficient Cryptography) specification which is a subset of +// the former. In other words, it supports the uncompressed, compressed, and +// hybrid formats as follows: +// +// Compressed: +// +// <32-byte X coordinate> +// +// Uncompressed: +// +// <32-byte X coordinate><32-byte Y coordinate> +// +// Hybrid: +// +// <32-byte X coordinate><32-byte Y coordinate> +// +// NOTE: The hybrid format makes little sense in practice an therefore this +// package will not produce public keys serialized in this format. However, +// this function will properly parse them since they exist in the wild. +func ParsePubKey(serialized []byte) (key *PublicKey, err error) { + var x, y FieldVal + switch len(serialized) { + case PubKeyBytesLenUncompressed: + // Reject unsupported public key formats for the given length. + format := serialized[0] + switch format { + case PubKeyFormatUncompressed: + case PubKeyFormatHybridEven, PubKeyFormatHybridOdd: + default: + str := fmt.Sprintf("invalid public key: unsupported format: %x", + format) + return nil, makeError(ErrPubKeyInvalidFormat, str) + } + + // Parse the x and y coordinates while ensuring that they are in the + // allowed range. 
+ if overflow := x.SetByteSlice(serialized[1:33]); overflow { + str := "invalid public key: x >= field prime" + return nil, makeError(ErrPubKeyXTooBig, str) + } + if overflow := y.SetByteSlice(serialized[33:]); overflow { + str := "invalid public key: y >= field prime" + return nil, makeError(ErrPubKeyYTooBig, str) + } + + // Ensure the oddness of the y coordinate matches the specified format + // for hybrid public keys. + if format == PubKeyFormatHybridEven || format == PubKeyFormatHybridOdd { + wantOddY := format == PubKeyFormatHybridOdd + if y.IsOdd() != wantOddY { + str := fmt.Sprintf("invalid public key: y oddness does not "+ + "match specified value of %v", wantOddY) + return nil, makeError(ErrPubKeyMismatchedOddness, str) + } + } + + // Reject public keys that are not on the secp256k1 curve. + if !isOnCurve(&x, &y) { + str := fmt.Sprintf("invalid public key: [%v,%v] not on secp256k1 "+ + "curve", x, y) + return nil, makeError(ErrPubKeyNotOnCurve, str) + } + + case PubKeyBytesLenCompressed: + // Reject unsupported public key formats for the given length. + format := serialized[0] + switch format { + case PubKeyFormatCompressedEven, PubKeyFormatCompressedOdd: + default: + str := fmt.Sprintf("invalid public key: unsupported format: %x", + format) + return nil, makeError(ErrPubKeyInvalidFormat, str) + } + + // Parse the x coordinate while ensuring that it is in the allowed + // range. + if overflow := x.SetByteSlice(serialized[1:33]); overflow { + str := "invalid public key: x >= field prime" + return nil, makeError(ErrPubKeyXTooBig, str) + } + + // Attempt to calculate the y coordinate for the given x coordinate such + // that the result pair is a point on the secp256k1 curve and the + // solution with desired oddness is chosen. + wantOddY := format == PubKeyFormatCompressedOdd + if !DecompressY(&x, wantOddY, &y) { + str := fmt.Sprintf("invalid public key: x coordinate %v is not on "+ + "the secp256k1 curve", x) + return nil, makeError(ErrPubKeyNotOnCurve, str) + } + + default: + str := fmt.Sprintf("malformed public key: invalid length: %d", + len(serialized)) + return nil, makeError(ErrPubKeyInvalidLen, str) + } + + return NewPublicKey(&x, &y), nil +} + +// SerializeUncompressed serializes a public key in the 65-byte uncompressed +// format. +func (p PublicKey) SerializeUncompressed() []byte { + // 0x04 || 32-byte x coordinate || 32-byte y coordinate + var b [PubKeyBytesLenUncompressed]byte + b[0] = PubKeyFormatUncompressed + p.x.PutBytesUnchecked(b[1:33]) + p.y.PutBytesUnchecked(b[33:65]) + return b[:] +} + +// SerializeCompressed serializes a public key in the 33-byte compressed format. +func (p PublicKey) SerializeCompressed() []byte { + // Choose the format byte depending on the oddness of the Y coordinate. + format := PubKeyFormatCompressedEven + if p.y.IsOdd() { + format = PubKeyFormatCompressedOdd + } + + // 0x02 or 0x03 || 32-byte x coordinate + var b [PubKeyBytesLenCompressed]byte + b[0] = format + p.x.PutBytesUnchecked(b[1:33]) + return b[:] +} + +// IsEqual compares this public key instance to the one passed, returning true +// if both public keys are equivalent. A public key is equivalent to another, +// if they both have the same X and Y coordinates. +func (p *PublicKey) IsEqual(otherPubKey *PublicKey) bool { + return p.x.Equals(&otherPubKey.x) && p.y.Equals(&otherPubKey.y) +} + +// AsJacobian converts the public key into a Jacobian point with Z=1 and stores +// the result in the provided result param. 
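
[Editor's aside] A short end-to-end sketch of the serialize/parse paths defined above (v4 import path assumed): generate a key, serialize its public key in compressed form, and parse it back.

package main

import (
	"fmt"

	secp "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	priv, err := secp.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	pub := priv.PubKey()

	// Compressed form is 33 bytes: 0x02/0x03 prefix plus the X coordinate.
	compressed := pub.SerializeCompressed()
	parsed, err := secp.ParsePubKey(compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(compressed), parsed.IsEqual(pub), parsed.IsOnCurve()) // 33 true true
}
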
This allows the public key to be +// treated a Jacobian point in the secp256k1 group in calculations. +func (p *PublicKey) AsJacobian(result *JacobianPoint) { + result.X.Set(&p.x) + result.Y.Set(&p.y) + result.Z.SetInt(1) +} + +// IsOnCurve returns whether or not the public key represents a point on the +// secp256k1 curve. +func (p *PublicKey) IsOnCurve() bool { + return isOnCurve(&p.x, &p.y) +} diff --git a/vendor/github.com/go-ini/ini/.editorconfig b/vendor/github.com/go-ini/ini/.editorconfig new file mode 100644 index 000000000000..4a2d9180f96f --- /dev/null +++ b/vendor/github.com/go-ini/ini/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*_test.go] +trim_trailing_whitespace = false diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore new file mode 100644 index 000000000000..588388bda28d --- /dev/null +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -0,0 +1,7 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea +/.vscode +.DS_Store diff --git a/vendor/github.com/go-ini/ini/.golangci.yml b/vendor/github.com/go-ini/ini/.golangci.yml new file mode 100644 index 000000000000..631e369254d3 --- /dev/null +++ b/vendor/github.com/go-ini/ini/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + staticcheck: + checks: [ + "all", + "-SA1019" # There are valid use cases of strings.Title + ] + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports + - unparam diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000000..d361bbcdf5c9 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 000000000000..f3b0dae2d298 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,15 @@ +.PHONY: build test bench vet coverage + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -test.bench=. 
-test.benchmem + +vet: + go vet + +coverage: + go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 000000000000..30606d9700a8 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,43 @@ +# INI + +[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) +[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) +[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) +[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +## Features + +- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +The minimum requirement of Go is **1.13**. + +```sh +$ go get gopkg.in/ini.v1 +``` + +Please add `-u` flag to update in the future. + +## Getting Help + +- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- 中国大陆镜像:https://ini.unknwon.cn + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/codecov.yml b/vendor/github.com/go-ini/ini/codecov.yml new file mode 100644 index 000000000000..e02ec84bc05f --- /dev/null +++ b/vendor/github.com/go-ini/ini/codecov.yml @@ -0,0 +1,16 @@ +coverage: + range: "60...95" + status: + project: + default: + threshold: 1% + informational: true + patch: + defualt: + only_pulls: true + informational: true + +comment: + layout: 'diff' + +github_checks: false diff --git a/vendor/github.com/go-ini/ini/data_source.go b/vendor/github.com/go-ini/ini/data_source.go new file mode 100644 index 000000000000..c3a541f1d1b5 --- /dev/null +++ b/vendor/github.com/go-ini/ini/data_source.go @@ -0,0 +1,76 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" +) + +var ( + _ dataSource = (*sourceFile)(nil) + _ dataSource = (*sourceData)(nil) + _ dataSource = (*sourceReadCloser)(nil) +) + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + case io.Reader: + return &sourceReadCloser{ioutil.NopCloser(s)}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type %q", s) + } +} diff --git a/vendor/github.com/go-ini/ini/deprecated.go b/vendor/github.com/go-ini/ini/deprecated.go new file mode 100644 index 000000000000..48b8e66d6d6f --- /dev/null +++ b/vendor/github.com/go-ini/ini/deprecated.go @@ -0,0 +1,22 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +var ( + // Deprecated: Use "DefaultSection" instead. + DEFAULT_SECTION = DefaultSection + // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore = SnackCase +) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 000000000000..f66bc94b8b69 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,49 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. 
+type ErrDelimiterNotFound struct { + Line string +} + +// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} + +// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. +type ErrEmptyKeyName struct { + Line string +} + +// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. +func IsErrEmptyKeyName(err error) bool { + _, ok := err.(ErrEmptyKeyName) + return ok +} + +func (err ErrEmptyKeyName) Error() string { + return fmt.Sprintf("empty key name: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go new file mode 100644 index 000000000000..f8b22408be51 --- /dev/null +++ b/vendor/github.com/go-ini/ini/file.go @@ -0,0 +1,541 @@ +// Copyright 2017 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" +) + +// File represents a combination of one or more INI files in memory. +type File struct { + options LoadOptions + dataSources []dataSource + + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + lock sync.RWMutex + + // To keep data in order. + sectionList []string + // To keep track of the index of a section with same name. + // This meta list is only used with non-unique section names are allowed. + sectionIndexes []int + + // Actual data is stored here. + sections map[string][]*Section + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + if len(opts.KeyValueDelimiters) == 0 { + opts.KeyValueDelimiters = "=:" + } + if len(opts.KeyValueDelimiterOnWrite) == 0 { + opts.KeyValueDelimiterOnWrite = "=" + } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." + } + + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string][]*Section), + options: opts, + } +} + +// Empty returns an empty file object. +func Empty(opts ...LoadOptions) *File { + var opt LoadOptions + if len(opts) > 0 { + opt = opts[0] + } + + // Ignore error here, we are sure our data is good. + f, _ := LoadSources(opt, []byte("")) + return f +} + +// NewSection creates a new section. 
+func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("empty section name") + } + + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { + return f.sections[name][0], nil + } + + f.sectionList = append(f.sectionList, name) + + // NOTE: Append to indexes must happen before appending to sections, + // otherwise index will have off-by-one problem. + f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) + + sec := newSection(f, name) + f.sections[name] = append(f.sections[name], sec) + + return sec, nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + secs, err := f.SectionsByName(name) + if err != nil { + return nil, err + } + + return secs[0], err +} + +// HasSection returns true if the file contains a section with given name. +func (f *File) HasSection(name string) bool { + section, _ := f.GetSection(name) + return section != nil +} + +// SectionsByName returns all sections with given name. +func (f *File) SectionsByName(name string) ([]*Section, error) { + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + secs := f.sections[name] + if len(secs) == 0 { + return nil, fmt.Errorf("section %q does not exist", name) + } + + return secs, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + if name == "" { + name = DefaultSection + } + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// SectionWithIndex assumes named section exists and returns a new section when not. +func (f *File) SectionWithIndex(name string, index int) *Section { + secs, err := f.SectionsByName(name) + if err != nil || len(secs) <= index { + // NOTE: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + newSec, _ := f.NewSection(name) + return newSec + } + + return secs[index] +} + +// Sections returns a list of Section stored in the current instance. +func (f *File) Sections() []*Section { + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sections := make([]*Section, len(f.sectionList)) + for i, name := range f.sectionList { + sections[i] = f.sections[name][f.sectionIndexes[i]] + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. 
+func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section or all sections with given name. +func (f *File) DeleteSection(name string) { + secs, err := f.SectionsByName(name) + if err != nil { + return + } + + for i := 0; i < len(secs); i++ { + // For non-unique sections, it is always needed to remove the first one so + // in the next iteration, the subsequent section continue having index 0. + // Ignoring the error as index 0 never returns an error. + _ = f.DeleteSectionWithIndex(name, 0) + } +} + +// DeleteSectionWithIndex deletes a section with given name and index. +func (f *File) DeleteSectionWithIndex(name string, index int) error { + if !f.options.AllowNonUniqueSections && index != 0 { + return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") + } + + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + // Count occurrences of the sections + occurrences := 0 + + sectionListCopy := make([]string, len(f.sectionList)) + copy(sectionListCopy, f.sectionList) + + for i, s := range sectionListCopy { + if s != name { + continue + } + + if occurrences == index { + if len(f.sections[name]) <= 1 { + delete(f.sections, name) // The last one in the map + } else { + f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) + } + + // Fix section lists + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) + + } else if occurrences > index { + // Fix the indices of all following sections with this name. + f.sectionIndexes[i-1]-- + } + + occurrences++ + } + + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + _ = f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + if f.options.ShortCircuit { + return nil + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight + + if PrettyFormat || PrettyEqual { + equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) + } + + // Use buffer to make sure target is safe until finish encoding. 
+ buf := bytes.NewBuffer(nil) + lastSectionIdx := len(f.sectionList) - 1 + for i, sname := range f.sectionList { + sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) + if len(sec.Comment) > 0 { + // Support multiline comments + lines := strings.Split(sec.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + lines[i] + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + isLastSection := i == lastSectionIdx + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modified if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KeyList: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + // Support multiline comments + lines := strings.Split(key.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + strings.TrimSpace(lines[i]) + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + writeKeyValue := func(val string) (bool, error) { + if _, err := buf.WriteString(kname); err != nil { + return false, err + } + + if key.isBooleanType { + buf.WriteString(LineBreak) + return true, nil + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return false, err + } + return false, nil + } + + shadows := key.ValueWithShadows() + 
if len(shadows) == 0 { + if _, err := writeKeyValue(""); err != nil { + return nil, err + } + } + + for _, val := range shadows { + exitLoop, err := writeKeyValue(val) + if err != nil { + return nil, err + } else if exitLoop { + continue KeyList + } + } + + for _, val := range key.nestedValues { + if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename after done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/helper.go b/vendor/github.com/go-ini/ini/helper.go new file mode 100644 index 000000000000..f9d80a682a55 --- /dev/null +++ b/vendor/github.com/go-ini/ini/helper.go @@ -0,0 +1,24 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 000000000000..99e7f86511a4 --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,176 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. 
+package ini
+
+import (
+	"os"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+const (
+	// Maximum allowed depth when recursively substituting variable names.
+	depthValues = 99
+)
+
+var (
+	// DefaultSection is the name of default section. You can use this var or the string literal.
+	// In most cases, an empty string is all you need to access the section.
+	DefaultSection = "DEFAULT"
+
+	// LineBreak is the delimiter to determine or compose a new line.
+	// This variable will be changed to "\r\n" automatically on Windows at package init time.
+	LineBreak = "\n"
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
+
+	// DefaultHeader explicitly writes default section header.
+	DefaultHeader = false
+
+	// PrettySection indicates whether to put a line between sections.
+	PrettySection = true
+	// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
+	// or reduce all possible spaces for compact format.
+	PrettyFormat = true
+	// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
+	PrettyEqual = false
+	// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
+	DefaultFormatLeft = ""
+	// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
+	DefaultFormatRight = ""
+)
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+func init() {
+	if runtime.GOOS == "windows" && !inTest {
+		LineBreak = "\r\n"
+	}
+}
+
+// LoadOptions contains all customized options used for loading data source(s).
+type LoadOptions struct {
+	// Loose indicates whether the parser should ignore nonexistent files or return error.
+	Loose bool
+	// Insensitive indicates whether the parser forces all section and key names to lowercase.
+	Insensitive bool
+	// InsensitiveSections indicates whether the parser forces all section names to lowercase.
+	InsensitiveSections bool
+	// InsensitiveKeys indicates whether the parser forces all key names to lowercase.
+	InsensitiveKeys bool
+	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+	IgnoreContinuation bool
+	// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
+	IgnoreInlineComment bool
+	// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+	SkipUnrecognizableLines bool
+	// ShortCircuit indicates whether to ignore other configuration sources after loading the first available configuration source.
+	ShortCircuit bool
+	// AllowBooleanKeys indicates whether to allow boolean type keys or treat them as keys with missing values.
+	// This type of key is mostly used in my.cnf.
+	AllowBooleanKeys bool
+	// AllowShadows indicates whether to keep track of keys with the same name under the same section.
+	AllowShadows bool
+	// AllowNestedValues indicates whether to allow AWS-like nested values.
+	// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+	AllowNestedValues bool
+	// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+	// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+	// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+	// than the first line of the value.
+	AllowPythonMultilineValues bool
+	// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+	// Docs: https://docs.python.org/2/library/configparser.html
+	// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+	// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+	SpaceBeforeInlineComment bool
+	// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+	// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+	UnescapeValueDoubleQuotes bool
+	// UnescapeValueCommentSymbols indicates whether to unescape comment symbols (\# and \;) inside value to regular format
+	// when value is NOT surrounded by any quotes.
+	// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
+	UnescapeValueCommentSymbols bool
+	// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+	// conform to key/value pairs. Specify the names of those blocks here.
+	UnparseableSections []string
+	// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+	KeyValueDelimiters string
+	// KeyValueDelimiterOnWrite is the delimiter that is used to separate key and value on output. By default, it is "=".
+	KeyValueDelimiterOnWrite string
+	// ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
+	ChildSectionDelimiter string
+	// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
+	PreserveSurroundedQuote bool
+	// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
+	DebugFunc DebugFunc
+	// ReaderBufferSize is the buffer size of the reader in bytes.
+	ReaderBufferSize int
+	// AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
+	AllowNonUniqueSections bool
+	// AllowDuplicateShadowValues indicates whether to keep duplicate values for shadowed keys instead of deduplicating them.
+	AllowDuplicateShadowValues bool
+}
+
+// DebugFunc is the type of function called to log parse events.
+type DebugFunc func(message string)
+
+// LoadSources allows the caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources, opts)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string type) or raw data in []byte.
+// It will return an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly same functionality as Load function
+// except it ignores nonexistent files instead of returning error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Loose: true}, source, others...)
+} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// ShadowLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) +} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 000000000000..a19d9f38ef14 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,837 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + nestedValues []string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + if !k.s.f.options.AllowDuplicateShadowValues { + // Deduplicate shadows based on their values. + if k.value == val { + return nil + } + for i := range k.shadows { + if k.shadows[i].value == val { + return nil + } + } + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +func (k *Key) addNestedValue(val string) error { + if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add nested value to auto-increment or boolean key") + } + + k.nestedValues = append(k.nestedValues, val) + return nil +} + +// AddNestedValue adds a nested value to the key. +func (k *Key) AddNestedValue(val string) error { + if !k.s.f.options.AllowNestedValues { + return errors.New("nested value is not allowed") + } + return k.addNestedValue(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. 
Shadow +// keys with empty values are ignored from the returned list. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + if k.value == "" { + return []string{} + } + return []string{k.value} + } + + vals := make([]string, 0, len(k.shadows)+1) + if k.value != "" { + vals = append(vals, k.value) + } + for _, s := range k.shadows { + if s.value != "" { + vals = append(vals, s.value) + } + } + return vals +} + +// NestedValues returns nested values stored in the key. +// It is possible returned value is nil if no nested values stored in the key. +func (k *Key) NestedValues() []string { + return k.nestedValues +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < depthValues; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := vr[2 : len(vr)-2] + + // Search in the same section. + // If not found or found the key itself, then search again in default section. + nk, err := k.s.GetKey(noption) + if err != nil || k == nk { + nk, _ = k.s.f.Section("").GetKey(noption) + if nk == nil { + // Stop when no results found in the default section, + // and returns the value as-is. + break + } + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + v, err := strconv.ParseInt(k.String(), 0, 64) + return int(v), err +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 0, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 0, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 0, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. 
+func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx++ + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Bools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
+func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidBools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
+func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictBools returns list of bool divided by given delimiter or error on first invalid input. +func (k *Key) StrictBools(delim string) ([]bool, error) { + return k.parseBools(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseBools transforms strings to bools. +func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { + vals := make([]bool, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := parseBool(str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(bool)) + } + } + return vals, err +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseFloat(str, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(float64)) + } + } + return vals, err +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, int(val.(int64))) + } + } + return vals, err +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(int64)) + } + } + return vals, err +} + +// parseUints transforms strings to uints. 
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, uint(val.(uint64))) + } + } + return vals, err +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(uint64)) + } + } + return vals, err +} + +type Parser func(str string) (interface{}, error) + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := time.Parse(format, str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(time.Time)) + } + } + return vals, err +} + +// doParse transforms strings to different types +func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { + vals := make([]interface{}, 0, len(strs)) + for _, str := range strs { + val, err := parser(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 000000000000..44fc526c2cb6 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,520 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +const minReaderBufferSize = 4096 + +var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) + +type parserOptions struct { + IgnoreContinuation bool + IgnoreInlineComment bool + AllowPythonMultilineValues bool + SpaceBeforeInlineComment bool + UnescapeValueDoubleQuotes bool + UnescapeValueCommentSymbols bool + PreserveSurroundedQuote bool + DebugFunc DebugFunc + ReaderBufferSize int +} + +type parser struct { + buf *bufio.Reader + options parserOptions + + isEOF bool + count int + comment *bytes.Buffer +} + +func (p *parser) debug(format string, args ...interface{}) { + if p.options.DebugFunc != nil { + p.options.DebugFunc(fmt.Sprintf(format, args...)) + } +} + +func newParser(r io.Reader, opts parserOptions) *parser { + size := opts.ReaderBufferSize + if size < minReaderBufferSize { + size = minReaderBufferSize + } + + return &parser{ + buf: bufio.NewReaderSize(r, size), + options: opts, + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + _, err = p.buf.Read(mask) + if err != nil { + return err + } + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + _, err = p.buf.Read(mask) + if err != nil { + return err + } + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(delimiters string, in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + var endIdx int + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], delimiters) + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, delimiters) + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + if endIdx == 0 { + return "", -1, ErrEmptyKeyName{line} + } + + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, bufferSize int) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + return "", nil + } + + var valQuote string + if len(line) > 3 && line[0:3] == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !p.options.IgnoreInlineComment { + var i int + if p.options.SpaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if (hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") + } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + + return line, nil +} + +func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { + parserBufferPeekResult, _ := p.buf.Peek(bufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil && peekErr != io.EOF { + p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) + return "", peekErr + } + + p.debug("readPythonMultilines: parsing %q", string(peekData)) + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) + for n, v := range peekMatches { + p.debug(" %d: %q", n, v) + } + + // Return if not a Python multiline value. + if len(peekMatches) != 3 { + p.debug("readPythonMultilines: end of value, got: %q", line) + return line, nil + } + + // Advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.buf.Discard(len(peekData)) + if err != nil { + p.debug("readPythonMultilines: failed to skip to the end, returning error") + return "", err + } + + line += "\n" + peekMatches[0] + } +} + +// parse parses data through an io.Reader. 
+func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader, parserOptions{ + IgnoreContinuation: f.options.IgnoreContinuation, + IgnoreInlineComment: f.options.IgnoreInlineComment, + AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, + SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, + UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, + UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, + PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, + DebugFunc: f.options.DebugFunc, + ReaderBufferSize: f.options.ReaderBufferSize, + }) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + name := DefaultSection + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(DefaultSection) + } + section, _ := f.NewSection(name) + + // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key + var isLastValueEmpty bool + var lastRegularKey *Key + + var line []byte + var inUnparseableSection bool + + // NOTE: Iterate and increase `currentPeekSize` until + // the size of the parser buffer is found. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. + parserBufferSize := 0 + // NOTE: Peek 4kb at a time. + currentPeekSize := minReaderBufferSize + + if f.options.AllowPythonMultilineValues { + for { + peekBytes, _ := p.buf.Peek(currentPeekSize) + peekBytesLength := len(peekBytes) + + if parserBufferSize >= peekBytesLength { + break + } + + currentPeekSize *= 2 + parserBufferSize = peekBytesLength + } + } + + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + if f.options.AllowNestedValues && + isLastValueEmpty && len(line) > 0 { + if line[0] == ' ' || line[0] == '\t' { + err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + if err != nil { + return err + } + continue + } + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. 
+ p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.LastIndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset auto-counter and comments + p.comment.Reset() + p.count = 1 + // Nested values can't span sections + isLastValueEmpty = false + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) + if err != nil { + switch { + // Treat as boolean key when desired, and whole line is key name. + case IsErrDelimiterNotFound(err): + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, parserBufferSize) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue + } + case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], parserBufferSize) + if err != nil { + return err + } + isLastValueEmpty = len(value) == 0 + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + lastRegularKey = key + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 000000000000..a3615d820b7a --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,256 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + s.keysHash[name] = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } + break + } + return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Deprecated: Use "HasKey" instead. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. 
+ key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := make(map[string]string, len(s.keysHash)) + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + delete(s.keysHash, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + s.f.options.ChildSectionDelimiter + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]...) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 000000000000..a486b2fe0fdc --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,747 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // SnackCase converts to format SNACK_CASE. + SnackCase NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= 'A' - 'a' + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflect.Bool: + vals, err = key.parseBools(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if err != nil && isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflect.Bool: + slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + vt := t + isPtr := t.Kind() == reflect.Ptr + if isPtr { + vt = t.Elem() + } + switch vt.Kind() { + case reflect.String: + stringVal := key.String() + if isPtr { + field.Set(reflect.ValueOf(&stringVal)) + } else if len(stringVal) > 0 { + field.SetString(key.String()) + } + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&boolVal)) + } else { + field.SetBool(boolVal) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // ParseDuration will not return err for `0`, so check the type name + if vt.Name() == "Duration" { + durationVal, err := key.Duration() + if err != nil { + if intVal, err := key.Int64(); err == nil { + field.SetInt(intVal) + return nil + } + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else if int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetInt(intVal) + field.Set(pv) + } else { + field.SetInt(intVal) + } + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && uint64(durationVal) > 0 { + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetUint(uintVal) + field.Set(pv) + } else { + field.SetUint(uintVal) + } + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetFloat(floatVal) + field.Set(pv) + } else { + field.SetFloat(floatVal) + } + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&timeVal)) + } else { + field.Set(reflect.ValueOf(timeVal)) + } + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) + rawName = opts[0] + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") + } + return rawName, omitEmpty, allowShadow, allowNonUnique, extends +} + +// mapToField maps the given value to the matching field of the given section. +// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. 
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isStruct := tpField.Type.Kind() == reflect.Struct + isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { + if secs, err := s.f.SectionsByName(fieldName); err == nil { + if len(secs) <= sectionIndex { + return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) + } + // Only set the field to non-nil struct value if we have a section for it. + // Otherwise, we end up with a non-nil struct ptr even though there is no data. + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + continue + } + } + + // Map non-unique sections + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + newField, err := s.mapToSlice(fieldName, field, isStrict) + if err != nil { + return fmt.Errorf("map to slice %q: %v", fieldName, err) + } + + field.Set(newField) + continue + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("set field %q: %v", fieldName, err) + } + } + } + return nil +} + +// mapToSlice maps all sections with the same name and returns the new value. +// The type of the Value must be a slice. +func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { + secs, err := s.f.SectionsByName(secName) + if err != nil { + return reflect.Value{}, err + } + + typ := val.Type().Elem() + for i, sec := range secs { + elem := reflect.New(typ) + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { + return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) + } + + val = reflect.Append(val, elem.Elem()) + } + return val, nil +} + +// mapTo maps a section to object v. 
+func (s *Section) mapTo(v interface{}, isStrict bool) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + if typ.Kind() == reflect.Slice { + newField, err := s.mapToSlice(s.name, val, isStrict) + if err != nil { + return err + } + + val.Set(newField) + return nil + } + + return s.mapToField(val, isStrict, 0, s.name) +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + return s.mapTo(v, false) +} + +// StrictMapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + return s.mapTo(v, true) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// StrictMapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + +// MapToWithMapper maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + if allowShadow { + var keyWithShadows *Key + for i := 0; i < field.Len(); i++ { + var val string + switch sliceOf { + case reflect.String: + val = slice.Index(i).String() + case reflect.Int, reflect.Int64: + val = fmt.Sprint(slice.Index(i).Int()) + case reflect.Uint, reflect.Uint64: + val = fmt.Sprint(slice.Index(i).Uint()) + case reflect.Float64: + val = fmt.Sprint(slice.Index(i).Float()) + case reflect.Bool: + val = fmt.Sprint(slice.Index(i).Bool()) + case reflectTime: + val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + if i == 0 { + keyWithShadows = newKey(key.s, key.name, val) + } else { + _ = keyWithShadows.AddShadow(val) + } + } + *key = *keyWithShadows + return nil + } + + var buf bytes.Buffer + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflect.Bool: + buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-len(delim)]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim, allowShadow) + case reflect.Ptr: + if !field.IsNil() { + return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) + } + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() + } + return false +} + +// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. +type StructReflector interface { + ReflectINIStruct(*File) error +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + if !val.Field(i).CanInterface() { + continue + } + + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + if omitEmpty && isEmptyValue(field) { + continue + } + + if r, ok := field.Interface().(StructReflector); ok { + return r.ReflectINIStruct(s.f) + } + + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + for i := 0; i < field.Len(); i++ { + if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { + return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) + } + + sec, err := s.f.NewSection(fieldName) + if err != nil { + return err + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err := sec.reflectFrom(slice.Index(i)); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + } + continue + } + + // Note: Same reason as section. 
+ key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + + delim := parseDelim(tpField.Tag.Get("delim")) + if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("reflect field %q: %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects section from given struct. It overwrites existing ones. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + + if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && + (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { + // Clear sections to make sure none exists before adding the new ones + s.f.DeleteSection(s.name) + + if typ.Kind() == reflect.Ptr { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + return sec.reflectFrom(val.Elem()) + } + + slice := val.Slice(0, val.Len()) + sliceOf := val.Type().Elem().Kind() + if sliceOf != reflect.Ptr { + return fmt.Errorf("not a slice of pointers") + } + + for i := 0; i < slice.Len(); i++ { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + + err = sec.reflectFrom(slice.Index(i)) + if err != nil { + return fmt.Errorf("reflect from %dth field: %v", i, err) + } + } + + return nil + } + + if typ.Kind() == reflect.Ptr { + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFromWithMapper reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore new file mode 100644 index 000000000000..b4ae623be552 --- /dev/null +++ b/vendor/github.com/gobwas/glob/.gitignore @@ -0,0 +1,8 @@ +glob.iml +.idea +*.cpu +*.mem +*.test +*.dot +*.png +*.svg diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml new file mode 100644 index 000000000000..e8a276826cf2 --- /dev/null +++ b/vendor/github.com/gobwas/glob/.travis.yml @@ -0,0 +1,9 @@ +sudo: false + +language: go + +go: + - 1.5.3 + +script: + - go test -v ./... diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE new file mode 100644 index 000000000000..9d4735cad9f4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Sergey Kamardin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh new file mode 100644 index 000000000000..804cf22e6460 --- /dev/null +++ b/vendor/github.com/gobwas/glob/bench.sh @@ -0,0 +1,26 @@ +#! /bin/bash + +bench() { + filename="/tmp/$1-$2.bench" + if test -e "${filename}"; + then + echo "Already exists ${filename}" + else + backup=`git rev-parse --abbrev-ref HEAD` + git checkout $1 + echo -n "Creating ${filename}... " + go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem + echo "OK" + git checkout ${backup} + sleep 5 + fi +} + + +to=$1 +current=`git rev-parse --abbrev-ref HEAD` + +bench ${to} $2 +bench ${current} $2 + +benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go new file mode 100644 index 000000000000..02e7de80a0b0 --- /dev/null +++ b/vendor/github.com/gobwas/glob/compiler/compiler.go @@ -0,0 +1,525 @@ +package compiler + +// TODO use constructor with all matchers, and to their structs private +// TODO glue multiple Text nodes (like after QuoteMeta) + +import ( + "fmt" + "reflect" + + "github.com/gobwas/glob/match" + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/util/runes" +) + +func optimizeMatcher(matcher match.Matcher) match.Matcher { + switch m := matcher.(type) { + + case match.Any: + if len(m.Separators) == 0 { + return match.NewSuper() + } + + case match.AnyOf: + if len(m.Matchers) == 1 { + return m.Matchers[0] + } + + return m + + case match.List: + if m.Not == false && len(m.List) == 1 { + return match.NewText(string(m.List)) + } + + return m + + case match.BTree: + m.Left = optimizeMatcher(m.Left) + m.Right = optimizeMatcher(m.Right) + + r, ok := m.Value.(match.Text) + if !ok { + return m + } + + var ( + leftNil = m.Left == nil + rightNil = m.Right == nil + ) + if leftNil && rightNil { + return match.NewText(r.Str) + } + + _, leftSuper := m.Left.(match.Super) + lp, leftPrefix := m.Left.(match.Prefix) + la, leftAny := m.Left.(match.Any) + + _, rightSuper := m.Right.(match.Super) + rs, rightSuffix := m.Right.(match.Suffix) + ra, rightAny := m.Right.(match.Any) + + switch { + case leftSuper && rightSuper: + return match.NewContains(r.Str, false) + + case leftSuper && rightNil: + return match.NewSuffix(r.Str) + + case rightSuper && leftNil: + return match.NewPrefix(r.Str) + + case leftNil && rightSuffix: + return match.NewPrefixSuffix(r.Str, rs.Suffix) + + case rightNil && leftPrefix: + return match.NewPrefixSuffix(lp.Prefix, r.Str) + + case rightNil && leftAny: + return match.NewSuffixAny(r.Str, la.Separators) + + case leftNil && rightAny: + return match.NewPrefixAny(r.Str, ra.Separators) + } + + return m + } + + return matcher +} + +func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { + if len(matchers) == 0 { + return nil, fmt.Errorf("compile error: need at least one matcher") + } + if len(matchers) == 1 { + return matchers[0], nil + } + if m := glueMatchers(matchers); m != nil { + 
return m, nil + } + + idx := -1 + maxLen := -1 + var val match.Matcher + for i, matcher := range matchers { + if l := matcher.Len(); l != -1 && l >= maxLen { + maxLen = l + idx = i + val = matcher + } + } + + if val == nil { // not found matcher with static length + r, err := compileMatchers(matchers[1:]) + if err != nil { + return nil, err + } + return match.NewBTree(matchers[0], nil, r), nil + } + + left := matchers[:idx] + var right []match.Matcher + if len(matchers) > idx+1 { + right = matchers[idx+1:] + } + + var l, r match.Matcher + var err error + if len(left) > 0 { + l, err = compileMatchers(left) + if err != nil { + return nil, err + } + } + + if len(right) > 0 { + r, err = compileMatchers(right) + if err != nil { + return nil, err + } + } + + return match.NewBTree(val, l, r), nil +} + +func glueMatchers(matchers []match.Matcher) match.Matcher { + if m := glueMatchersAsEvery(matchers); m != nil { + return m + } + if m := glueMatchersAsRow(matchers); m != nil { + return m + } + return nil +} + +func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + c []match.Matcher + l int + ) + for _, matcher := range matchers { + if ml := matcher.Len(); ml == -1 { + return nil + } else { + c = append(c, matcher) + l += ml + } + } + return match.NewRow(l, c...) +} + +func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + hasAny bool + hasSuper bool + hasSingle bool + min int + separator []rune + ) + + for i, matcher := range matchers { + var sep []rune + + switch m := matcher.(type) { + case match.Super: + sep = []rune{} + hasSuper = true + + case match.Any: + sep = m.Separators + hasAny = true + + case match.Single: + sep = m.Separators + hasSingle = true + min++ + + case match.List: + if !m.Not { + return nil + } + sep = m.List + hasSingle = true + min++ + + default: + return nil + } + + // initialize + if i == 0 { + separator = sep + } + + if runes.Equal(sep, separator) { + continue + } + + return nil + } + + if hasSuper && !hasAny && !hasSingle { + return match.NewSuper() + } + + if hasAny && !hasSuper && !hasSingle { + return match.NewAny(separator) + } + + if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { + return match.NewMin(min) + } + + every := match.NewEveryOf() + + if min > 0 { + every.Add(match.NewMin(min)) + + if !hasAny && !hasSuper { + every.Add(match.NewMax(min)) + } + } + + if len(separator) > 0 { + every.Add(match.NewContains(string(separator), true)) + } + + return every +} + +func minimizeMatchers(matchers []match.Matcher) []match.Matcher { + var done match.Matcher + var left, right, count int + + for l := 0; l < len(matchers); l++ { + for r := len(matchers); r > l; r-- { + if glued := glueMatchers(matchers[l:r]); glued != nil { + var swap bool + + if done == nil { + swap = true + } else { + cl, gl := done.Len(), glued.Len() + swap = cl > -1 && gl > -1 && gl > cl + swap = swap || count < r-l + } + + if swap { + done = glued + left = l + right = r + count = r - l + } + } + } + } + + if done == nil { + return matchers + } + + next := append(append([]match.Matcher{}, matchers[:left]...), done) + if right < len(matchers) { + next = append(next, matchers[right:]...) 
+ } + + if len(next) == len(matchers) { + return next + } + + return minimizeMatchers(next) +} + +// minimizeAnyOf tries to apply some heuristics to minimize number of nodes in given tree +func minimizeTree(tree *ast.Node) *ast.Node { + switch tree.Kind { + case ast.KindAnyOf: + return minimizeTreeAnyOf(tree) + default: + return nil + } +} + +// minimizeAnyOf tries to find common children of given node of AnyOf pattern +// it searches for common children from left and from right +// if any common children are found – then it returns new optimized ast tree +// else it returns nil +func minimizeTreeAnyOf(tree *ast.Node) *ast.Node { + if !areOfSameKind(tree.Children, ast.KindPattern) { + return nil + } + + commonLeft, commonRight := commonChildren(tree.Children) + commonLeftCount, commonRightCount := len(commonLeft), len(commonRight) + if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts + return nil + } + + var result []*ast.Node + if commonLeftCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...)) + } + + var anyOf []*ast.Node + for _, child := range tree.Children { + reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount] + var node *ast.Node + if len(reuse) == 0 { + // this pattern is completely reduced by commonLeft and commonRight patterns + // so it become nothing + node = ast.NewNode(ast.KindNothing, nil) + } else { + node = ast.NewNode(ast.KindPattern, nil, reuse...) + } + anyOf = appendIfUnique(anyOf, node) + } + switch { + case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing: + result = append(result, anyOf[0]) + case len(anyOf) > 1: + result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...)) + } + + if commonRightCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...)) + } + + return ast.NewNode(ast.KindPattern, nil, result...) 
+} + +func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { + if len(nodes) <= 1 { + return + } + + // find node that has least number of children + idx := leastChildren(nodes) + if idx == -1 { + return + } + tree := nodes[idx] + treeLength := len(tree.Children) + + // allocate max able size for rightCommon slice + // to get ability insert elements in reverse order (from end to start) + // without sorting + commonRight = make([]*ast.Node, treeLength) + lastRight := treeLength // will use this to get results as commonRight[lastRight:] + + var ( + breakLeft bool + breakRight bool + commonTotal int + ) + for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { + treeLeft := tree.Children[i] + treeRight := tree.Children[j] + + for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { + // skip least children node + if k == idx { + continue + } + + restLeft := nodes[k].Children[i] + restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] + + breakLeft = breakLeft || !treeLeft.Equal(restLeft) + + // disable searching for right common parts, if left part is already overlapping + breakRight = breakRight || (!breakLeft && j <= i) + breakRight = breakRight || !treeRight.Equal(restRight) + } + + if !breakLeft { + commonTotal++ + commonLeft = append(commonLeft, treeLeft) + } + if !breakRight { + commonTotal++ + lastRight = j + commonRight[j] = treeRight + } + } + + commonRight = commonRight[lastRight:] + + return +} + +func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { + for _, n := range target { + if reflect.DeepEqual(n, val) { + return target + } + } + return append(target, val) +} + +func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { + for _, n := range nodes { + if n.Kind != kind { + return false + } + } + return true +} + +func leastChildren(nodes []*ast.Node) int { + min := -1 + idx := -1 + for i, n := range nodes { + if idx == -1 || (len(n.Children) < min) { + min = len(n.Children) + idx = i + } + } + return idx +} + +func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { + var matchers []match.Matcher + for _, desc := range tree.Children { + m, err := compile(desc, sep) + if err != nil { + return nil, err + } + matchers = append(matchers, optimizeMatcher(m)) + } + return matchers, nil +} + +func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { + switch tree.Kind { + case ast.KindAnyOf: + // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) + if n := minimizeTree(tree); n != nil { + return compile(n, sep) + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + return match.NewAnyOf(matchers...), nil + + case ast.KindPattern: + if len(tree.Children) == 0 { + return match.NewNothing(), nil + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + m, err = compileMatchers(minimizeMatchers(matchers)) + if err != nil { + return nil, err + } + + case ast.KindAny: + m = match.NewAny(sep) + + case ast.KindSuper: + m = match.NewSuper() + + case ast.KindSingle: + m = match.NewSingle(sep) + + case ast.KindNothing: + m = match.NewNothing() + + case ast.KindList: + l := tree.Value.(ast.List) + m = match.NewList([]rune(l.Chars), l.Not) + + case ast.KindRange: + r := tree.Value.(ast.Range) + m = match.NewRange(r.Lo, r.Hi, r.Not) + + case ast.KindText: + t := tree.Value.(ast.Text) + m = match.NewText(t.Text) + + default: + return nil, 
fmt.Errorf("could not compile tree: unknown node type") + } + + return optimizeMatcher(m), nil +} + +func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { + m, err := compile(tree, sep) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go new file mode 100644 index 000000000000..2afde343af83 --- /dev/null +++ b/vendor/github.com/gobwas/glob/glob.go @@ -0,0 +1,80 @@ +package glob + +import ( + "github.com/gobwas/glob/compiler" + "github.com/gobwas/glob/syntax" +) + +// Glob represents compiled glob pattern. +type Glob interface { + Match(string) bool +} + +// Compile creates Glob for given pattern and strings (if any present after pattern) as separators. +// The pattern syntax is: +// +// pattern: +// { term } +// +// term: +// `*` matches any sequence of non-separator characters +// `**` matches any sequence of characters +// `?` matches any single non-separator character +// `[` [ `!` ] { character-range } `]` +// character class (must be non-empty) +// `{` pattern-list `}` +// pattern alternatives +// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) +// `\` c matches character c +// +// character-range: +// c matches character c (c != `\\`, `-`, `]`) +// `\` c matches character c +// lo `-` hi matches character c for lo <= c <= hi +// +// pattern-list: +// pattern { `,` pattern } +// comma-separated (without spaces) patterns +// +func Compile(pattern string, separators ...rune) (Glob, error) { + ast, err := syntax.Parse(pattern) + if err != nil { + return nil, err + } + + matcher, err := compiler.Compile(ast, separators) + if err != nil { + return nil, err + } + + return matcher, nil +} + +// MustCompile is the same as Compile, except that if Compile returns error, this will panic +func MustCompile(pattern string, separators ...rune) Glob { + g, err := Compile(pattern, separators...) + if err != nil { + panic(err) + } + + return g +} + +// QuoteMeta returns a string that quotes all glob pattern meta characters +// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\[foo\*\]`. 
+func QuoteMeta(s string) string { + b := make([]byte, 2*len(s)) + + // a byte loop is correct because all meta characters are ASCII + j := 0 + for i := 0; i < len(s); i++ { + if syntax.Special(s[i]) { + b[j] = '\\' + j++ + } + b[j] = s[i] + j++ + } + + return string(b[0:j]) +} diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go new file mode 100644 index 000000000000..514a9a5c450a --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/strings" +) + +type Any struct { + Separators []rune +} + +func NewAny(s []rune) Any { + return Any{s} +} + +func (self Any) Match(s string) bool { + return strings.IndexAnyRunes(s, self.Separators) == -1 +} + +func (self Any) Index(s string) (int, []int) { + found := strings.IndexAnyRunes(s, self.Separators) + switch found { + case -1: + case 0: + return 0, segments0 + default: + s = s[:found] + } + + segments := acquireSegments(len(s)) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Any) Len() int { + return lenNo +} + +func (self Any) String() string { + return fmt.Sprintf("", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go new file mode 100644 index 000000000000..8e65356cdc9f --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any_of.go @@ -0,0 +1,82 @@ +package match + +import "fmt" + +type AnyOf struct { + Matchers Matchers +} + +func NewAnyOf(m ...Matcher) AnyOf { + return AnyOf{Matchers(m)} +} + +func (self *AnyOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self AnyOf) Match(s string) bool { + for _, m := range self.Matchers { + if m.Match(s) { + return true + } + } + + return false +} + +func (self AnyOf) Index(s string) (int, []int) { + index := -1 + + segments := acquireSegments(len(s)) + for _, m := range self.Matchers { + idx, seg := m.Index(s) + if idx == -1 { + continue + } + + if index == -1 || idx < index { + index = idx + segments = append(segments[:0], seg...) 
+ continue + } + + if idx > index { + continue + } + + // here idx == index + segments = appendMerge(segments, seg) + } + + if index == -1 { + releaseSegments(segments) + return -1, nil + } + + return index, segments +} + +func (self AnyOf) Len() (l int) { + l = -1 + for _, m := range self.Matchers { + ml := m.Len() + switch { + case l == -1: + l = ml + continue + + case ml == -1: + return -1 + + case l != ml: + return -1 + } + } + + return +} + +func (self AnyOf) String() string { + return fmt.Sprintf("", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go new file mode 100644 index 000000000000..a8130e93eae5 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/btree.go @@ -0,0 +1,146 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type BTree struct { + Value Matcher + Left Matcher + Right Matcher + ValueLengthRunes int + LeftLengthRunes int + RightLengthRunes int + LengthRunes int +} + +func NewBTree(Value, Left, Right Matcher) (tree BTree) { + tree.Value = Value + tree.Left = Left + tree.Right = Right + + lenOk := true + if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 { + lenOk = false + } + + if Left != nil { + if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 { + lenOk = false + } + } + + if Right != nil { + if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 { + lenOk = false + } + } + + if lenOk { + tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes + } else { + tree.LengthRunes = -1 + } + + return tree +} + +func (self BTree) Len() int { + return self.LengthRunes +} + +// todo? +func (self BTree) Index(s string) (int, []int) { + return -1, nil +} + +func (self BTree) Match(s string) bool { + inputLen := len(s) + + // self.Length, self.RLen and self.LLen are values meaning the length of runes for each part + // here we manipulating byte length for better optimizations + // but these checks still works, cause minLen of 1-rune string is 1 byte. 
+ if self.LengthRunes != -1 && self.LengthRunes > inputLen { + return false + } + + // try to cut unnecessary parts + // by knowledge of length of right and left part + var offset, limit int + if self.LeftLengthRunes >= 0 { + offset = self.LeftLengthRunes + } + if self.RightLengthRunes >= 0 { + limit = inputLen - self.RightLengthRunes + } else { + limit = inputLen + } + + for offset < limit { + // search for matching part in substring + index, segments := self.Value.Index(s[offset:limit]) + if index == -1 { + releaseSegments(segments) + return false + } + + l := s[:offset+index] + var left bool + if self.Left != nil { + left = self.Left.Match(l) + } else { + left = l == "" + } + + if left { + for i := len(segments) - 1; i >= 0; i-- { + length := segments[i] + + var right bool + var r string + // if there is no string for the right branch + if inputLen <= offset+index+length { + r = "" + } else { + r = s[offset+index+length:] + } + + if self.Right != nil { + right = self.Right.Match(r) + } else { + right = r == "" + } + + if right { + releaseSegments(segments) + return true + } + } + } + + _, step := utf8.DecodeRuneInString(s[offset+index:]) + offset += index + step + + releaseSegments(segments) + } + + return false +} + +func (self BTree) String() string { + const n string = "" + var l, r string + if self.Left == nil { + l = n + } else { + l = self.Left.String() + } + if self.Right == nil { + r = n + } else { + r = self.Right.String() + } + + return fmt.Sprintf("%s]>", l, self.Value, r) +} diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go new file mode 100644 index 000000000000..0998e95b0eaf --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/contains.go @@ -0,0 +1,58 @@ +package match + +import ( + "fmt" + "strings" +) + +type Contains struct { + Needle string + Not bool +} + +func NewContains(needle string, not bool) Contains { + return Contains{needle, not} +} + +func (self Contains) Match(s string) bool { + return strings.Contains(s, self.Needle) != self.Not +} + +func (self Contains) Index(s string) (int, []int) { + var offset int + + idx := strings.Index(s, self.Needle) + + if !self.Not { + if idx == -1 { + return -1, nil + } + + offset = idx + len(self.Needle) + if len(s) <= offset { + return 0, []int{offset} + } + s = s[offset:] + } else if idx != -1 { + s = s[:idx] + } + + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, offset+i) + } + + return 0, append(segments, offset+len(s)) +} + +func (self Contains) Len() int { + return lenNo +} + +func (self Contains) String() string { + var not string + if self.Not { + not = "!" 
+ } + return fmt.Sprintf("", not, self.Needle) +} diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go new file mode 100644 index 000000000000..7c968ee368b0 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/every_of.go @@ -0,0 +1,99 @@ +package match + +import ( + "fmt" +) + +type EveryOf struct { + Matchers Matchers +} + +func NewEveryOf(m ...Matcher) EveryOf { + return EveryOf{Matchers(m)} +} + +func (self *EveryOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self EveryOf) Len() (l int) { + for _, m := range self.Matchers { + if ml := m.Len(); l > 0 { + l += ml + } else { + return -1 + } + } + + return +} + +func (self EveryOf) Index(s string) (int, []int) { + var index int + var offset int + + // make `in` with cap as len(s), + // cause it is the maximum size of output segments values + next := acquireSegments(len(s)) + current := acquireSegments(len(s)) + + sub := s + for i, m := range self.Matchers { + idx, seg := m.Index(sub) + if idx == -1 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + if i == 0 { + // we use copy here instead of `current = seg` + // cause seg is a slice from reusable buffer `in` + // and it could be overwritten in next iteration + current = append(current, seg...) + } else { + // clear the next + next = next[:0] + + delta := index - (idx + offset) + for _, ex := range current { + for _, n := range seg { + if ex+delta == n { + next = append(next, n) + } + } + } + + if len(next) == 0 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + current = append(current[:0], next...) + } + + index = idx + offset + sub = s[index:] + offset += idx + } + + releaseSegments(next) + + return index, current +} + +func (self EveryOf) Match(s string) bool { + for _, m := range self.Matchers { + if !m.Match(s) { + return false + } + } + + return true +} + +func (self EveryOf) String() string { + return fmt.Sprintf("", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go new file mode 100644 index 000000000000..7fd763ecd8ea --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/list.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +type List struct { + List []rune + Not bool +} + +func NewList(list []rune, not bool) List { + return List{list, not} +} + +func (self List) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inList := runes.IndexRune(self.List, r) != -1 + return inList == !self.Not +} + +func (self List) Len() int { + return lenOne +} + +func (self List) Index(s string) (int, []int) { + for i, r := range s { + if self.Not == (runes.IndexRune(self.List, r) == -1) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self List) String() string { + var not string + if self.Not { + not = "!" 
+ } + + return fmt.Sprintf("", not, string(self.List)) +} diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go new file mode 100644 index 000000000000..f80e007fb838 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/match.go @@ -0,0 +1,81 @@ +package match + +// todo common table of rune's length + +import ( + "fmt" + "strings" +) + +const lenOne = 1 +const lenZero = 0 +const lenNo = -1 + +type Matcher interface { + Match(string) bool + Index(string) (int, []int) + Len() int + String() string +} + +type Matchers []Matcher + +func (m Matchers) String() string { + var s []string + for _, matcher := range m { + s = append(s, fmt.Sprint(matcher)) + } + + return fmt.Sprintf("%s", strings.Join(s, ",")) +} + +// appendMerge merges and sorts given already SORTED and UNIQUE segments. +func appendMerge(target, sub []int) []int { + lt, ls := len(target), len(sub) + out := make([]int, 0, lt+ls) + + for x, y := 0, 0; x < lt || y < ls; { + if x >= lt { + out = append(out, sub[y:]...) + break + } + + if y >= ls { + out = append(out, target[x:]...) + break + } + + xValue := target[x] + yValue := sub[y] + + switch { + + case xValue == yValue: + out = append(out, xValue) + x++ + y++ + + case xValue < yValue: + out = append(out, xValue) + x++ + + case yValue < xValue: + out = append(out, yValue) + y++ + + } + } + + target = append(target[:0], out...) + + return target +} + +func reverseSegments(input []int) { + l := len(input) + m := l / 2 + + for i := 0; i < m; i++ { + input[i], input[l-i-1] = input[l-i-1], input[i] + } +} diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go new file mode 100644 index 000000000000..d72f69efff75 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/max.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Max struct { + Limit int +} + +func NewMax(l int) Max { + return Max{l} +} + +func (self Max) Match(s string) bool { + var l int + for range s { + l += 1 + if l > self.Limit { + return false + } + } + + return true +} + +func (self Max) Index(s string) (int, []int) { + segments := acquireSegments(self.Limit + 1) + segments = append(segments, 0) + var count int + for i, r := range s { + count++ + if count > self.Limit { + break + } + segments = append(segments, i+utf8.RuneLen(r)) + } + + return 0, segments +} + +func (self Max) Len() int { + return lenNo +} + +func (self Max) String() string { + return fmt.Sprintf("", self.Limit) +} diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go new file mode 100644 index 000000000000..db57ac8eb49d --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/min.go @@ -0,0 +1,57 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Min struct { + Limit int +} + +func NewMin(l int) Min { + return Min{l} +} + +func (self Min) Match(s string) bool { + var l int + for range s { + l += 1 + if l >= self.Limit { + return true + } + } + + return false +} + +func (self Min) Index(s string) (int, []int) { + var count int + + c := len(s) - self.Limit + 1 + if c <= 0 { + return -1, nil + } + + segments := acquireSegments(c) + for i, r := range s { + count++ + if count >= self.Limit { + segments = append(segments, i+utf8.RuneLen(r)) + } + } + + if len(segments) == 0 { + return -1, nil + } + + return 0, segments +} + +func (self Min) Len() int { + return lenNo +} + +func (self Min) String() string { + return fmt.Sprintf("", self.Limit) +} diff --git 
a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go new file mode 100644 index 000000000000..0d4ecd36b80b --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/nothing.go @@ -0,0 +1,27 @@ +package match + +import ( + "fmt" +) + +type Nothing struct{} + +func NewNothing() Nothing { + return Nothing{} +} + +func (self Nothing) Match(s string) bool { + return len(s) == 0 +} + +func (self Nothing) Index(s string) (int, []int) { + return 0, segments0 +} + +func (self Nothing) Len() int { + return lenZero +} + +func (self Nothing) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go new file mode 100644 index 000000000000..a7347250e8d5 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix.go @@ -0,0 +1,50 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +type Prefix struct { + Prefix string +} + +func NewPrefix(p string) Prefix { + return Prefix{p} +} + +func (self Prefix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + length := len(self.Prefix) + var sub string + if len(s) > idx+length { + sub = s[idx+length:] + } else { + sub = "" + } + + segments := acquireSegments(len(sub) + 1) + segments = append(segments, length) + for i, r := range sub { + segments = append(segments, length+i+utf8.RuneLen(r)) + } + + return idx, segments +} + +func (self Prefix) Len() int { + return lenNo +} + +func (self Prefix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) +} + +func (self Prefix) String() string { + return fmt.Sprintf("", self.Prefix) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go new file mode 100644 index 000000000000..8ee58fe1b3c7 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_any.go @@ -0,0 +1,55 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" + + sutil "github.com/gobwas/glob/util/strings" +) + +type PrefixAny struct { + Prefix string + Separators []rune +} + +func NewPrefixAny(s string, sep []rune) PrefixAny { + return PrefixAny{s, sep} +} + +func (self PrefixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + n := len(self.Prefix) + sub := s[idx+n:] + i := sutil.IndexAnyRunes(sub, self.Separators) + if i > -1 { + sub = sub[:i] + } + + seg := acquireSegments(len(sub) + 1) + seg = append(seg, n) + for i, r := range sub { + seg = append(seg, n+i+utf8.RuneLen(r)) + } + + return idx, seg +} + +func (self PrefixAny) Len() int { + return lenNo +} + +func (self PrefixAny) Match(s string) bool { + if !strings.HasPrefix(s, self.Prefix) { + return false + } + return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1 +} + +func (self PrefixAny) String() string { + return fmt.Sprintf("", self.Prefix, string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go new file mode 100644 index 000000000000..8208085a1996 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go @@ -0,0 +1,62 @@ +package match + +import ( + "fmt" + "strings" +) + +type PrefixSuffix struct { + Prefix, Suffix string +} + +func NewPrefixSuffix(p, s string) PrefixSuffix { + return PrefixSuffix{p, s} +} + +func (self PrefixSuffix) Index(s string) (int, []int) { + prefixIdx := strings.Index(s, 
self.Prefix) + if prefixIdx == -1 { + return -1, nil + } + + suffixLen := len(self.Suffix) + if suffixLen <= 0 { + return prefixIdx, []int{len(s) - prefixIdx} + } + + if (len(s) - prefixIdx) <= 0 { + return -1, nil + } + + segments := acquireSegments(len(s) - prefixIdx) + for sub := s[prefixIdx:]; ; { + suffixIdx := strings.LastIndex(sub, self.Suffix) + if suffixIdx == -1 { + break + } + + segments = append(segments, suffixIdx+suffixLen) + sub = sub[:suffixIdx] + } + + if len(segments) == 0 { + releaseSegments(segments) + return -1, nil + } + + reverseSegments(segments) + + return prefixIdx, segments +} + +func (self PrefixSuffix) Len() int { + return lenNo +} + +func (self PrefixSuffix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix) +} + +func (self PrefixSuffix) String() string { + return fmt.Sprintf("", self.Prefix, self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go new file mode 100644 index 000000000000..ce30245a40bc --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/range.go @@ -0,0 +1,48 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Range struct { + Lo, Hi rune + Not bool +} + +func NewRange(lo, hi rune, not bool) Range { + return Range{lo, hi, not} +} + +func (self Range) Len() int { + return lenOne +} + +func (self Range) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inRange := r >= self.Lo && r <= self.Hi + + return inRange == !self.Not +} + +func (self Range) Index(s string) (int, []int) { + for i, r := range s { + if self.Not != (r >= self.Lo && r <= self.Hi) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Range) String() string { + var not string + if self.Not { + not = "!" 
+ } + return fmt.Sprintf("", not, string(self.Lo), string(self.Hi)) +} diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go new file mode 100644 index 000000000000..4379042e42f3 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/row.go @@ -0,0 +1,77 @@ +package match + +import ( + "fmt" +) + +type Row struct { + Matchers Matchers + RunesLength int + Segments []int +} + +func NewRow(len int, m ...Matcher) Row { + return Row{ + Matchers: Matchers(m), + RunesLength: len, + Segments: []int{len}, + } +} + +func (self Row) matchAll(s string) bool { + var idx int + for _, m := range self.Matchers { + length := m.Len() + + var next, i int + for next = range s[idx:] { + i++ + if i == length { + break + } + } + + if i < length || !m.Match(s[idx:idx+next+1]) { + return false + } + + idx += next + 1 + } + + return true +} + +func (self Row) lenOk(s string) bool { + var i int + for range s { + i++ + if i > self.RunesLength { + return false + } + } + return self.RunesLength == i +} + +func (self Row) Match(s string) bool { + return self.lenOk(s) && self.matchAll(s) +} + +func (self Row) Len() (l int) { + return self.RunesLength +} + +func (self Row) Index(s string) (int, []int) { + for i := range s { + if len(s[i:]) < self.RunesLength { + break + } + if self.matchAll(s[i:]) { + return i, self.Segments + } + } + return -1, nil +} + +func (self Row) String() string { + return fmt.Sprintf("", self.RunesLength, self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go new file mode 100644 index 000000000000..9ea6f3094398 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/segments.go @@ -0,0 +1,91 @@ +package match + +import ( + "sync" +) + +type SomePool interface { + Get() []int + Put([]int) +} + +var segmentsPools [1024]sync.Pool + +func toPowerOfTwo(v int) int { + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + + return v +} + +const ( + cacheFrom = 16 + cacheToAndHigher = 1024 + cacheFromIndex = 15 + cacheToAndHigherIndex = 1023 +) + +var ( + segments0 = []int{0} + segments1 = []int{1} + segments2 = []int{2} + segments3 = []int{3} + segments4 = []int{4} +) + +var segmentsByRuneLength [5][]int = [5][]int{ + 0: segments0, + 1: segments1, + 2: segments2, + 3: segments3, + 4: segments4, +} + +func init() { + for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 { + func(i int) { + segmentsPools[i-1] = sync.Pool{New: func() interface{} { + return make([]int, 0, i) + }} + }(i) + } +} + +func getTableIndex(c int) int { + p := toPowerOfTwo(c) + switch { + case p >= cacheToAndHigher: + return cacheToAndHigherIndex + case p <= cacheFrom: + return cacheFromIndex + default: + return p - 1 + } +} + +func acquireSegments(c int) []int { + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return make([]int, 0, c) + } + + return segmentsPools[getTableIndex(c)].Get().([]int)[:0] +} + +func releaseSegments(s []int) { + c := cap(s) + + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return + } + + segmentsPools[getTableIndex(c)].Put(s) +} diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go new file mode 100644 index 000000000000..ee6e3954c1f3 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/single.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + 
"github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +// single represents ? +type Single struct { + Separators []rune +} + +func NewSingle(s []rune) Single { + return Single{s} +} + +func (self Single) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + return runes.IndexRune(self.Separators, r) == -1 +} + +func (self Single) Len() int { + return lenOne +} + +func (self Single) Index(s string) (int, []int) { + for i, r := range s { + if runes.IndexRune(self.Separators, r) == -1 { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Single) String() string { + return fmt.Sprintf("", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go new file mode 100644 index 000000000000..85bea8c68ec4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix.go @@ -0,0 +1,35 @@ +package match + +import ( + "fmt" + "strings" +) + +type Suffix struct { + Suffix string +} + +func NewSuffix(s string) Suffix { + return Suffix{s} +} + +func (self Suffix) Len() int { + return lenNo +} + +func (self Suffix) Match(s string) bool { + return strings.HasSuffix(s, self.Suffix) +} + +func (self Suffix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + return 0, []int{idx + len(self.Suffix)} +} + +func (self Suffix) String() string { + return fmt.Sprintf("", self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go new file mode 100644 index 000000000000..c5106f8196ca --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix_any.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "strings" + + sutil "github.com/gobwas/glob/util/strings" +) + +type SuffixAny struct { + Suffix string + Separators []rune +} + +func NewSuffixAny(s string, sep []rune) SuffixAny { + return SuffixAny{s, sep} +} + +func (self SuffixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1 + + return i, []int{idx + len(self.Suffix) - i} +} + +func (self SuffixAny) Len() int { + return lenNo +} + +func (self SuffixAny) Match(s string) bool { + if !strings.HasSuffix(s, self.Suffix) { + return false + } + return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1 +} + +func (self SuffixAny) String() string { + return fmt.Sprintf("", string(self.Separators), self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go new file mode 100644 index 000000000000..3875950bb8c7 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/super.go @@ -0,0 +1,33 @@ +package match + +import ( + "fmt" +) + +type Super struct{} + +func NewSuper() Super { + return Super{} +} + +func (self Super) Match(s string) bool { + return true +} + +func (self Super) Len() int { + return lenNo +} + +func (self Super) Index(s string) (int, []int) { + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Super) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go new file mode 100644 index 000000000000..0a17616d3cb8 --- /dev/null +++ 
b/vendor/github.com/gobwas/glob/match/text.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// raw represents raw string to match +type Text struct { + Str string + RunesLength int + BytesLength int + Segments []int +} + +func NewText(s string) Text { + return Text{ + Str: s, + RunesLength: utf8.RuneCountInString(s), + BytesLength: len(s), + Segments: []int{len(s)}, + } +} + +func (self Text) Match(s string) bool { + return self.Str == s +} + +func (self Text) Len() int { + return self.RunesLength +} + +func (self Text) Index(s string) (int, []int) { + index := strings.Index(s, self.Str) + if index == -1 { + return -1, nil + } + + return index, self.Segments +} + +func (self Text) String() string { + return fmt.Sprintf("", self.Str) +} diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md new file mode 100644 index 000000000000..f58144e733e2 --- /dev/null +++ b/vendor/github.com/gobwas/glob/readme.md @@ -0,0 +1,148 @@ +# glob.[go](https://golang.org) + +[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url] + +> Go Globbing Library. + +## Install + +```shell + go get github.com/gobwas/glob +``` + +## Example + +```go + +package main + +import "github.com/gobwas/glob" + +func main() { + var g glob.Glob + + // create simple glob + g = glob.MustCompile("*.github.com") + g.Match("api.github.com") // true + + // quote meta characters and then create simple glob + g = glob.MustCompile(glob.QuoteMeta("*.github.com")) + g.Match("*.github.com") // true + + // create new glob with set of delimiters as ["."] + g = glob.MustCompile("api.*.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // false + + // create new glob with set of delimiters as ["."] + // but now with super wildcard + g = glob.MustCompile("api.**.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // true + + // create glob with single symbol wildcard + g = glob.MustCompile("?at") + g.Match("cat") // true + g.Match("fat") // true + g.Match("at") // false + + // create glob with single symbol wildcard and delimiters ['f'] + g = glob.MustCompile("?at", 'f') + g.Match("cat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[abc]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[!abc]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[a-c]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[!a-c]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with pattern-alternatives list + g = glob.MustCompile("{cat,bat,[fr]at}") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // true + g.Match("rat") // true + g.Match("at") // false + g.Match("zat") // false +} + +``` + +## Performance + +This library is created for compile-once patterns. This means, that compilation could take time, but +strings matching is done faster, than in case when always parsing template. 
+ +If you will not use compiled `glob.Glob` object, and do `g := glob.MustCompile(pattern); g.Match(...)` every time, then your code will be much more slower. + +Run `go test -bench=.` from source root to see the benchmarks: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432 +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199 +`https://*.google.*` | `https://account.google.com` | `true` | 96 +`https://*.google.*` | `https://google.com` | `false` | 66 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24 +`abc*` | `abcdef` | `true` | 8.15 +`abc*` | `af` | `false` | 5.68 +`*def` | `abcdef` | `true` | 8.84 +`*def` | `af` | `false` | 5.74 +`ab*ef` | `abcdef` | `true` | 15.2 +`ab*ef` | `af` | `false` | 10.4 + +The same things with `regexp` package: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553 +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383 +`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205 +`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272 +`^abc.*$` | `abcdef` | `true` | 237 +`^abc.*$` | `af` | `false` | 100 +`^.*def$` | `abcdef` | `true` | 464 +`^.*def$` | `af` | `false` | 265 +`^ab.*ef$` | `abcdef` | `true` | 375 +`^ab.*ef$` | `af` | `false` | 145 + +[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg +[godoc-url]: https://godoc.org/github.com/gobwas/glob +[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master +[travis-url]: https://travis-ci.org/gobwas/glob + +## Syntax + +Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm), +except that `**` is aka super-asterisk, that do not sensitive for separators. 
\ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go new file mode 100644 index 000000000000..3220a694a9cb --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go @@ -0,0 +1,122 @@ +package ast + +import ( + "bytes" + "fmt" +) + +type Node struct { + Parent *Node + Children []*Node + Value interface{} + Kind Kind +} + +func NewNode(k Kind, v interface{}, ch ...*Node) *Node { + n := &Node{ + Kind: k, + Value: v, + } + for _, c := range ch { + Insert(n, c) + } + return n +} + +func (a *Node) Equal(b *Node) bool { + if a.Kind != b.Kind { + return false + } + if a.Value != b.Value { + return false + } + if len(a.Children) != len(b.Children) { + return false + } + for i, c := range a.Children { + if !c.Equal(b.Children[i]) { + return false + } + } + return true +} + +func (a *Node) String() string { + var buf bytes.Buffer + buf.WriteString(a.Kind.String()) + if a.Value != nil { + buf.WriteString(" =") + buf.WriteString(fmt.Sprintf("%v", a.Value)) + } + if len(a.Children) > 0 { + buf.WriteString(" [") + for i, c := range a.Children { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(c.String()) + } + buf.WriteString("]") + } + return buf.String() +} + +func Insert(parent *Node, children ...*Node) { + parent.Children = append(parent.Children, children...) + for _, ch := range children { + ch.Parent = parent + } +} + +type List struct { + Not bool + Chars string +} + +type Range struct { + Not bool + Lo, Hi rune +} + +type Text struct { + Text string +} + +type Kind int + +const ( + KindNothing Kind = iota + KindPattern + KindList + KindRange + KindText + KindAny + KindSuper + KindSingle + KindAnyOf +) + +func (k Kind) String() string { + switch k { + case KindNothing: + return "Nothing" + case KindPattern: + return "Pattern" + case KindList: + return "List" + case KindRange: + return "Range" + case KindText: + return "Text" + case KindAny: + return "Any" + case KindSuper: + return "Super" + case KindSingle: + return "Single" + case KindAnyOf: + return "AnyOf" + default: + return "" + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go new file mode 100644 index 000000000000..429b4094303e --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go @@ -0,0 +1,157 @@ +package ast + +import ( + "errors" + "fmt" + "github.com/gobwas/glob/syntax/lexer" + "unicode/utf8" +) + +type Lexer interface { + Next() lexer.Token +} + +type parseFn func(*Node, Lexer) (parseFn, *Node, error) + +func Parse(lexer Lexer) (*Node, error) { + var parser parseFn + + root := NewNode(KindPattern, nil) + + var ( + tree *Node + err error + ) + for parser, tree = parserMain, root; parser != nil; { + parser, tree, err = parser(tree, lexer) + if err != nil { + return nil, err + } + } + + return root, nil +} + +func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, nil + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Text: + Insert(tree, NewNode(KindText, Text{token.Raw})) + return parserMain, tree, nil + + case lexer.Any: + Insert(tree, NewNode(KindAny, nil)) + return parserMain, tree, nil + + case lexer.Super: + Insert(tree, NewNode(KindSuper, nil)) + return parserMain, tree, nil + + case lexer.Single: + Insert(tree, NewNode(KindSingle, nil)) + return parserMain, tree, nil + + case lexer.RangeOpen: + return 
parserRange, tree, nil + + case lexer.TermsOpen: + a := NewNode(KindAnyOf, nil) + Insert(tree, a) + + p := NewNode(KindPattern, nil) + Insert(a, p) + + return parserMain, p, nil + + case lexer.Separator: + p := NewNode(KindPattern, nil) + Insert(tree.Parent, p) + + return parserMain, p, nil + + case lexer.TermsClose: + return parserMain, tree.Parent.Parent, nil + + default: + return nil, tree, fmt.Errorf("unexpected token: %s", token) + } + } + return nil, tree, fmt.Errorf("unknown error") +} + +func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { + var ( + not bool + lo rune + hi rune + chars string + ) + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, errors.New("unexpected end") + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Not: + not = true + + case lexer.RangeLo: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + lo = r + + case lexer.RangeBetween: + // + + case lexer.RangeHi: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + + hi = r + + if hi < lo { + return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) + } + + case lexer.Text: + chars = token.Raw + + case lexer.RangeClose: + isRange := lo != 0 && hi != 0 + isChars := chars != "" + + if isChars == isRange { + return nil, tree, fmt.Errorf("could not parse range") + } + + if isRange { + Insert(tree, NewNode(KindRange, Range{ + Lo: lo, + Hi: hi, + Not: not, + })) + } else { + Insert(tree, NewNode(KindList, List{ + Chars: chars, + Not: not, + })) + } + + return parserMain, tree, nil + } + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go new file mode 100644 index 000000000000..a1c8d1962a06 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go @@ -0,0 +1,273 @@ +package lexer + +import ( + "bytes" + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +const ( + char_any = '*' + char_comma = ',' + char_single = '?' + char_escape = '\\' + char_range_open = '[' + char_range_close = ']' + char_terms_open = '{' + char_terms_close = '}' + char_range_not = '!' 
+ char_range_between = '-' +) + +var specials = []byte{ + char_any, + char_single, + char_escape, + char_range_open, + char_range_close, + char_terms_open, + char_terms_close, +} + +func Special(c byte) bool { + return bytes.IndexByte(specials, c) != -1 +} + +type tokens []Token + +func (i *tokens) shift() (ret Token) { + ret = (*i)[0] + copy(*i, (*i)[1:]) + *i = (*i)[:len(*i)-1] + return +} + +func (i *tokens) push(v Token) { + *i = append(*i, v) +} + +func (i *tokens) empty() bool { + return len(*i) == 0 +} + +var eof rune = 0 + +type lexer struct { + data string + pos int + err error + + tokens tokens + termsLevel int + + lastRune rune + lastRuneSize int + hasRune bool +} + +func NewLexer(source string) *lexer { + l := &lexer{ + data: source, + tokens: tokens(make([]Token, 0, 4)), + } + return l +} + +func (l *lexer) Next() Token { + if l.err != nil { + return Token{Error, l.err.Error()} + } + if !l.tokens.empty() { + return l.tokens.shift() + } + + l.fetchItem() + return l.Next() +} + +func (l *lexer) peek() (r rune, w int) { + if l.pos == len(l.data) { + return eof, 0 + } + + r, w = utf8.DecodeRuneInString(l.data[l.pos:]) + if r == utf8.RuneError { + l.errorf("could not read rune") + r = eof + w = 0 + } + + return +} + +func (l *lexer) read() rune { + if l.hasRune { + l.hasRune = false + l.seek(l.lastRuneSize) + return l.lastRune + } + + r, s := l.peek() + l.seek(s) + + l.lastRune = r + l.lastRuneSize = s + + return r +} + +func (l *lexer) seek(w int) { + l.pos += w +} + +func (l *lexer) unread() { + if l.hasRune { + l.errorf("could not unread rune") + return + } + l.seek(-l.lastRuneSize) + l.hasRune = true +} + +func (l *lexer) errorf(f string, v ...interface{}) { + l.err = fmt.Errorf(f, v...) +} + +func (l *lexer) inTerms() bool { + return l.termsLevel > 0 +} + +func (l *lexer) termsEnter() { + l.termsLevel++ +} + +func (l *lexer) termsLeave() { + l.termsLevel-- +} + +var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} +var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) + +func (l *lexer) fetchItem() { + r := l.read() + switch { + case r == eof: + l.tokens.push(Token{EOF, ""}) + + case r == char_terms_open: + l.termsEnter() + l.tokens.push(Token{TermsOpen, string(r)}) + + case r == char_comma && l.inTerms(): + l.tokens.push(Token{Separator, string(r)}) + + case r == char_terms_close && l.inTerms(): + l.tokens.push(Token{TermsClose, string(r)}) + l.termsLeave() + + case r == char_range_open: + l.tokens.push(Token{RangeOpen, string(r)}) + l.fetchRange() + + case r == char_single: + l.tokens.push(Token{Single, string(r)}) + + case r == char_any: + if l.read() == char_any { + l.tokens.push(Token{Super, string(r) + string(r)}) + } else { + l.unread() + l.tokens.push(Token{Any, string(r)}) + } + + default: + l.unread() + + var breakers []rune + if l.inTerms() { + breakers = inTermsBreakers + } else { + breakers = inTextBreakers + } + l.fetchText(breakers) + } +} + +func (l *lexer) fetchRange() { + var wantHi bool + var wantClose bool + var seenNot bool + for { + r := l.read() + if r == eof { + l.errorf("unexpected end of input") + return + } + + if wantClose { + if r != char_range_close { + l.errorf("expected close range character") + } else { + l.tokens.push(Token{RangeClose, string(r)}) + } + return + } + + if wantHi { + l.tokens.push(Token{RangeHi, string(r)}) + wantClose = true + continue + } + + if !seenNot && r == char_range_not { + l.tokens.push(Token{Not, string(r)}) + seenNot = true + continue + } + + if n, w := l.peek(); n 
== char_range_between { + l.seek(w) + l.tokens.push(Token{RangeLo, string(r)}) + l.tokens.push(Token{RangeBetween, string(n)}) + wantHi = true + continue + } + + l.unread() // unread first peek and fetch as text + l.fetchText([]rune{char_range_close}) + wantClose = true + } +} + +func (l *lexer) fetchText(breakers []rune) { + var data []rune + var escaped bool + +reading: + for { + r := l.read() + if r == eof { + break + } + + if !escaped { + if r == char_escape { + escaped = true + continue + } + + if runes.IndexRune(breakers, r) != -1 { + l.unread() + break reading + } + } + + escaped = false + data = append(data, r) + } + + if len(data) > 0 { + l.tokens.push(Token{Text, string(data)}) + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go new file mode 100644 index 000000000000..2797c4e83a4f --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go @@ -0,0 +1,88 @@ +package lexer + +import "fmt" + +type TokenType int + +const ( + EOF TokenType = iota + Error + Text + Char + Any + Super + Single + Not + Separator + RangeOpen + RangeClose + RangeLo + RangeHi + RangeBetween + TermsOpen + TermsClose +) + +func (tt TokenType) String() string { + switch tt { + case EOF: + return "eof" + + case Error: + return "error" + + case Text: + return "text" + + case Char: + return "char" + + case Any: + return "any" + + case Super: + return "super" + + case Single: + return "single" + + case Not: + return "not" + + case Separator: + return "separator" + + case RangeOpen: + return "range_open" + + case RangeClose: + return "range_close" + + case RangeLo: + return "range_lo" + + case RangeHi: + return "range_hi" + + case RangeBetween: + return "range_between" + + case TermsOpen: + return "terms_open" + + case TermsClose: + return "terms_close" + + default: + return "undef" + } +} + +type Token struct { + Type TokenType + Raw string +} + +func (t Token) String() string { + return fmt.Sprintf("%v<%q>", t.Type, t.Raw) +} diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go new file mode 100644 index 000000000000..1d168b148299 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/syntax.go @@ -0,0 +1,14 @@ +package syntax + +import ( + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/syntax/lexer" +) + +func Parse(s string) (*ast.Node, error) { + return ast.Parse(lexer.NewLexer(s)) +} + +func Special(b byte) bool { + return lexer.Special(b) +} diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go new file mode 100644 index 000000000000..a72355641072 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/runes/runes.go @@ -0,0 +1,154 @@ +package runes + +func Index(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + return 0 + case ln == 1: + return IndexRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := 0; i < ls && ls-i >= ln; i++ { + for y := 0; y < ln; y++ { + if s[i+y] != needle[y] { + continue head + } + } + + return i + } + + return -1 +} + +func LastIndex(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + if ls == 0 { + return 0 + } + return ls + case ln == 1: + return IndexLastRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := ls - 1; i >= 0 && 
i >= ln; i-- { + for y := ln - 1; y >= 0; y-- { + if s[i-(ln-y-1)] != needle[y] { + continue head + } + } + + return i - ln + 1 + } + + return -1 +} + +// IndexAny returns the index of the first instance of any Unicode code point +// from chars in s, or -1 if no Unicode code point from chars is present in s. +func IndexAny(s, chars []rune) int { + if len(chars) > 0 { + for i, c := range s { + for _, m := range chars { + if c == m { + return i + } + } + } + } + return -1 +} + +func Contains(s, needle []rune) bool { + return Index(s, needle) >= 0 +} + +func Max(s []rune) (max rune) { + for _, r := range s { + if r > max { + max = r + } + } + + return +} + +func Min(s []rune) rune { + min := rune(-1) + for _, r := range s { + if min == -1 { + min = r + continue + } + + if r < min { + min = r + } + } + + return min +} + +func IndexRune(s []rune, r rune) int { + for i, c := range s { + if c == r { + return i + } + } + return -1 +} + +func IndexLastRune(s []rune, r rune) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == r { + return i + } + } + + return -1 +} + +func Equal(a, b []rune) bool { + if len(a) == len(b) { + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true + } + + return false +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix []rune) bool { + return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix []rune) bool { + return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go new file mode 100644 index 000000000000..e8ee1920b17e --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/strings/strings.go @@ -0,0 +1,39 @@ +package strings + +import ( + "strings" + "unicode/utf8" +) + +func IndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + if i := strings.IndexRune(s, r); i != -1 { + return i + } + } + + return -1 +} + +func LastIndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + i := -1 + if 0 <= r && r < utf8.RuneSelf { + i = strings.LastIndexByte(s, byte(r)) + } else { + sub := s + for len(sub) > 0 { + j := strings.IndexRune(s, r) + if j == -1 { + break + } + i = j + sub = sub[i+1:] + } + } + if i != -1 { + return i + } + } + return -1 +} diff --git a/vendor/github.com/goccy/go-json/.codecov.yml b/vendor/github.com/goccy/go-json/.codecov.yml new file mode 100644 index 000000000000..e98134570c4f --- /dev/null +++ b/vendor/github.com/goccy/go-json/.codecov.yml @@ -0,0 +1,32 @@ +codecov: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "70...100" + + status: + project: + default: + target: 70% + threshold: 2% + patch: off + changes: no + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "header,diff" + behavior: default + require_changes: no + +ignore: + - internal/encoder/vm_color + - internal/encoder/vm_color_indent diff --git a/vendor/github.com/goccy/go-json/.gitignore b/vendor/github.com/goccy/go-json/.gitignore new file mode 100644 index 000000000000..378283829cfa --- /dev/null +++ b/vendor/github.com/goccy/go-json/.gitignore @@ -0,0 +1,2 @@ +cover.html +cover.out diff --git a/vendor/github.com/goccy/go-json/.golangci.yml b/vendor/github.com/goccy/go-json/.golangci.yml new file mode 100644 index 000000000000..977accaa9f4f --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/.golangci.yml @@ -0,0 +1,86 @@ +run: + skip-files: + - encode_optype.go + - ".*_test\\.go$" + +linters-settings: + govet: + enable-all: true + disable: + - shadow + +linters: + enable-all: true + disable: + - dogsled + - dupl + - exhaustive + - exhaustivestruct + - errorlint + - forbidigo + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godot + - godox + - goerr113 + - gofumpt + - gomnd + - gosec + - ifshort + - lll + - makezero + - nakedret + - nestif + - nlreturn + - paralleltest + - testpackage + - thelper + - wrapcheck + - interfacer + - lll + - nakedret + - nestif + - nlreturn + - testpackage + - wsl + - varnamelen + - nilnil + - ireturn + - govet + - forcetypeassert + - cyclop + - containedctx + - revive + - nosnakecase + - exhaustruct + - depguard + +issues: + exclude-rules: + # not needed + - path: /*.go + text: "ST1003: should not use underscores in package names" + linters: + - stylecheck + - path: /*.go + text: "don't use an underscore in package name" + linters: + - golint + - path: rtype.go + linters: + - golint + - stylecheck + - path: error.go + linters: + - staticcheck + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 diff --git a/vendor/github.com/goccy/go-json/CHANGELOG.md b/vendor/github.com/goccy/go-json/CHANGELOG.md new file mode 100644 index 000000000000..d09bb89c318a --- /dev/null +++ b/vendor/github.com/goccy/go-json/CHANGELOG.md @@ -0,0 +1,425 @@ +# v0.10.2 - 2023/03/20 + +### New features + +* Support DebugDOT option for debugging encoder ( #440 ) + +### Fix bugs + +* Fix combination of embedding structure and omitempty option ( #442 ) + +# v0.10.1 - 2023/03/13 + +### Fix bugs + +* Fix checkptr error for array decoder ( #415 ) +* Fix added buffer size check when decoding key ( #430 ) +* Fix handling of anonymous fields other than struct ( #431 ) +* Fix to not optimize when lower conversion can't handle byte-by-byte ( #432 ) +* Fix a problem that MarshalIndent does not work when UnorderedMap is specified ( #435 ) +* Fix mapDecoder.DecodeStream() for empty objects containing whitespace ( #425 ) +* Fix an issue that could not set the correct NextField for fields in the embedded structure ( #438 ) + +# v0.10.0 - 2022/11/29 + +### New features + +* Support JSON Path ( #250 ) + +### Fix bugs + +* Fix marshaler for map's key ( #409 ) + +# v0.9.11 - 2022/08/18 + +### Fix bugs + +* Fix unexpected behavior when buffer ends with backslash ( #383 ) +* Fix stream decoding of escaped character ( #387 ) + +# v0.9.10 - 2022/07/15 + +### Fix bugs + +* Fix boundary exception of type caching ( #382 ) + +# v0.9.9 - 2022/07/15 + +### Fix bugs + +* Fix encoding of directed interface with typed nil ( #377 ) +* Fix embedded primitive type encoding using alias ( #378 ) +* Fix slice/array type encoding with types implementing MarshalJSON ( #379 ) +* Fix unicode decoding when the expected buffer state is not met after reading ( #380 ) + +# v0.9.8 - 2022/06/30 + +### Fix bugs + +* Fix decoding of surrogate-pair ( #365 ) +* Fix handling of embedded primitive type ( #366 ) +* Add validation of escape sequence for decoder ( #367 ) +* Fix stream tokenizing respecting UseNumber ( #369 ) +* Fix encoding when struct pointer type that implements Marshal JSON is embedded ( #375 ) + +### Improve performance + +* Improve performance of linkRecursiveCode ( 
#368 ) + +# v0.9.7 - 2022/04/22 + +### Fix bugs + +#### Encoder + +* Add filtering process for encoding on slow path ( #355 ) +* Fix encoding of interface{} with pointer type ( #363 ) + +#### Decoder + +* Fix map key decoder that implements UnmarshalJSON ( #353 ) +* Fix decoding of []uint8 type ( #361 ) + +### New features + +* Add DebugWith option for encoder ( #356 ) + +# v0.9.6 - 2022/03/22 + +### Fix bugs + +* Correct the handling of the minimum value of int type for decoder ( #344 ) +* Fix bugs of stream decoder's bufferSize ( #349 ) +* Add a guard to use typeptr more safely ( #351 ) + +### Improve decoder performance + +* Improve escapeString's performance ( #345 ) + +### Others + +* Update go version for CI ( #347 ) + +# v0.9.5 - 2022/03/04 + +### Fix bugs + +* Fix panic when decoding time.Time with context ( #328 ) +* Fix reading the next character in buffer to nul consideration ( #338 ) +* Fix incorrect handling on skipValue ( #341 ) + +### Improve decoder performance + +* Improve performance when a payload contains escape sequence ( #334 ) + +# v0.9.4 - 2022/01/21 + +* Fix IsNilForMarshaler for string type with omitempty ( #323 ) +* Fix the case where the embedded field is at the end ( #326 ) + +# v0.9.3 - 2022/01/14 + +* Fix logic of removing struct field for decoder ( #322 ) + +# v0.9.2 - 2022/01/14 + +* Add invalid decoder to delay type error judgment at decode ( #321 ) + +# v0.9.1 - 2022/01/11 + +* Fix encoding of MarshalText/MarshalJSON operation with head offset ( #319 ) + +# v0.9.0 - 2022/01/05 + +### New feature + +* Supports dynamic filtering of struct fields ( #314 ) + +### Improve encoding performance + +* Improve map encoding performance ( #310 ) +* Optimize encoding path for escaped string ( #311 ) +* Add encoding option for performance ( #312 ) + +### Fix bugs + +* Fix panic at encoding map value on 1.18 ( #310 ) +* Fix MarshalIndent for interface type ( #317 ) + +# v0.8.1 - 2021/12/05 + +* Fix operation conversion from PtrHead to Head in Recursive type ( #305 ) + +# v0.8.0 - 2021/12/02 + +* Fix embedded field conflict behavior ( #300 ) +* Refactor compiler for encoder ( #301 #302 ) + +# v0.7.10 - 2021/10/16 + +* Fix conversion from pointer to uint64 ( #294 ) + +# v0.7.9 - 2021/09/28 + +* Fix encoding of nil value about interface type that has method ( #291 ) + +# v0.7.8 - 2021/09/01 + +* Fix mapassign_faststr for indirect struct type ( #283 ) +* Fix encoding of not empty interface type ( #284 ) +* Fix encoding of empty struct interface type ( #286 ) + +# v0.7.7 - 2021/08/25 + +* Fix invalid utf8 on stream decoder ( #279 ) +* Fix buffer length bug on string stream decoder ( #280 ) + +Thank you @orisano !! 
+ +# v0.7.6 - 2021/08/13 + +* Fix nil slice assignment ( #276 ) +* Improve error message ( #277 ) + +# v0.7.5 - 2021/08/12 + +* Fix encoding of embedded struct with tags ( #265 ) +* Fix encoding of embedded struct that isn't first field ( #272 ) +* Fix decoding of binary type with escaped char ( #273 ) + +# v0.7.4 - 2021/07/06 + +* Fix encoding of indirect layout structure ( #264 ) + +# v0.7.3 - 2021/06/29 + +* Fix encoding of pointer type in empty interface ( #262 ) + +# v0.7.2 - 2021/06/26 + +### Fix decoder + +* Add decoder for func type to fix decoding of nil function value ( #257 ) +* Fix stream decoding of []byte type ( #258 ) + +### Performance + +* Improve decoding performance of map[string]interface{} type ( use `mapassign_faststr` ) ( #256 ) +* Improve encoding performance of empty interface type ( remove recursive calling of `vm.Run` ) ( #259 ) + +### Benchmark + +* Add bytedance/sonic as benchmark target ( #254 ) + +# v0.7.1 - 2021/06/18 + +### Fix decoder + +* Fix error when unmarshal empty array ( #253 ) + +# v0.7.0 - 2021/06/12 + +### Support context for MarshalJSON and UnmarshalJSON ( #248 ) + +* json.MarshalContext(context.Context, interface{}, ...json.EncodeOption) ([]byte, error) +* json.NewEncoder(io.Writer).EncodeContext(context.Context, interface{}, ...json.EncodeOption) error +* json.UnmarshalContext(context.Context, []byte, interface{}, ...json.DecodeOption) error +* json.NewDecoder(io.Reader).DecodeContext(context.Context, interface{}) error + +```go +type MarshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +type UnmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} +``` + +### Add DecodeFieldPriorityFirstWin option ( #242 ) + +In the default behavior, go-json, like encoding/json, will reflect the result of the last evaluation when a field with the same name exists. I've added new options to allow you to change this behavior. `json.DecodeFieldPriorityFirstWin` option reflects the result of the first evaluation if a field with the same name exists. This behavior has a performance advantage as it allows the subsequent strings to be skipped if all fields have been evaluated. + +### Fix encoder + +* Fix indent number contains recursive type ( #249 ) +* Fix encoding of using empty interface as map key ( #244 ) + +### Fix decoder + +* Fix decoding fields containing escaped characters ( #237 ) + +### Refactor + +* Move some tests to subdirectory ( #243 ) +* Refactor package layout for decoder ( #238 ) + +# v0.6.1 - 2021/06/02 + +### Fix encoder + +* Fix value of totalLength for encoding ( #236 ) + +# v0.6.0 - 2021/06/01 + +### Support Colorize option for encoding (#233) + +```go +b, err := json.MarshalWithOption(v, json.Colorize(json.DefaultColorScheme)) +if err != nil { + ... 
+} +fmt.Println(string(b)) // print colored json +``` + +### Refactor + +* Fix opcode layout - Adjust memory layout of the opcode to 128 bytes in a 64-bit environment ( #230 ) +* Refactor encode option ( #231 ) +* Refactor escape string ( #232 ) + +# v0.5.1 - 2021/5/20 + +### Optimization + +* Add type addrShift to enable bigger encoder/decoder cache ( #213 ) + +### Fix decoder + +* Keep original reference of slice element ( #229 ) + +### Refactor + +* Refactor Debug mode for encoding ( #226 ) +* Generate VM sources for encoding ( #227 ) +* Refactor validator for null/true/false for decoding ( #221 ) + +# v0.5.0 - 2021/5/9 + +### Supports using omitempty and string tags at the same time ( #216 ) + +### Fix decoder + +* Fix stream decoder for unicode char ( #215 ) +* Fix decoding of slice element ( #219 ) +* Fix calculating of buffer length for stream decoder ( #220 ) + +### Refactor + +* replace skipWhiteSpace goto by loop ( #212 ) + +# v0.4.14 - 2021/5/4 + +### Benchmark + +* Add valyala/fastjson to benchmark ( #193 ) +* Add benchmark task for CI ( #211 ) + +### Fix decoder + +* Fix decoding of slice with unmarshal json type ( #198 ) +* Fix decoding of null value for interface type that does not implement Unmarshaler ( #205 ) +* Fix decoding of null value to []byte by json.Unmarshal ( #206 ) +* Fix decoding of backslash char at the end of string ( #207 ) +* Fix stream decoder for null/true/false value ( #208 ) +* Fix stream decoder for slow reader ( #211 ) + +### Performance + +* If cap of slice is enough, reuse slice data for compatibility with encoding/json ( #200 ) + +# v0.4.13 - 2021/4/20 + +### Fix json.Compact and json.Indent + +* Support validation the input buffer for json.Compact and json.Indent ( #189 ) +* Optimize json.Compact and json.Indent ( improve memory footprint ) ( #190 ) + +# v0.4.12 - 2021/4/15 + +### Fix encoder + +* Fix unnecessary indent for empty slice type ( #181 ) +* Fix encoding of omitempty feature for the slice or interface type ( #183 ) +* Fix encoding custom types zero values with omitempty when marshaller exists ( #187 ) + +### Fix decoder + +* Fix decoder for invalid top level value ( #184 ) +* Fix decoder for invalid number value ( #185 ) + +# v0.4.11 - 2021/4/3 + +* Improve decoder performance for interface type + +# v0.4.10 - 2021/4/2 + +### Fix encoder + +* Fixed a bug when encoding slice and map containing recursive structures +* Fixed a logic to determine if indirect reference + +# v0.4.9 - 2021/3/29 + +### Add debug mode + +If you use `json.MarshalWithOption(v, json.Debug())` and `panic` occurred in `go-json`, produces debug information to console. + +### Support a new feature to compatible with encoding/json + +- invalid UTF-8 is coerced to valid UTF-8 ( without performance down ) + +### Fix encoder + +- Fixed handling of MarshalJSON of function type + +### Fix decoding of slice of pointer type + +If there is a pointer value, go-json will use it. (This behavior is necessary to achieve the ability to prioritize pre-filled values). However, since slices are reused internally, there was a bug that referred to the previous pointer value. Therefore, it is not necessary to refer to the pointer value in advance for the slice element, so we explicitly initialize slice element by `nil`. + +# v0.4.8 - 2021/3/21 + +### Reduce memory usage at compile time + +* go-json have used about 2GB of memory at compile time, but now it can compile with about less than 550MB. 
+ +### Fix any encoder's bug + +* Add many test cases for encoder +* Fix composite type ( slice/array/map ) +* Fix pointer types +* Fix encoding of MarshalJSON or MarshalText or json.Number type + +### Refactor encoder + +* Change package layout for reducing memory usage at compile +* Remove anonymous and only operation +* Remove root property from encodeCompileContext and opcode + +### Fix CI + +* Add Go 1.16 +* Remove Go 1.13 +* Fix `make cover` task + +### Number/Delim/Token/RawMessage use the types defined in encoding/json by type alias + +# v0.4.7 - 2021/02/22 + +### Fix decoder + +* Fix decoding of deep recursive structure +* Fix decoding of embedded unexported pointer field +* Fix invalid test case +* Fix decoding of invalid value +* Fix decoding of prefilled value +* Fix not being able to return UnmarshalTypeError when it should be returned +* Fix decoding of null value +* Fix decoding of type of null string +* Use pre allocated pointer if exists it at decoding + +### Reduce memory usage at compile + +* Integrate int/int8/int16/int32/int64 and uint/uint8/uint16/uint32/uint64 operation to reduce memory usage at compile + +### Remove unnecessary optype diff --git a/vendor/github.com/goccy/go-json/LICENSE b/vendor/github.com/goccy/go-json/LICENSE new file mode 100644 index 000000000000..6449c8bff65e --- /dev/null +++ b/vendor/github.com/goccy/go-json/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Masaaki Goshima + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/goccy/go-json/Makefile b/vendor/github.com/goccy/go-json/Makefile new file mode 100644 index 000000000000..c030577dcf9d --- /dev/null +++ b/vendor/github.com/goccy/go-json/Makefile @@ -0,0 +1,39 @@ +PKG := github.com/goccy/go-json + +BIN_DIR := $(CURDIR)/bin +PKGS := $(shell go list ./... | grep -v internal/cmd|grep -v test) +COVER_PKGS := $(foreach pkg,$(PKGS),$(subst $(PKG),.,$(pkg))) + +COMMA := , +EMPTY := +SPACE := $(EMPTY) $(EMPTY) +COVERPKG_OPT := $(subst $(SPACE),$(COMMA),$(COVER_PKGS)) + +$(BIN_DIR): + @mkdir -p $(BIN_DIR) + +.PHONY: cover +cover: + go test -coverpkg=$(COVERPKG_OPT) -coverprofile=cover.out ./... 
+ +.PHONY: cover-html +cover-html: cover + go tool cover -html=cover.out + +.PHONY: lint +lint: golangci-lint + $(BIN_DIR)/golangci-lint run + +golangci-lint: | $(BIN_DIR) + @{ \ + set -e; \ + GOLANGCI_LINT_TMP_DIR=$$(mktemp -d); \ + cd $$GOLANGCI_LINT_TMP_DIR; \ + go mod init tmp; \ + GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2; \ + rm -rf $$GOLANGCI_LINT_TMP_DIR; \ + } + +.PHONY: generate +generate: + go generate ./internal/... diff --git a/vendor/github.com/goccy/go-json/README.md b/vendor/github.com/goccy/go-json/README.md new file mode 100644 index 000000000000..7bacc54f9cd0 --- /dev/null +++ b/vendor/github.com/goccy/go-json/README.md @@ -0,0 +1,529 @@ +# go-json + +![Go](https://github.com/goccy/go-json/workflows/Go/badge.svg) +[![GoDoc](https://godoc.org/github.com/goccy/go-json?status.svg)](https://pkg.go.dev/github.com/goccy/go-json?tab=doc) +[![codecov](https://codecov.io/gh/goccy/go-json/branch/master/graph/badge.svg)](https://codecov.io/gh/goccy/go-json) + +Fast JSON encoder/decoder compatible with encoding/json for Go + + + +# Roadmap + +``` +* version ( expected release date ) + +* v0.9.0 + | + | while maintaining compatibility with encoding/json, we will add convenient APIs + | + v +* v1.0.0 +``` + +We are accepting requests for features that will be implemented between v0.9.0 and v.1.0.0. +If you have the API you need, please submit your issue [here](https://github.com/goccy/go-json/issues). + +# Features + +- Drop-in replacement of `encoding/json` +- Fast ( See [Benchmark section](https://github.com/goccy/go-json#benchmarks) ) +- Flexible customization with options +- Coloring the encoded string +- Can propagate context.Context to `MarshalJSON` or `UnmarshalJSON` +- Can dynamically filter the fields of the structure type-safely + +# Installation + +``` +go get github.com/goccy/go-json +``` + +# How to use + +Replace import statement from `encoding/json` to `github.com/goccy/go-json` + +``` +-import "encoding/json" ++import "github.com/goccy/go-json" +``` + +# JSON library comparison + +| name | encoder | decoder | compatible with `encoding/json` | +| :----: | :------: | :-----: | :-----------------------------: | +| encoding/json | yes | yes | N/A | +| [json-iterator/go](https://github.com/json-iterator/go) | yes | yes | partial | +| [easyjson](https://github.com/mailru/easyjson) | yes | yes | no | +| [gojay](https://github.com/francoispqt/gojay) | yes | yes | no | +| [segmentio/encoding/json](https://github.com/segmentio/encoding/tree/master/json) | yes | yes | partial | +| [jettison](https://github.com/wI2L/jettison) | yes | no | no | +| [simdjson-go](https://github.com/minio/simdjson-go) | no | yes | no | +| goccy/go-json | yes | yes | yes | + +- `json-iterator/go` isn't compatible with `encoding/json` in many ways (e.g. https://github.com/json-iterator/go/issues/229 ), but it hasn't been supported for a long time. +- `segmentio/encoding/json` is well supported for encoders, but some are not supported for decoder APIs such as `Token` ( streaming decode ) + +## Other libraries + +- [jingo](https://github.com/bet365/jingo) + +I tried the benchmark but it didn't work. +Also, it seems to panic when it receives an unexpected value because there is no error handling... + +- [ffjson](https://github.com/pquerna/ffjson) + +Benchmarking gave very slow results. +It seems that it is assumed that the user will use the buffer pool properly. 
+Also, development seems to have already stopped + +# Benchmarks + +``` +$ cd benchmarks +$ go test -bench . +``` + +## Encode + + + + +## Decode + + + + + + +# Fuzzing + +[go-json-fuzz](https://github.com/goccy/go-json-fuzz) is the repository for fuzzing tests. +If you run the test in this repository and find a bug, please commit to corpus to go-json-fuzz and report the issue to [go-json](https://github.com/goccy/go-json/issues). + +# How it works + +`go-json` is very fast in both encoding and decoding compared to other libraries. +It's easier to implement by using automatic code generation for performance or by using a dedicated interface, but `go-json` dares to stick to compatibility with `encoding/json` and is the simple interface. Despite this, we are developing with the aim of being the fastest library. + +Here, we explain the various speed-up techniques implemented by `go-json`. + +## Basic technique + +The techniques listed here are the ones used by most of the libraries listed above. + +### Buffer reuse + +Since the only value required for the result of `json.Marshal(interface{}) ([]byte, error)` is `[]byte`, the only value that must be allocated during encoding is the return value `[]byte` . + +Also, as the number of allocations increases, the performance will be affected, so the number of allocations should be kept as low as possible when creating `[]byte`. + +Therefore, there is a technique to reduce the number of times a new buffer must be allocated by reusing the buffer used for the previous encoding by using `sync.Pool`. + +Finally, you allocate a buffer that is as long as the resulting buffer and copy the contents into it, you only need to allocate the buffer once in theory. + +```go +type buffer struct { + data []byte +} + +var bufPool = sync.Pool{ + New: func() interface{} { + return &buffer{data: make([]byte, 0, 1024)} + }, +} + +buf := bufPool.Get().(*buffer) +data := encode(buf.data) // reuse buf.data + +newBuf := make([]byte, len(data)) +copy(newBuf, buf) + +buf.data = data +bufPool.Put(buf) +``` + +### Elimination of reflection + +As you know, the reflection operation is very slow. + +Therefore, using the fact that the address position where the type information is stored is fixed for each binary ( we call this `typeptr` ), +we can use the address in the type information to call a pre-built optimized process. + +For example, you can get the address to the type information from `interface{}` as follows and you can use that information to call a process that does not have reflection. + +To process without reflection, pass a pointer (`unsafe.Pointer`) to the value is stored. + +```go + +type emptyInterface struct { + typ unsafe.Pointer + ptr unsafe.Pointer +} + +var typeToEncoder = map[uintptr]func(unsafe.Pointer)([]byte, error){} + +func Marshal(v interface{}) ([]byte, error) { + iface := (*emptyInterface)(unsafe.Pointer(&v) + typeptr := uintptr(iface.typ) + if enc, exists := typeToEncoder[typeptr]; exists { + return enc(iface.ptr) + } + ... +} +``` + +※ In reality, `typeToEncoder` can be referenced by multiple goroutines, so exclusive control is required. + +## Unique speed-up technique + +## Encoder + +### Do not escape arguments of `Marshal` + +`json.Marshal` and `json.Unmarshal` receive `interface{}` value and they perform type determination dynamically to process. 
+In normal case, you need to use the `reflect` library to determine the type dynamically, but since `reflect.Type` is defined as `interface`, when you call the method of `reflect.Type`, The reflect's argument is escaped. + +Therefore, the arguments for `Marshal` and `Unmarshal` are always escaped to the heap. +However, `go-json` can use the feature of `reflect.Type` while avoiding escaping. + +`reflect.Type` is defined as `interface`, but in reality `reflect.Type` is implemented only by the structure `rtype` defined in the `reflect` package. +For this reason, to date `reflect.Type` is the same as `*reflect.rtype`. + +Therefore, by directly handling `*reflect.rtype`, which is an implementation of `reflect.Type`, it is possible to avoid escaping because it changes from `interface` to using `struct`. + +The technique for working with `*reflect.rtype` directly from `go-json` is implemented at [rtype.go](https://github.com/goccy/go-json/blob/master/internal/runtime/rtype.go) + +Also, the same technique is cut out as a library ( https://github.com/goccy/go-reflect ) + +Initially this feature was the default behavior of `go-json`. +But after careful testing, I found that I passed a large value to `json.Marshal()` and if the argument could not be assigned to the stack, it could not be properly escaped to the heap (a bug in the Go compiler). + +Therefore, this feature will be provided as an **optional** until this issue is resolved. + +To use it, add `NoEscape` like `MarshalNoEscape()` + +### Encoding using opcode sequence + +I explained that you can use `typeptr` to call a pre-built process from type information. + +In other libraries, this dedicated process is processed by making it an function calling like anonymous function, but function calls are inherently slow processes and should be avoided as much as possible. + +Therefore, `go-json` adopted the Instruction-based execution processing system, which is also used to implement virtual machines for programming language. + +If it is the first type to encode, create the opcode ( instruction ) sequence required for encoding. +From the second time onward, use `typeptr` to get the cached pre-built opcode sequence and encode it based on it. An example of the opcode sequence is shown below. + +```go +json.Marshal(struct{ + X int `json:"x"` + Y string `json:"y"` +}{X: 1, Y: "hello"}) +``` + +When encoding a structure like the one above, create a sequence of opcodes like this: + +``` +- opStructFieldHead ( `{` ) +- opStructFieldInt ( `"x": 1,` ) +- opStructFieldString ( `"y": "hello"` ) +- opStructEnd ( `}` ) +- opEnd +``` + +※ When processing each operation, write the letters on the right. + +In addition, each opcode is managed by the following structure ( +Pseudo code ). + +```go +type opType int +const ( + opStructFieldHead opType = iota + opStructFieldInt + opStructFieldStirng + opStructEnd + opEnd +) +type opcode struct { + op opType + key []byte + next *opcode +} +``` + +The process of encoding using the opcode sequence is roughly implemented as follows. + +```go +func encode(code *opcode, b []byte, p unsafe.Pointer) ([]byte, error) { + for { + switch code.op { + case opStructFieldHead: + b = append(b, '{') + code = code.next + case opStructFieldInt: + b = append(b, code.key...) + b = appendInt((*int)(unsafe.Pointer(uintptr(p)+code.offset))) + code = code.next + case opStructFieldString: + b = append(b, code.key...) 
+ b = appendString((*string)(unsafe.Pointer(uintptr(p)+code.offset))) + code = code.next + case opStructEnd: + b = append(b, '}') + code = code.next + case opEnd: + goto END + } + } +END: + return b, nil +} +``` + +In this way, the huge `switch-case` is used to encode by manipulating the linked list opcodes to avoid unnecessary function calls. + +### Opcode sequence optimization + +One of the advantages of encoding using the opcode sequence is the ease of optimization. +The opcode sequence mentioned above is actually converted into the following optimized operations and used. + +``` +- opStructFieldHeadInt ( `{"x": 1,` ) +- opStructEndString ( `"y": "hello"}` ) +- opEnd +``` + +It has been reduced from 5 opcodes to 3 opcodes ! +Reducing the number of opcodees means reducing the number of branches with `switch-case`. +In other words, the closer the number of operations is to 1, the faster the processing can be performed. + +In `go-json`, optimization to reduce the number of opcodes itself like the above and it speeds up by preparing opcodes with optimized paths. + +### Change recursive call from CALL to JMP + +Recursive processing is required during encoding if the type is defined recursively as follows: + +```go +type T struct { + X int + U *U +} + +type U struct { + T *T +} + +b, err := json.Marshal(&T{ + X: 1, + U: &U{ + T: &T{ + X: 2, + }, + }, +}) +fmt.Println(string(b)) // {"X":1,"U":{"T":{"X":2,"U":null}}} +``` + +In `go-json`, recursive processing is processed by the operation type of ` opStructFieldRecursive`. + +In this operation, after acquiring the opcode sequence used for recursive processing, the function is **not** called recursively as it is, but the necessary values ​​are saved by itself and implemented by moving to the next operation. + +The technique of implementing recursive processing with the `JMP` operation while avoiding the `CALL` operation is a famous technique for implementing a high-speed virtual machine. + +For more details, please refer to [the article](https://engineering.mercari.com/blog/entry/1599563768-081104c850) ( but Japanese only ). + +### Dispatch by typeptr from map to slice + +When retrieving the data cached from the type information by `typeptr`, we usually use map. +Map requires exclusive control, so use `sync.Map` for a naive implementation. + +However, this is slow, so it's a good idea to use the `atomic` package for exclusive control as implemented by `segmentio/encoding/json` ( https://github.com/segmentio/encoding/blob/master/json/codec.go#L41-L55 ). + +This implementation slows down the set instead of speeding up the get, but it works well because of the nature of the library, it encodes much more for the same type. + +However, as a result of profiling, I noticed that `runtime.mapaccess2` accounts for a significant percentage of the execution time. So I thought if I could change the lookup from map to slice. + +There is an API named `typelinks` defined in the `runtime` package that the `reflect` package uses internally. +This allows you to get all the type information defined in the binary at runtime. + +The fact that all type information can be acquired means that by constructing slices in advance with the acquired total number of type information, it is possible to look up with the value of `typeptr` without worrying about out-of-range access. + +However, if there is too much type information, it will use a lot of memory, so by default we will only use this optimization if the slice size fits within **2Mib** . 
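A minimal sketch of the slice-based dispatch described above, including the size guard and a map fallback (this is an editor's illustration, not go-json's actual code: go-json builds its table from the `typelinks` data and applies an address shift to keep it compact, and every name below — `register`, `lookup`, `encodeFunc`, `maxTableBytes` — is assumed for the example):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Same interface{} layout trick shown earlier in this README.
type emptyInterface struct {
	typ unsafe.Pointer
	ptr unsafe.Pointer
}

func typeptrOf(v interface{}) uintptr {
	return uintptr((*emptyInterface)(unsafe.Pointer(&v)).typ)
}

type encodeFunc func(p unsafe.Pointer) string

const maxTableBytes = 2 << 20 // ~2MiB guard, mirroring the limit mentioned above

var (
	baseAddr uintptr
	table    []encodeFunc               // fast path: index by typeptr-baseAddr
	fallback = map[uintptr]encodeFunc{} // slow path when the table would be too large
)

// register builds the slice table if the typeptr range is small enough,
// otherwise it keeps the plain map.
func register(samples map[uintptr]encodeFunc) {
	lo, hi := ^uintptr(0), uintptr(0)
	for addr := range samples {
		if addr < lo {
			lo = addr
		}
		if addr > hi {
			hi = addr
		}
	}
	if size := (hi - lo + 1) * unsafe.Sizeof(encodeFunc(nil)); size <= maxTableBytes {
		baseAddr = lo
		table = make([]encodeFunc, hi-lo+1)
		for addr, fn := range samples {
			table[addr-baseAddr] = fn
		}
		return
	}
	fallback = samples
}

// lookup is the hot path: a slice index instead of runtime.mapaccess2.
func lookup(addr uintptr) encodeFunc {
	if table != nil {
		return table[addr-baseAddr]
	}
	return fallback[addr]
}

func main() {
	register(map[uintptr]encodeFunc{
		typeptrOf(int(0)): func(p unsafe.Pointer) string { return fmt.Sprint(*(*int)(p)) },
		typeptrOf(""):     func(p unsafe.Pointer) string { return fmt.Sprintf("%q", *(*string)(p)) },
	})

	v := interface{}("hello")
	iface := (*emptyInterface)(unsafe.Pointer(&v))
	fmt.Println(lookup(uintptr(iface.typ))(iface.ptr)) // "hello"
}
```

The trade-off is the one the text describes: lookups become a plain slice index, at the cost of memory proportional to the typeptr range, which is why the size guard and the fallback path exist.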
+ +If this approach is not available, it will fall back to the `atomic` based process described above. + +If you want to know more, please refer to the implementation [here](https://github.com/goccy/go-json/blob/master/internal/runtime/type.go#L36-L100) + +## Decoder + +### Dispatch by typeptr from map to slice + +Like the encoder, the decoder also uses typeptr to call the dedicated process. + +### Faster termination character inspection using NUL character + +In order to decode, you have to traverse the input buffer character by position. +At that time, if you check whether the buffer has reached the end, it will be very slow. + +`buf` : `[]byte` type variable. holds the string passed to the decoder +`cursor` : `int64` type variable. holds the current read position + +```go +buflen := len(buf) +for ; cursor < buflen; cursor++ { // compare cursor and buflen at all times, it is so slow. + switch buf[cursor] { + case ' ', '\n', '\r', '\t': + } +} +``` + +Therefore, by adding the `NUL` (`\000`) character to the end of the read buffer as shown below, it is possible to check the termination character at the same time as other characters. + +```go +for { + switch buf[cursor] { + case ' ', '\n', '\r', '\t': + case '\000': + return nil + } + cursor++ +} +``` + +### Use Boundary Check Elimination + +Due to the `NUL` character optimization, the Go compiler does a boundary check every time, even though `buf[cursor]` does not cause out-of-range access. + +Therefore, `go-json` eliminates boundary check by fetching characters for hotspot by pointer operation. For example, the following code. + +```go +func char(ptr unsafe.Pointer, offset int64) byte { + return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) +} + +p := (*sliceHeader)(&unsafe.Pointer(buf)).data +for { + switch char(p, cursor) { + case ' ', '\n', '\r', '\t': + case '\000': + return nil + } + cursor++ +} +``` + +### Checking the existence of fields of struct using Bitmaps + +I found by the profiling result, in the struct decode, lookup process for field was taking a long time. + +For example, consider decoding a string like `{"a":1,"b":2,"c":3}` into the following structure: + +```go +type T struct { + A int `json:"a"` + B int `json:"b"` + C int `json:"c"` +} +``` + +At this time, it was found that it takes a lot of time to acquire the decoding process corresponding to the field from the field name as shown below during the decoding process. + +```go +fieldName := decodeKey(buf, cursor) // "a" or "b" or "c" +decoder, exists := fieldToDecoderMap[fieldName] // so slow +if exists { + decoder(buf, cursor) +} else { + skipValue(buf, cursor) +} +``` + +To improve this process, `json-iterator/go` is optimized so that it can be branched by switch-case when the number of fields in the structure is 10 or less (switch-case is faster than map). However, there is a risk of hash collision because the value hashed by the FNV algorithm is used for conditional branching. Also, `gojay` processes this part at high speed by letting the library user yourself write `switch-case`. + + +`go-json` considers and implements a new approach that is different from these. I call this **bitmap field optimization**. + +The range of values ​​per character can be represented by `[256]byte`. Also, if the number of fields in the structure is 8 or less, `int8` type can represent the state of each field. +In other words, it has the following structure. 
+ +- Base ( 8bit ): `00000000` +- Key "a": `00000001` ( assign key "a" to the first bit ) +- Key "b": `00000010` ( assign key "b" to the second bit ) +- Key "c": `00000100` ( assign key "c" to the third bit ) + +Bitmap structure is the following + +``` + | key index(0) | +------------------------ + 0 | 00000000 | + 1 | 00000000 | +~~ | | +97 (a) | 00000001 | +98 (b) | 00000010 | +99 (c) | 00000100 | +~~ | | +255 | 00000000 | +``` + +You can think of this as a Bitmap with a height of `256` and a width of the maximum string length in the field name. +In other words, it can be represented by the following type . + +```go +[maxFieldKeyLength][256]int8 +``` + +When decoding a field character, check whether the corresponding character exists by referring to the pre-built bitmap like the following. + +```go +var curBit int8 = math.MaxInt8 // 11111111 + +c := char(buf, cursor) +bit := bitmap[keyIdx][c] +curBit &= bit +if curBit == 0 { + // not found field +} +``` + +If `curBit` is not `0` until the end of the field string, then the string is +You may have hit one of the fields. +But the possibility is that if the decoded string is shorter than the field string, you will get a false hit. + +- input: `{"a":1}` +```go +type T struct { + X int `json:"abc"` +} +``` +※ Since `a` is shorter than `abc`, it can decode to the end of the field character without `curBit` being 0. + +Rest assured. In this case, it doesn't matter because you can tell if you hit by comparing the string length of `a` with the string length of `abc`. + +Finally, calculate the position of the bit where `1` is set and get the corresponding value, and you're done. + +Using this technique, field lookups are possible with only bitwise operations and access to slices. + +`go-json` uses a similar technique for fields with 9 or more and 16 or less fields. At this time, Bitmap is constructed as `[maxKeyLen][256]int16` type. + +Currently, this optimization is not performed when the maximum length of the field name is long (specifically, 64 bytes or more) in addition to the limitation of the number of fields from the viewpoint of saving memory usage. + +### Others + +I have done a lot of other optimizations. I will find time to write about them. If you have any questions about what's written here or other optimizations, please visit the `#go-json` channel on `gophers.slack.com` . + +## Reference + +Regarding the story of go-json, there are the following articles in Japanese only. + +- https://speakerdeck.com/goccy/zui-su-falsejsonraiburariwoqiu-mete +- https://engineering.mercari.com/blog/entry/1599563768-081104c850/ + +# Looking for Sponsors + +I'm looking for sponsors this library. This library is being developed as a personal project in my spare time. If you want a quick response or problem resolution when using this library in your project, please register as a [sponsor](https://github.com/sponsors/goccy). I will cooperate as much as possible. Of course, this library is developed as an MIT license, so you can use it freely for free. 
+ +# License + +MIT diff --git a/vendor/github.com/goccy/go-json/color.go b/vendor/github.com/goccy/go-json/color.go new file mode 100644 index 000000000000..e80b22b4869a --- /dev/null +++ b/vendor/github.com/goccy/go-json/color.go @@ -0,0 +1,68 @@ +package json + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +type ( + ColorFormat = encoder.ColorFormat + ColorScheme = encoder.ColorScheme +) + +const escape = "\x1b" + +type colorAttr int + +//nolint:deadcode,varcheck +const ( + fgBlackColor colorAttr = iota + 30 + fgRedColor + fgGreenColor + fgYellowColor + fgBlueColor + fgMagentaColor + fgCyanColor + fgWhiteColor +) + +//nolint:deadcode,varcheck +const ( + fgHiBlackColor colorAttr = iota + 90 + fgHiRedColor + fgHiGreenColor + fgHiYellowColor + fgHiBlueColor + fgHiMagentaColor + fgHiCyanColor + fgHiWhiteColor +) + +func createColorFormat(attr colorAttr) ColorFormat { + return ColorFormat{ + Header: wrapColor(attr), + Footer: resetColor(), + } +} + +func wrapColor(attr colorAttr) string { + return fmt.Sprintf("%s[%dm", escape, attr) +} + +func resetColor() string { + return wrapColor(colorAttr(0)) +} + +var ( + DefaultColorScheme = &ColorScheme{ + Int: createColorFormat(fgHiMagentaColor), + Uint: createColorFormat(fgHiMagentaColor), + Float: createColorFormat(fgHiMagentaColor), + Bool: createColorFormat(fgHiYellowColor), + String: createColorFormat(fgHiGreenColor), + Binary: createColorFormat(fgHiRedColor), + ObjectKey: createColorFormat(fgHiCyanColor), + Null: createColorFormat(fgBlueColor), + } +) diff --git a/vendor/github.com/goccy/go-json/decode.go b/vendor/github.com/goccy/go-json/decode.go new file mode 100644 index 000000000000..74c6ac3bcad7 --- /dev/null +++ b/vendor/github.com/goccy/go-json/decode.go @@ -0,0 +1,263 @@ +package json + +import ( + "context" + "fmt" + "io" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/decoder" + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type Decoder struct { + s *decoder.Stream +} + +const ( + nul = '\000' +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +func unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + cursor, err := dec.Decode(ctx, 0, 0, header.ptr) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return err + } + decoder.ReleaseRuntimeContext(ctx) + return validateEndBuf(src, cursor) +} + +func unmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + rctx := decoder.TakeRuntimeContext() + rctx.Buf = src + rctx.Option.Flags = 0 + rctx.Option.Flags |= decoder.ContextOption + rctx.Option.Context = ctx + for _, optFunc := range optFuncs { + optFunc(rctx.Option) + } + cursor, err := 
dec.Decode(rctx, 0, 0, header.ptr) + if err != nil { + decoder.ReleaseRuntimeContext(rctx) + return err + } + decoder.ReleaseRuntimeContext(rctx) + return validateEndBuf(src, cursor) +} + +var ( + pathDecoder = decoder.NewPathDecoder() +) + +func extractFromPath(path *Path, data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) { + if path.path.RootSelectorOnly { + return [][]byte{data}, nil + } + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + ctx.Option.Flags |= decoder.PathOption + ctx.Option.Path = path.path + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + paths, cursor, err := pathDecoder.DecodePath(ctx, 0, 0) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return nil, err + } + decoder.ReleaseRuntimeContext(ctx) + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return paths, nil +} + +func unmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + cursor, err := dec.Decode(ctx, 0, 0, noescape(header.ptr)) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return err + } + decoder.ReleaseRuntimeContext(ctx) + return validateEndBuf(src, cursor) +} + +func validateEndBuf(src []byte, cursor int64) error { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case nul: + return nil + } + return errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]), + cursor+1, + ) + } +} + +//nolint:staticcheck +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func validateType(typ *runtime.Type, p uintptr) error { + if typ == nil || typ.Kind() != reflect.Ptr || p == 0 { + return &InvalidUnmarshalError{Type: runtime.RType2Type(typ)} + } + return nil +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may +// read data from r beyond the JSON values requested. +func NewDecoder(r io.Reader) *Decoder { + s := decoder.NewStream(r) + return &Decoder{ + s: s, + } +} + +// Buffered returns a reader of the data remaining in the Decoder's +// buffer. The reader is valid until the next call to Decode. +func (d *Decoder) Buffered() io.Reader { + return d.s.Buffered() +} + +// Decode reads the next JSON-encoded value from its +// input and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about +// the conversion of JSON into a Go value. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeWithOption(v) +} + +// DecodeContext reads the next JSON-encoded value from its +// input and stores it in the value pointed to by v with context.Context. 
+func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error { + d.s.Option.Flags |= decoder.ContextOption + d.s.Option.Context = ctx + return d.DecodeWithOption(v) +} + +func (d *Decoder) DecodeWithOption(v interface{}, optFuncs ...DecodeOptionFunc) error { + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + ptr := uintptr(header.ptr) + typeptr := uintptr(unsafe.Pointer(typ)) + // noescape trick for header.typ ( reflect.*rtype ) + copiedType := *(**runtime.Type)(unsafe.Pointer(&typeptr)) + + if err := validateType(copiedType, ptr); err != nil { + return err + } + + dec, err := decoder.CompileToGetDecoder(typ) + if err != nil { + return err + } + if err := d.s.PrepareForDecode(); err != nil { + return err + } + s := d.s + for _, optFunc := range optFuncs { + optFunc(s.Option) + } + if err := dec.DecodeStream(s, 0, header.ptr); err != nil { + return err + } + s.Reset() + return nil +} + +func (d *Decoder) More() bool { + return d.s.More() +} + +func (d *Decoder) Token() (Token, error) { + return d.s.Token() +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields() { + d.s.DisallowUnknownFields = true +} + +func (d *Decoder) InputOffset() int64 { + return d.s.TotalOffset() +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (d *Decoder) UseNumber() { + d.s.UseNumber = true +} diff --git a/vendor/github.com/goccy/go-json/docker-compose.yml b/vendor/github.com/goccy/go-json/docker-compose.yml new file mode 100644 index 000000000000..db40c79ad5da --- /dev/null +++ b/vendor/github.com/goccy/go-json/docker-compose.yml @@ -0,0 +1,13 @@ +version: '2' +services: + go-json: + image: golang:1.18 + volumes: + - '.:/go/src/go-json' + deploy: + resources: + limits: + memory: 620M + working_dir: /go/src/go-json + command: | + sh -c "go test -c . && ls go-json.test" diff --git a/vendor/github.com/goccy/go-json/encode.go b/vendor/github.com/goccy/go-json/encode.go new file mode 100644 index 000000000000..c5173825a95a --- /dev/null +++ b/vendor/github.com/goccy/go-json/encode.go @@ -0,0 +1,326 @@ +package json + +import ( + "context" + "io" + "os" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/encoder/vm" + "github.com/goccy/go-json/internal/encoder/vm_color" + "github.com/goccy/go-json/internal/encoder/vm_color_indent" + "github.com/goccy/go-json/internal/encoder/vm_indent" +) + +// An Encoder writes JSON values to an output stream. +type Encoder struct { + w io.Writer + enabledIndent bool + enabledHTMLEscape bool + prefix string + indentStr string +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, enabledHTMLEscape: true} +} + +// Encode writes the JSON encoding of v to the stream, followed by a newline character. +// +// See the documentation for Marshal for details about the conversion of Go values to JSON. +func (e *Encoder) Encode(v interface{}) error { + return e.EncodeWithOption(v) +} + +// EncodeWithOption call Encode with EncodeOption. +func (e *Encoder) EncodeWithOption(v interface{}, optFuncs ...EncodeOptionFunc) error { + ctx := encoder.TakeRuntimeContext() + ctx.Option.Flag = 0 + + err := e.encodeWithOption(ctx, v, optFuncs...) 
+ + encoder.ReleaseRuntimeContext(ctx) + return err +} + +// EncodeContext call Encode with context.Context and EncodeOption. +func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) error { + rctx := encoder.TakeRuntimeContext() + rctx.Option.Flag = 0 + rctx.Option.Flag |= encoder.ContextOption + rctx.Option.Context = ctx + + err := e.encodeWithOption(rctx, v, optFuncs...) //nolint: contextcheck + + encoder.ReleaseRuntimeContext(rctx) + return err +} + +func (e *Encoder) encodeWithOption(ctx *encoder.RuntimeContext, v interface{}, optFuncs ...EncodeOptionFunc) error { + if e.enabledHTMLEscape { + ctx.Option.Flag |= encoder.HTMLEscapeOption + } + ctx.Option.Flag |= encoder.NormalizeUTF8Option + ctx.Option.DebugOut = os.Stdout + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + var ( + buf []byte + err error + ) + if e.enabledIndent { + buf, err = encodeIndent(ctx, v, e.prefix, e.indentStr) + } else { + buf, err = encode(ctx, v) + } + if err != nil { + return err + } + if e.enabledIndent { + buf = buf[:len(buf)-2] + } else { + buf = buf[:len(buf)-1] + } + buf = append(buf, '\n') + if _, err := e.w.Write(buf); err != nil { + return err + } + return nil +} + +// SetEscapeHTML specifies whether problematic HTML characters should be escaped inside JSON quoted strings. +// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e to avoid certain safety problems that can arise when embedding JSON in HTML. +// +// In non-HTML settings where the escaping interferes with the readability of the output, SetEscapeHTML(false) disables this behavior. +func (e *Encoder) SetEscapeHTML(on bool) { + e.enabledHTMLEscape = on +} + +// SetIndent instructs the encoder to format each subsequent encoded value as if indented by the package-level function Indent(dst, src, prefix, indent). +// Calling SetIndent("", "") disables indentation. +func (e *Encoder) SetIndent(prefix, indent string) { + if prefix == "" && indent == "" { + e.enabledIndent = false + return + } + e.prefix = prefix + e.indentStr = indent + e.enabledIndent = true +} + +func marshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + rctx := encoder.TakeRuntimeContext() + rctx.Option.Flag = 0 + rctx.Option.Flag = encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.ContextOption + rctx.Option.Context = ctx + for _, optFunc := range optFuncs { + optFunc(rctx.Option) + } + + buf, err := encode(rctx, v) //nolint: contextcheck + if err != nil { + encoder.ReleaseRuntimeContext(rctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(rctx) + return copied, nil +} + +func marshal(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option) + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + + buf, err := encode(ctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . 
+ // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func marshalNoEscape(v interface{}) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option) + + buf, err := encodeNoEscape(ctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func marshalIndent(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.IndentOption) + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + + buf, err := encodeIndent(ctx, v, prefix, indent) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + buf = buf[:len(buf)-2] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func encode(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendComma(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + ctx.KeepRefs = append(ctx.KeepRefs, header.ptr) + + buf, err := encodeRunCode(ctx, b, codeSet) + if err != nil { + return nil, err + } + ctx.Buf = buf + return buf, nil +} + +func encodeNoEscape(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendComma(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + buf, err := encodeRunCode(ctx, b, codeSet) + if err != nil { + return nil, err + } + + ctx.Buf = buf + return buf, nil +} + +func encodeIndent(ctx *encoder.RuntimeContext, v interface{}, prefix, indent string) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendCommaIndent(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + buf, err := encodeRunIndentCode(ctx, b, codeSet, prefix, indent) + + ctx.KeepRefs = append(ctx.KeepRefs, header.ptr) 
+ + if err != nil { + return nil, err + } + + ctx.Buf = buf + return buf, nil +} + +func encodeRunCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + if (ctx.Option.Flag & encoder.DebugOption) != 0 { + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color.DebugRun(ctx, b, codeSet) + } + return vm.DebugRun(ctx, b, codeSet) + } + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color.Run(ctx, b, codeSet) + } + return vm.Run(ctx, b, codeSet) +} + +func encodeRunIndentCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet, prefix, indent string) ([]byte, error) { + ctx.Prefix = []byte(prefix) + ctx.IndentStr = []byte(indent) + if (ctx.Option.Flag & encoder.DebugOption) != 0 { + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color_indent.DebugRun(ctx, b, codeSet) + } + return vm_indent.DebugRun(ctx, b, codeSet) + } + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color_indent.Run(ctx, b, codeSet) + } + return vm_indent.Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/error.go b/vendor/github.com/goccy/go-json/error.go new file mode 100644 index 000000000000..5b2dcee50ecf --- /dev/null +++ b/vendor/github.com/goccy/go-json/error.go @@ -0,0 +1,41 @@ +package json + +import ( + "github.com/goccy/go-json/internal/errors" +) + +// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when +// attempting to encode a string value with invalid UTF-8 sequences. +// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by +// replacing invalid bytes with the Unicode replacement rune U+FFFD. +// +// Deprecated: No longer used; kept for compatibility. +type InvalidUTF8Error = errors.InvalidUTF8Error + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError = errors.InvalidUnmarshalError + +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. +type MarshalerError = errors.MarshalerError + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError = errors.SyntaxError + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError = errors.UnmarshalFieldError + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError = errors.UnmarshalTypeError + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. 
+type UnsupportedTypeError = errors.UnsupportedTypeError + +type UnsupportedValueError = errors.UnsupportedValueError + +type PathError = errors.PathError diff --git a/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go new file mode 100644 index 000000000000..b6876cf0d049 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go @@ -0,0 +1,41 @@ +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type anonymousFieldDecoder struct { + structType *runtime.Type + offset uintptr + dec Decoder +} + +func newAnonymousFieldDecoder(structType *runtime.Type, offset uintptr, dec Decoder) *anonymousFieldDecoder { + return &anonymousFieldDecoder{ + structType: structType, + offset: offset, + dec: dec, + } +} + +func (d *anonymousFieldDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + if *(*unsafe.Pointer)(p) == nil { + *(*unsafe.Pointer)(p) = unsafe_New(d.structType) + } + p = *(*unsafe.Pointer)(p) + return d.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+d.offset)) +} + +func (d *anonymousFieldDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + if *(*unsafe.Pointer)(p) == nil { + *(*unsafe.Pointer)(p) = unsafe_New(d.structType) + } + p = *(*unsafe.Pointer)(p) + return d.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+d.offset)) +} + +func (d *anonymousFieldDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return d.dec.DecodePath(ctx, cursor, depth) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/array.go b/vendor/github.com/goccy/go-json/internal/decoder/array.go new file mode 100644 index 000000000000..4b23ed43fe20 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/array.go @@ -0,0 +1,176 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type arrayDecoder struct { + elemType *runtime.Type + size uintptr + valueDecoder Decoder + alen int + structName string + fieldName string + zeroValue unsafe.Pointer +} + +func newArrayDecoder(dec Decoder, elemType *runtime.Type, alen int, structName, fieldName string) *arrayDecoder { + // workaround to avoid checkptr errors. cannot use `*(*unsafe.Pointer)(unsafe_New(elemType))` directly. 
+ zeroValuePtr := unsafe_New(elemType) + zeroValue := **(**unsafe.Pointer)(unsafe.Pointer(&zeroValuePtr)) + return &arrayDecoder{ + valueDecoder: dec, + elemType: elemType, + size: elemType.Size(), + alen: alen, + structName: structName, + fieldName: fieldName, + zeroValue: zeroValue, + } +} + +func (d *arrayDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + case '[': + idx := 0 + s.cursor++ + if s.skipWhiteSpace() == ']' { + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + s.cursor++ + return nil + } + for { + if idx < d.alen { + if err := d.valueDecoder.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)); err != nil { + return err + } + } else { + if err := s.skipValue(depth); err != nil { + return err + } + } + idx++ + switch s.skipWhiteSpace() { + case ']': + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + s.cursor++ + return nil + case ',': + s.cursor++ + continue + case nul: + if s.read() { + s.cursor++ + continue + } + goto ERROR + default: + goto ERROR + } + } + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + s.cursor++ + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("array", s.totalOffset()) +} + +func (d *arrayDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case '[': + idx := 0 + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + cursor++ + return cursor, nil + } + for { + if idx < d.alen { + c, err := d.valueDecoder.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)) + if err != nil { + return 0, err + } + cursor = c + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } + idx++ + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + cursor++ + return cursor, nil + case ',': + cursor++ + continue + default: + return 0, errors.ErrInvalidCharacter(buf[cursor], "array", cursor) + } + } + default: + return 0, errors.ErrUnexpectedEndOfJSON("array", cursor) + } + } +} + +func (d *arrayDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: array decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/assign.go b/vendor/github.com/goccy/go-json/internal/decoder/assign.go new file mode 100644 index 000000000000..c53e6ad9fc57 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/assign.go @@ -0,0 +1,438 @@ +package decoder + +import ( + "fmt" + "reflect" + "strconv" +) + +var ( 
+ nilValue = reflect.ValueOf(nil) +) + +func AssignValue(src, dst reflect.Value) error { + if dst.Type().Kind() != reflect.Ptr { + return fmt.Errorf("invalid dst type. required pointer type: %T", dst.Type()) + } + casted, err := castValue(dst.Elem().Type(), src) + if err != nil { + return err + } + dst.Elem().Set(casted) + return nil +} + +func castValue(t reflect.Type, v reflect.Value) (reflect.Value, error) { + switch t.Kind() { + case reflect.Int: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int(vv.Int())), nil + case reflect.Int8: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int8(vv.Int())), nil + case reflect.Int16: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int16(vv.Int())), nil + case reflect.Int32: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int32(vv.Int())), nil + case reflect.Int64: + return castInt(v) + case reflect.Uint: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint(vv.Uint())), nil + case reflect.Uint8: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint8(vv.Uint())), nil + case reflect.Uint16: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint16(vv.Uint())), nil + case reflect.Uint32: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint32(vv.Uint())), nil + case reflect.Uint64: + return castUint(v) + case reflect.Uintptr: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uintptr(vv.Uint())), nil + case reflect.String: + return castString(v) + case reflect.Bool: + return castBool(v) + case reflect.Float32: + vv, err := castFloat(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(float32(vv.Float())), nil + case reflect.Float64: + return castFloat(v) + case reflect.Array: + return castArray(t, v) + case reflect.Slice: + return castSlice(t, v) + case reflect.Map: + return castMap(t, v) + case reflect.Struct: + return castStruct(t, v) + } + return v, nil +} + +func castInt(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(int64(v.Uint())), nil + case reflect.String: + i64, err := strconv.ParseInt(v.String(), 10, 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(i64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(int64(1)), nil + } + return reflect.ValueOf(int64(0)), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(int64(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castInt(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to int64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castInt(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to int64 from empty slice") + case reflect.Interface: + return castInt(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to int64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to int64 from struct") + case reflect.Ptr: + return castInt(v.Elem()) + } 
+ return nilValue, fmt.Errorf("failed to cast to int64 from %s", v.Type().Kind()) +} + +func castUint(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(uint64(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v, nil + case reflect.String: + u64, err := strconv.ParseUint(v.String(), 10, 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(u64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(uint64(1)), nil + } + return reflect.ValueOf(uint64(0)), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(uint64(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castUint(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castUint(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from empty slice") + case reflect.Interface: + return castUint(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to uint64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to uint64 from struct") + case reflect.Ptr: + return castUint(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from %s", v.Type().Kind()) +} + +func castString(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(fmt.Sprint(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(fmt.Sprint(v.Uint())), nil + case reflect.String: + return v, nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf("true"), nil + } + return reflect.ValueOf("false"), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(fmt.Sprint(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castString(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castString(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty slice") + case reflect.Interface: + return castString(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to string from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to string from struct") + case reflect.Ptr: + return castString(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to string from %s", v.Type().Kind()) +} + +func castBool(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Int() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch v.Uint() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Uint()) + case reflect.String: + b, err := strconv.ParseBool(v.String()) + if err != nil { + return nilValue, err + 
} + return reflect.ValueOf(b), nil + case reflect.Bool: + return v, nil + case reflect.Float32, reflect.Float64: + switch v.Float() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %f", v.Float()) + case reflect.Array: + if v.Len() > 0 { + return castBool(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castBool(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty slice") + case reflect.Interface: + return castBool(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to string from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to string from struct") + case reflect.Ptr: + return castBool(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to bool from %s", v.Type().Kind()) +} + +func castFloat(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(float64(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(float64(v.Uint())), nil + case reflect.String: + f64, err := strconv.ParseFloat(v.String(), 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(f64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(float64(1)), nil + } + return reflect.ValueOf(float64(0)), nil + case reflect.Float32, reflect.Float64: + return v, nil + case reflect.Array: + if v.Len() > 0 { + return castFloat(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to float64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castFloat(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to float64 from empty slice") + case reflect.Interface: + return castFloat(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to float64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to float64 from struct") + case reflect.Ptr: + return castFloat(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to float64 from %s", v.Type().Kind()) +} + +func castArray(t reflect.Type, v reflect.Value) (reflect.Value, error) { + kind := v.Type().Kind() + if kind == reflect.Interface { + return castArray(t, reflect.ValueOf(v.Interface())) + } + if kind != reflect.Slice && kind != reflect.Array { + return nilValue, fmt.Errorf("failed to cast to array from %s", kind) + } + if t.Elem() == v.Type().Elem() { + return v, nil + } + if t.Len() != v.Len() { + return nilValue, fmt.Errorf("failed to cast [%d]array from slice of %d length", t.Len(), v.Len()) + } + ret := reflect.New(t).Elem() + for i := 0; i < v.Len(); i++ { + vv, err := castValue(t.Elem(), v.Index(i)) + if err != nil { + return nilValue, err + } + ret.Index(i).Set(vv) + } + return ret, nil +} + +func castSlice(t reflect.Type, v reflect.Value) (reflect.Value, error) { + kind := v.Type().Kind() + if kind == reflect.Interface { + return castSlice(t, reflect.ValueOf(v.Interface())) + } + if kind != reflect.Slice && kind != reflect.Array { + return nilValue, fmt.Errorf("failed to cast to slice from %s", kind) + } + if t.Elem() == v.Type().Elem() { + return v, nil + } + ret := reflect.MakeSlice(t, v.Len(), v.Len()) + for i := 0; i < 
v.Len(); i++ { + vv, err := castValue(t.Elem(), v.Index(i)) + if err != nil { + return nilValue, err + } + ret.Index(i).Set(vv) + } + return ret, nil +} + +func castMap(t reflect.Type, v reflect.Value) (reflect.Value, error) { + ret := reflect.MakeMap(t) + switch v.Type().Kind() { + case reflect.Map: + iter := v.MapRange() + for iter.Next() { + key, err := castValue(t.Key(), iter.Key()) + if err != nil { + return nilValue, err + } + value, err := castValue(t.Elem(), iter.Value()) + if err != nil { + return nilValue, err + } + ret.SetMapIndex(key, value) + } + return ret, nil + case reflect.Interface: + return castMap(t, reflect.ValueOf(v.Interface())) + case reflect.Slice: + if v.Len() > 0 { + return castMap(t, v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to map from empty slice") + } + return nilValue, fmt.Errorf("failed to cast to map from %s", v.Type().Kind()) +} + +func castStruct(t reflect.Type, v reflect.Value) (reflect.Value, error) { + ret := reflect.New(t).Elem() + switch v.Type().Kind() { + case reflect.Map: + iter := v.MapRange() + for iter.Next() { + key := iter.Key() + k, err := castString(key) + if err != nil { + return nilValue, err + } + fieldName := k.String() + field, ok := t.FieldByName(fieldName) + if ok { + value, err := castValue(field.Type, iter.Value()) + if err != nil { + return nilValue, err + } + ret.FieldByName(fieldName).Set(value) + } + } + return ret, nil + case reflect.Struct: + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + ret.FieldByName(name).Set(v.FieldByName(name)) + } + return ret, nil + case reflect.Interface: + return castStruct(t, reflect.ValueOf(v.Interface())) + case reflect.Slice: + if v.Len() > 0 { + return castStruct(t, v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to struct from empty slice") + default: + return nilValue, fmt.Errorf("failed to cast to struct from %s", v.Type().Kind()) + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bool.go b/vendor/github.com/goccy/go-json/internal/decoder/bool.go new file mode 100644 index 000000000000..ba6cf5bc496f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/bool.go @@ -0,0 +1,83 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type boolDecoder struct { + structName string + fieldName string +} + +func newBoolDecoder(structName, fieldName string) *boolDecoder { + return &boolDecoder{structName: structName, fieldName: fieldName} +} + +func (d *boolDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + c := s.skipWhiteSpace() + for { + switch c { + case 't': + if err := trueBytes(s); err != nil { + return err + } + **(**bool)(unsafe.Pointer(&p)) = true + return nil + case 'f': + if err := falseBytes(s); err != nil { + return err + } + **(**bool)(unsafe.Pointer(&p)) = false + return nil + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + case nul: + if s.read() { + c = s.char() + continue + } + goto ERROR + } + break + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("bool", s.totalOffset()) +} + +func (d *boolDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**bool)(unsafe.Pointer(&p)) = true + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { 
+ return 0, err + } + cursor += 5 + **(**bool)(unsafe.Pointer(&p)) = false + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + } + return 0, errors.ErrUnexpectedEndOfJSON("bool", cursor) +} + +func (d *boolDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: bool decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bytes.go b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go new file mode 100644 index 000000000000..939bf4327411 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go @@ -0,0 +1,118 @@ +package decoder + +import ( + "encoding/base64" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type bytesDecoder struct { + typ *runtime.Type + sliceDecoder Decoder + stringDecoder *stringDecoder + structName string + fieldName string +} + +func byteUnmarshalerSliceDecoder(typ *runtime.Type, structName string, fieldName string) Decoder { + var unmarshalDecoder Decoder + switch { + case runtime.PtrTo(typ).Implements(unmarshalJSONType): + unmarshalDecoder = newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName) + case runtime.PtrTo(typ).Implements(unmarshalTextType): + unmarshalDecoder = newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName) + default: + unmarshalDecoder, _ = compileUint8(typ, structName, fieldName) + } + return newSliceDecoder(unmarshalDecoder, typ, 1, structName, fieldName) +} + +func newBytesDecoder(typ *runtime.Type, structName string, fieldName string) *bytesDecoder { + return &bytesDecoder{ + typ: typ, + sliceDecoder: byteUnmarshalerSliceDecoder(typ, structName, fieldName), + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + } +} + +func (d *bytesDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamBinary(s, depth, p) + if err != nil { + return err + } + if bytes == nil { + s.reset() + return nil + } + decodedLen := base64.StdEncoding.DecodedLen(len(bytes)) + buf := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(buf, bytes) + if err != nil { + return err + } + *(*[]byte)(p) = buf[:n] + s.reset() + return nil +} + +func (d *bytesDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeBinary(ctx, cursor, depth, p) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + decodedLen := base64.StdEncoding.DecodedLen(len(bytes)) + b := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(b, bytes) + if err != nil { + return 0, err + } + *(*[]byte)(p) = b[:n] + return cursor, nil +} + +func (d *bytesDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: []byte decoder does not support decode path") +} + +func (d *bytesDecoder) decodeStreamBinary(s *Stream, depth int64, p unsafe.Pointer) ([]byte, error) { + c := s.skipWhiteSpace() + if c == '[' { + if d.sliceDecoder == nil { + return nil, &errors.UnmarshalTypeError{ + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + err := d.sliceDecoder.DecodeStream(s, depth, p) + return nil, err + } + return d.stringDecoder.decodeStreamByte(s) +} + +func (d *bytesDecoder) decodeBinary(ctx 
*RuntimeContext, cursor, depth int64, p unsafe.Pointer) ([]byte, int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '[' { + if d.sliceDecoder == nil { + return nil, 0, &errors.UnmarshalTypeError{ + Type: runtime.RType2Type(d.typ), + Offset: cursor, + } + } + c, err := d.sliceDecoder.Decode(ctx, cursor, depth, p) + if err != nil { + return nil, 0, err + } + return nil, c, nil + } + return d.stringDecoder.decodeByte(buf, cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go new file mode 100644 index 000000000000..8ad50936c0c4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -0,0 +1,493 @@ +package decoder + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" + "unicode" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +var ( + jsonNumberType = reflect.TypeOf(json.Number("")) + typeAddr *runtime.TypeAddr + cachedDecoderMap unsafe.Pointer // map[uintptr]decoder + cachedDecoder []Decoder + initOnce sync.Once +) + +func initDecoder() { + initOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) +} + +func loadDecoderMap() map[uintptr]Decoder { + initDecoder() + p := atomic.LoadPointer(&cachedDecoderMap) + return *(*map[uintptr]Decoder)(unsafe.Pointer(&p)) +} + +func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) { + initDecoder() + newDecoderMap := make(map[uintptr]Decoder, len(m)+1) + newDecoderMap[typ] = dec + + for k, v := range m { + newDecoderMap[k] = v + } + + atomic.StorePointer(&cachedDecoderMap, *(*unsafe.Pointer)(unsafe.Pointer(&newDecoderMap))) +} + +func compileToGetDecoderSlowPath(typeptr uintptr, typ *runtime.Type) (Decoder, error) { + decoderMap := loadDecoderMap() + if dec, exists := decoderMap[typeptr]; exists { + return dec, nil + } + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + storeDecoder(typeptr, dec, decoderMap) + return dec, nil +} + +func compileHead(typ *runtime.Type, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return newUnmarshalJSONDecoder(runtime.PtrTo(typ), "", ""), nil + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return newUnmarshalTextDecoder(runtime.PtrTo(typ), "", ""), nil + } + return compile(typ.Elem(), "", "", structTypeToDecoder) +} + +func compile(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName), nil + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil + } + + switch typ.Kind() { + case reflect.Ptr: + return compilePtr(typ, structName, fieldName, structTypeToDecoder) + case reflect.Struct: + return compileStruct(typ, structName, fieldName, structTypeToDecoder) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return compileBytes(elem, structName, fieldName) + } + return compileSlice(typ, structName, fieldName, structTypeToDecoder) + case reflect.Array: + return compileArray(typ, structName, fieldName, structTypeToDecoder) + case 
reflect.Map: + return compileMap(typ, structName, fieldName, structTypeToDecoder) + case reflect.Interface: + return compileInterface(typ, structName, fieldName) + case reflect.Uintptr: + return compileUint(typ, structName, fieldName) + case reflect.Int: + return compileInt(typ, structName, fieldName) + case reflect.Int8: + return compileInt8(typ, structName, fieldName) + case reflect.Int16: + return compileInt16(typ, structName, fieldName) + case reflect.Int32: + return compileInt32(typ, structName, fieldName) + case reflect.Int64: + return compileInt64(typ, structName, fieldName) + case reflect.Uint: + return compileUint(typ, structName, fieldName) + case reflect.Uint8: + return compileUint8(typ, structName, fieldName) + case reflect.Uint16: + return compileUint16(typ, structName, fieldName) + case reflect.Uint32: + return compileUint32(typ, structName, fieldName) + case reflect.Uint64: + return compileUint64(typ, structName, fieldName) + case reflect.String: + return compileString(typ, structName, fieldName) + case reflect.Bool: + return compileBool(structName, fieldName) + case reflect.Float32: + return compileFloat32(structName, fieldName) + case reflect.Float64: + return compileFloat64(structName, fieldName) + case reflect.Func: + return compileFunc(typ, structName, fieldName) + } + return newInvalidDecoder(typ, structName, fieldName), nil +} + +func isStringTagSupportedType(typ *runtime.Type) bool { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return false + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return false + } + switch typ.Kind() { + case reflect.Map: + return false + case reflect.Slice: + return false + case reflect.Array: + return false + case reflect.Struct: + return false + case reflect.Interface: + return false + } + return true +} + +func compileMapKey(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + if runtime.PtrTo(typ).Implements(unmarshalTextType) { + return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil + } + if typ.Kind() == reflect.String { + return newStringDecoder(structName, fieldName), nil + } + dec, err := compile(typ, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + for { + switch t := dec.(type) { + case *stringDecoder, *interfaceDecoder: + return dec, nil + case *boolDecoder, *intDecoder, *uintDecoder, *numberDecoder: + return newWrappedStringDecoder(typ, dec, structName, fieldName), nil + case *ptrDecoder: + dec = t.dec + default: + return newInvalidDecoder(typ, structName, fieldName), nil + } + } +} + +func compilePtr(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + dec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newPtrDecoder(dec, typ.Elem(), structName, fieldName), nil +} + +func compileInt(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int)(p) = int(v) + }), nil +} + +func compileInt8(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int8)(p) = int8(v) + }), nil +} + +func compileInt16(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + 
*(*int16)(p) = int16(v) + }), nil +} + +func compileInt32(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int32)(p) = int32(v) + }), nil +} + +func compileInt64(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int64)(p) = v + }), nil +} + +func compileUint(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint)(p) = uint(v) + }), nil +} + +func compileUint8(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint8)(p) = uint8(v) + }), nil +} + +func compileUint16(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint16)(p) = uint16(v) + }), nil +} + +func compileUint32(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint32)(p) = uint32(v) + }), nil +} + +func compileUint64(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint64)(p) = v + }), nil +} + +func compileFloat32(structName, fieldName string) (Decoder, error) { + return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*float32)(p) = float32(v) + }), nil +} + +func compileFloat64(structName, fieldName string) (Decoder, error) { + return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*float64)(p) = v + }), nil +} + +func compileString(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + if typ == runtime.Type2RType(jsonNumberType) { + return newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*json.Number)(p) = v + }), nil + } + return newStringDecoder(structName, fieldName), nil +} + +func compileBool(structName, fieldName string) (Decoder, error) { + return newBoolDecoder(structName, fieldName), nil +} + +func compileBytes(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newBytesDecoder(typ, structName, fieldName), nil +} + +func compileSlice(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + elem := typ.Elem() + decoder, err := compile(elem, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newSliceDecoder(decoder, elem, elem.Size(), structName, fieldName), nil +} + +func compileArray(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + elem := typ.Elem() + decoder, err := compile(elem, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newArrayDecoder(decoder, elem, typ.Len(), structName, fieldName), nil +} + +func compileMap(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + keyDec, err := compileMapKey(typ.Key(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + valueDec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder) + 
if err != nil { + return nil, err + } + return newMapDecoder(typ, typ.Key(), keyDec, typ.Elem(), valueDec, structName, fieldName), nil +} + +func compileInterface(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newInterfaceDecoder(typ, structName, fieldName), nil +} + +func compileFunc(typ *runtime.Type, strutName, fieldName string) (Decoder, error) { + return newFuncDecoder(typ, strutName, fieldName), nil +} + +func typeToStructTags(typ *runtime.Type) runtime.StructTags { + tags := runtime.StructTags{} + fieldNum := typ.NumField() + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + tags = append(tags, runtime.StructTagFromField(field)) + } + return tags +} + +func compileStruct(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + fieldNum := typ.NumField() + fieldMap := map[string]*structFieldSet{} + typeptr := uintptr(unsafe.Pointer(typ)) + if dec, exists := structTypeToDecoder[typeptr]; exists { + return dec, nil + } + structDec := newStructDecoder(structName, fieldName, fieldMap) + structTypeToDecoder[typeptr] = structDec + structName = typ.Name() + tags := typeToStructTags(typ) + allFields := []*structFieldSet{} + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + isUnexportedField := unicode.IsLower([]rune(field.Name)[0]) + tag := runtime.StructTagFromField(field) + dec, err := compile(runtime.Type2RType(field.Type), structName, field.Name, structTypeToDecoder) + if err != nil { + return nil, err + } + if field.Anonymous && !tag.IsTaggedKey { + if stDec, ok := dec.(*structDecoder); ok { + if runtime.Type2RType(field.Type) == typ { + // recursive definition + continue + } + for k, v := range stDec.fieldMap { + if tags.ExistsKey(k) { + continue + } + fieldSet := &structFieldSet{ + dec: v.dec, + offset: field.Offset + v.offset, + isTaggedKey: v.isTaggedKey, + key: k, + keyLen: int64(len(k)), + } + allFields = append(allFields, fieldSet) + } + } else if pdec, ok := dec.(*ptrDecoder); ok { + contentDec := pdec.contentDecoder() + if pdec.typ == typ { + // recursive definition + continue + } + var fieldSetErr error + if isUnexportedField { + fieldSetErr = fmt.Errorf( + "json: cannot set embedded pointer to unexported struct: %v", + field.Type.Elem(), + ) + } + if dec, ok := contentDec.(*structDecoder); ok { + for k, v := range dec.fieldMap { + if tags.ExistsKey(k) { + continue + } + fieldSet := &structFieldSet{ + dec: newAnonymousFieldDecoder(pdec.typ, v.offset, v.dec), + offset: field.Offset, + isTaggedKey: v.isTaggedKey, + key: k, + keyLen: int64(len(k)), + err: fieldSetErr, + } + allFields = append(allFields, fieldSet) + } + } else { + fieldSet := &structFieldSet{ + dec: pdec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) + } + } else { + fieldSet := &structFieldSet{ + dec: dec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) + } + } else { + if tag.IsString && isStringTagSupportedType(runtime.Type2RType(field.Type)) { + dec = newWrappedStringDecoder(runtime.Type2RType(field.Type), dec, structName, field.Name) + } + var key string + if tag.Key != "" { + key = tag.Key + } else { + key = field.Name + } + fieldSet := &structFieldSet{ + dec: dec, + offset: 
field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: key, + keyLen: int64(len(key)), + } + allFields = append(allFields, fieldSet) + } + } + for _, set := range filterDuplicatedFields(allFields) { + fieldMap[set.key] = set + lower := strings.ToLower(set.key) + if _, exists := fieldMap[lower]; !exists { + // first win + fieldMap[lower] = set + } + } + delete(structTypeToDecoder, typeptr) + structDec.tryOptimize() + return structDec, nil +} + +func filterDuplicatedFields(allFields []*structFieldSet) []*structFieldSet { + fieldMap := map[string][]*structFieldSet{} + for _, field := range allFields { + fieldMap[field.key] = append(fieldMap[field.key], field) + } + duplicatedFieldMap := map[string]struct{}{} + for k, sets := range fieldMap { + sets = filterFieldSets(sets) + if len(sets) != 1 { + duplicatedFieldMap[k] = struct{}{} + } + } + + filtered := make([]*structFieldSet, 0, len(allFields)) + for _, field := range allFields { + if _, exists := duplicatedFieldMap[field.key]; exists { + continue + } + filtered = append(filtered, field) + } + return filtered +} + +func filterFieldSets(sets []*structFieldSet) []*structFieldSet { + if len(sets) == 1 { + return sets + } + filtered := make([]*structFieldSet, 0, len(sets)) + for _, set := range sets { + if set.isTaggedKey { + filtered = append(filtered, set) + } + } + return filtered +} + +func implementsUnmarshalJSONType(typ *runtime.Type) bool { + return typ.Implements(unmarshalJSONType) || typ.Implements(unmarshalJSONContextType) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go new file mode 100644 index 000000000000..025ca85b5e22 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go @@ -0,0 +1,30 @@ +//go:build !race +// +build !race + +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + initDecoder() + typeptr := uintptr(unsafe.Pointer(typ)) + if typeptr > typeAddr.MaxTypeAddr { + return compileToGetDecoderSlowPath(typeptr, typ) + } + + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + if dec := cachedDecoder[index]; dec != nil { + return dec, nil + } + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + cachedDecoder[index] = dec + return dec, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go new file mode 100644 index 000000000000..023b817c3681 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go @@ -0,0 +1,38 @@ +//go:build race +// +build race + +package decoder + +import ( + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +var decMu sync.RWMutex + +func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + initDecoder() + typeptr := uintptr(unsafe.Pointer(typ)) + if typeptr > typeAddr.MaxTypeAddr { + return compileToGetDecoderSlowPath(typeptr, typ) + } + + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + decMu.RLock() + if dec := cachedDecoder[index]; dec != nil { + decMu.RUnlock() + return dec, nil + } + decMu.RUnlock() + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + decMu.Lock() + cachedDecoder[index] = dec + decMu.Unlock() + return dec, nil +} diff --git 
a/vendor/github.com/goccy/go-json/internal/decoder/context.go b/vendor/github.com/goccy/go-json/internal/decoder/context.go new file mode 100644 index 000000000000..cb2ffdafd037 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/context.go @@ -0,0 +1,254 @@ +package decoder + +import ( + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type RuntimeContext struct { + Buf []byte + Option *Option +} + +var ( + runtimeContextPool = sync.Pool{ + New: func() interface{} { + return &RuntimeContext{ + Option: &Option{}, + } + }, + } +) + +func TakeRuntimeContext() *RuntimeContext { + return runtimeContextPool.Get().(*RuntimeContext) +} + +func ReleaseRuntimeContext(ctx *RuntimeContext) { + runtimeContextPool.Put(ctx) +} + +var ( + isWhiteSpace = [256]bool{} +) + +func init() { + isWhiteSpace[' '] = true + isWhiteSpace['\n'] = true + isWhiteSpace['\t'] = true + isWhiteSpace['\r'] = true +} + +func char(ptr unsafe.Pointer, offset int64) byte { + return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) +} + +func skipWhiteSpace(buf []byte, cursor int64) int64 { + for isWhiteSpace[buf[cursor]] { + cursor++ + } + return cursor +} + +func skipObject(buf []byte, cursor, depth int64) (int64, error) { + braceCount := 1 + for { + switch buf[cursor] { + case '{': + braceCount++ + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case '}': + depth-- + braceCount-- + if braceCount == 0 { + return cursor + 1, nil + } + case '[': + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case ']': + depth-- + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("object of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func skipArray(buf []byte, cursor, depth int64) (int64, error) { + bracketCount := 1 + for { + switch buf[cursor] { + case '[': + bracketCount++ + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case ']': + bracketCount-- + depth-- + if bracketCount == 0 { + return cursor + 1, nil + } + case '{': + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case '}': + depth-- + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("array of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func skipValue(buf []byte, cursor, depth int64) (int64, error) { + for { + switch buf[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return skipObject(buf, cursor+1, depth+1) + case '[': + return skipArray(buf, cursor+1, depth+1) + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + return cursor + 1, nil + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", 
cursor) + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + for { + cursor++ + if floatTable[buf[cursor]] { + continue + } + break + } + return cursor, nil + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + default: + return cursor, errors.ErrUnexpectedEndOfJSON("null", cursor) + } + } +} + +func validateTrue(buf []byte, cursor int64) error { + if cursor+3 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("true", cursor) + } + if buf[cursor+1] != 'r' { + return errors.ErrInvalidCharacter(buf[cursor+1], "true", cursor) + } + if buf[cursor+2] != 'u' { + return errors.ErrInvalidCharacter(buf[cursor+2], "true", cursor) + } + if buf[cursor+3] != 'e' { + return errors.ErrInvalidCharacter(buf[cursor+3], "true", cursor) + } + return nil +} + +func validateFalse(buf []byte, cursor int64) error { + if cursor+4 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("false", cursor) + } + if buf[cursor+1] != 'a' { + return errors.ErrInvalidCharacter(buf[cursor+1], "false", cursor) + } + if buf[cursor+2] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+2], "false", cursor) + } + if buf[cursor+3] != 's' { + return errors.ErrInvalidCharacter(buf[cursor+3], "false", cursor) + } + if buf[cursor+4] != 'e' { + return errors.ErrInvalidCharacter(buf[cursor+4], "false", cursor) + } + return nil +} + +func validateNull(buf []byte, cursor int64) error { + if cursor+3 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("null", cursor) + } + if buf[cursor+1] != 'u' { + return errors.ErrInvalidCharacter(buf[cursor+1], "null", cursor) + } + if buf[cursor+2] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+2], "null", cursor) + } + if buf[cursor+3] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+3], "null", cursor) + } + return nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/float.go b/vendor/github.com/goccy/go-json/internal/decoder/float.go new file mode 100644 index 000000000000..9b2eb8b35a4d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/float.go @@ -0,0 +1,170 @@ +package decoder + +import ( + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type floatDecoder struct { + op func(unsafe.Pointer, float64) + structName string + fieldName string +} + +func newFloatDecoder(structName, fieldName string, op func(unsafe.Pointer, float64)) *floatDecoder { + return &floatDecoder{op: op, structName: structName, fieldName: fieldName} +} + +var ( + floatTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + '.': true, + 'e': true, + 'E': true, + '+': true, + '-': true, + } + + validEndNumberChar = [256]bool{ + nul: true, + ' ': true, + '\t': true, + '\r': true, + '\n': true, + ',': true, + ':': true, + '}': true, + ']': true, + } +) + +func floatBytes(s *Stream) []byte { + start := s.cursor + for { + s.cursor++ + if floatTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + return s.buf[start:s.cursor] +} + +func (d *floatDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { 
+ case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return floatBytes(s), nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("float", s.totalOffset()) +} + +func (d *floatDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for floatTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("float", cursor) + } + } +} + +func (d *floatDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + str := *(*string)(unsafe.Pointer(&bytes)) + f64, err := strconv.ParseFloat(str, 64) + if err != nil { + return errors.ErrSyntax(err.Error(), s.totalOffset()) + } + d.op(p, f64) + return nil +} + +func (d *floatDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + bytes, c, err := d.decodeByte(buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + if !validEndNumberChar[buf[cursor]] { + return 0, errors.ErrUnexpectedEndOfJSON("float", cursor) + } + s := *(*string)(unsafe.Pointer(&bytes)) + f64, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0, errors.ErrSyntax(err.Error(), cursor) + } + d.op(p, f64) + return cursor, nil +} + +func (d *floatDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + bytes, c, err := d.decodeByte(buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/func.go b/vendor/github.com/goccy/go-json/internal/decoder/func.go new file mode 100644 index 000000000000..4cc12ca81f14 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/func.go @@ -0,0 +1,146 @@ +package decoder + +import ( + "bytes" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type funcDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newFuncDecoder(typ *runtime.Type, structName, fieldName string) *funcDecoder { + fnDecoder := &funcDecoder{typ, structName, fieldName} + return fnDecoder +} + +func (d *funcDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if len(src) > 0 { + switch src[0] { + case '"': + return &errors.UnmarshalTypeError{ + Value: "string", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '[': + return &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '{': + return &errors.UnmarshalTypeError{ + Value: "object", + Type: 
runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case 'n': + if err := nullBytes(s); err != nil { + return err + } + *(*unsafe.Pointer)(p) = nil + return nil + case 't': + if err := trueBytes(s); err == nil { + return &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + case 'f': + if err := falseBytes(s); err == nil { + return &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + } + } + return errors.ErrInvalidBeginningOfValue(s.buf[s.cursor], s.totalOffset()) +} + +func (d *funcDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if len(src) > 0 { + switch src[0] { + case '"': + return 0, &errors.UnmarshalTypeError{ + Value: "string", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '[': + return 0, &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '{': + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + case 't': + if err := validateTrue(buf, start); err == nil { + return 0, &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + } + case 'f': + if err := validateFalse(buf, start); err == nil { + return 0, &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + } + } + } + return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} + +func (d *funcDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: func decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/int.go b/vendor/github.com/goccy/go-json/internal/decoder/int.go new file mode 100644 index 000000000000..1a7f081994c0 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/int.go @@ -0,0 +1,246 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type intDecoder struct { + typ *runtime.Type + kind reflect.Kind + op func(unsafe.Pointer, int64) + structName string + fieldName string +} + +func newIntDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, int64)) *intDecoder { + return &intDecoder{ + typ: typ, + kind: typ.Kind(), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *intDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: fmt.Sprintf("number %s", string(buf)), + Type: runtime.RType2Type(d.typ), + Struct: d.structName, + Field: d.fieldName, + Offset: offset, + } +} + +var ( + pow10i64 = [...]int64{ + 1e00, 
1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, + } + pow10i64Len = len(pow10i64) +) + +func (d *intDecoder) parseInt(b []byte) (int64, error) { + isNegative := false + if b[0] == '-' { + b = b[1:] + isNegative = true + } + maxDigit := len(b) + if maxDigit > pow10i64Len { + return 0, fmt.Errorf("invalid length of number") + } + sum := int64(0) + for i := 0; i < maxDigit; i++ { + c := int64(b[i]) - 48 + digitValue := pow10i64[maxDigit-i-1] + sum += c * digitValue + } + if isNegative { + return -1 * sum, nil + } + return sum, nil +} + +var ( + numTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + } +) + +var ( + numZeroBuf = []byte{'0'} +) + +func (d *intDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + if len(num) < 2 { + goto ERROR + } + return num, nil + case '0': + s.cursor++ + return numZeroBuf, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + return num, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto ERROR + default: + return nil, d.typeError([]byte{s.char()}, s.totalOffset()) + } + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("number(integer)", s.totalOffset()) +} + +func (d *intDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '0': + cursor++ + return numZeroBuf, cursor, nil + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for numTable[char(b, cursor)] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, d.typeError([]byte{char(b, cursor)}, cursor) + } + } +} + +func (d *intDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + i64, err := d.parseInt(bytes) + if err != nil { + return d.typeError(bytes, s.totalOffset()) + } + switch d.kind { + case reflect.Int8: + if i64 < -1*(1<<7) || (1<<7) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Int16: + if i64 < -1*(1<<15) || (1<<15) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Int32: + if i64 < -1*(1<<31) || (1<<31) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + } + d.op(p, i64) + s.reset() + return nil +} + +func (d *intDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + + 
i64, err := d.parseInt(bytes) + if err != nil { + return 0, d.typeError(bytes, cursor) + } + switch d.kind { + case reflect.Int8: + if i64 < -1*(1<<7) || (1<<7) <= i64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Int16: + if i64 < -1*(1<<15) || (1<<15) <= i64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Int32: + if i64 < -1*(1<<31) || (1<<31) <= i64 { + return 0, d.typeError(bytes, cursor) + } + } + d.op(p, i64) + return cursor, nil +} + +func (d *intDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: int decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/interface.go b/vendor/github.com/goccy/go-json/internal/decoder/interface.go new file mode 100644 index 000000000000..45c69ab8c709 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/interface.go @@ -0,0 +1,528 @@ +package decoder + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type interfaceDecoder struct { + typ *runtime.Type + structName string + fieldName string + sliceDecoder *sliceDecoder + mapDecoder *mapDecoder + floatDecoder *floatDecoder + numberDecoder *numberDecoder + stringDecoder *stringDecoder +} + +func newEmptyInterfaceDecoder(structName, fieldName string) *interfaceDecoder { + ifaceDecoder := &interfaceDecoder{ + typ: emptyInterfaceType, + structName: structName, + fieldName: fieldName, + floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: newStringDecoder(structName, fieldName), + } + ifaceDecoder.sliceDecoder = newSliceDecoder( + ifaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + structName, fieldName, + ) + ifaceDecoder.mapDecoder = newMapDecoder( + interfaceMapType, + stringType, + ifaceDecoder.stringDecoder, + interfaceMapType.Elem(), + ifaceDecoder, + structName, + fieldName, + ) + return ifaceDecoder +} + +func newInterfaceDecoder(typ *runtime.Type, structName, fieldName string) *interfaceDecoder { + emptyIfaceDecoder := newEmptyInterfaceDecoder(structName, fieldName) + stringDecoder := newStringDecoder(structName, fieldName) + return &interfaceDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + sliceDecoder: newSliceDecoder( + emptyIfaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + structName, fieldName, + ), + mapDecoder: newMapDecoder( + interfaceMapType, + stringType, + stringDecoder, + interfaceMapType.Elem(), + emptyIfaceDecoder, + structName, + fieldName, + ), + floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: stringDecoder, + } +} + +func (d *interfaceDecoder) numDecoder(s *Stream) Decoder { + if s.UseNumber { + return d.numberDecoder + } + return d.floatDecoder +} + +var ( + emptyInterfaceType = runtime.Type2RType(reflect.TypeOf((*interface{})(nil)).Elem()) + EmptyInterfaceType = emptyInterfaceType + interfaceMapType = runtime.Type2RType( + reflect.TypeOf((*map[string]interface{})(nil)).Elem(), + ) + stringType = runtime.Type2RType( + 
reflect.TypeOf(""), + ) +) + +func decodeStreamUnmarshaler(s *Stream, depth int64, unmarshaler json.Unmarshaler) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(dst); err != nil { + return err + } + return nil +} + +func decodeStreamUnmarshalerContext(s *Stream, depth int64, unmarshaler unmarshalerContext) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(s.Option.Context, dst); err != nil { + return err + } + return nil +} + +func decodeUnmarshaler(buf []byte, cursor, depth int64, unmarshaler json.Unmarshaler) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(dst); err != nil { + return 0, err + } + return end, nil +} + +func decodeUnmarshalerContext(ctx *RuntimeContext, buf []byte, cursor, depth int64, unmarshaler unmarshalerContext) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(ctx.Option.Context, dst); err != nil { + return 0, err + } + return end, nil +} + +func decodeStreamTextUnmarshaler(s *Stream, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return nil + } + + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalText(dst); err != nil { + return err + } + return nil +} + +func decodeTextUnmarshaler(buf []byte, cursor, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + if s, ok := unquoteBytes(src); ok { + src = s + } + if err := unmarshaler.UnmarshalText(src); err != nil { + return 0, err + } + return end, nil +} + +func (d *interfaceDecoder) decodeStreamEmptyInterface(s *Stream, depth int64, p unsafe.Pointer) error { + c := s.skipWhiteSpace() + for { + switch c { + case '{': + var v map[string]interface{} + ptr := unsafe.Pointer(&v) + if err := d.mapDecoder.DecodeStream(s, depth, ptr); err != nil { + return err + } + *(*interface{})(p) = v + return nil + case '[': + var v []interface{} + ptr := unsafe.Pointer(&v) + if err := d.sliceDecoder.DecodeStream(s, depth, ptr); err != nil { + return err + } + *(*interface{})(p) = v + return nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.numDecoder(s).DecodeStream(s, depth, p) + case '"': + s.cursor++ + start := s.cursor + for { + switch s.char() { + case '\\': + if _, err := decodeEscapeString(s, nil); err != nil { + return err + } + case '"': + literal := s.buf[start:s.cursor] + s.cursor++ + *(*interface{})(p) = string(literal) + return nil + 
case nul: + if s.read() { + continue + } + return errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + s.cursor++ + } + case 't': + if err := trueBytes(s); err != nil { + return err + } + **(**interface{})(unsafe.Pointer(&p)) = true + return nil + case 'f': + if err := falseBytes(s); err != nil { + return err + } + **(**interface{})(unsafe.Pointer(&p)) = false + return nil + case 'n': + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + case nul: + if s.read() { + c = s.char() + continue + } + } + break + } + return errors.ErrInvalidBeginningOfValue(c, s.totalOffset()) +} + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +func (d *interfaceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + rv := reflect.ValueOf(runtimeInterfaceValue) + if rv.NumMethod() > 0 && rv.CanInterface() { + if u, ok := rv.Interface().(unmarshalerContext); ok { + return decodeStreamUnmarshalerContext(s, depth, u) + } + if u, ok := rv.Interface().(json.Unmarshaler); ok { + return decodeStreamUnmarshaler(s, depth, u) + } + if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return decodeStreamTextUnmarshaler(s, depth, u, p) + } + if s.skipWhiteSpace() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + } + return d.errUnmarshalType(rv.Type(), s.totalOffset()) + } + iface := rv.Interface() + ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface)) + typ := ifaceHeader.typ + if ifaceHeader.ptr == nil || d.typ == typ || typ == nil { + // concrete type is empty interface + return d.decodeStreamEmptyInterface(s, depth, p) + } + if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr { + return d.decodeStreamEmptyInterface(s, depth, p) + } + if s.skipWhiteSpace() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + } + decoder, err := CompileToGetDecoder(typ) + if err != nil { + return err + } + return decoder.DecodeStream(s, depth, ifaceHeader.ptr) +} + +func (d *interfaceDecoder) errUnmarshalType(typ reflect.Type, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: typ.String(), + Type: typ, + Offset: offset, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *interfaceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + rv := reflect.ValueOf(runtimeInterfaceValue) + if rv.NumMethod() > 0 && rv.CanInterface() { + if u, ok := rv.Interface().(unmarshalerContext); ok { + return decodeUnmarshalerContext(ctx, buf, cursor, depth, u) + } + if u, ok := rv.Interface().(json.Unmarshaler); ok { + return decodeUnmarshaler(buf, cursor, depth, u) + } + if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return decodeTextUnmarshaler(buf, cursor, depth, u, p) + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + return 0, d.errUnmarshalType(rv.Type(), cursor) + } + + iface := rv.Interface() + ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface)) + typ := ifaceHeader.typ + if ifaceHeader.ptr == 
nil || d.typ == typ || typ == nil { + // concrete type is empty interface + return d.decodeEmptyInterface(ctx, cursor, depth, p) + } + if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr { + return d.decodeEmptyInterface(ctx, cursor, depth, p) + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + decoder, err := CompileToGetDecoder(typ) + if err != nil { + return 0, err + } + return decoder.Decode(ctx, cursor, depth, ifaceHeader.ptr) +} + +func (d *interfaceDecoder) decodeEmptyInterface(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case '{': + var v map[string]interface{} + ptr := unsafe.Pointer(&v) + cursor, err := d.mapDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case '[': + var v []interface{} + ptr := unsafe.Pointer(&v) + cursor, err := d.sliceDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.floatDecoder.Decode(ctx, cursor, depth, p) + case '"': + var v string + ptr := unsafe.Pointer(&v) + cursor, err := d.stringDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = true + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + **(**interface{})(unsafe.Pointer(&p)) = false + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} + +func NewPathDecoder() Decoder { + ifaceDecoder := &interfaceDecoder{ + typ: emptyInterfaceType, + structName: "", + fieldName: "", + floatDecoder: newFloatDecoder("", "", func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder("", "", func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: newStringDecoder("", ""), + } + ifaceDecoder.sliceDecoder = newSliceDecoder( + ifaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + "", "", + ) + ifaceDecoder.mapDecoder = newMapDecoder( + interfaceMapType, + stringType, + ifaceDecoder.stringDecoder, + interfaceMapType.Elem(), + ifaceDecoder, + "", "", + ) + return ifaceDecoder +} + +var ( + truebytes = []byte("true") + falsebytes = []byte("false") +) + +func (d *interfaceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case '{': + return d.mapDecoder.DecodePath(ctx, cursor, depth) + case '[': + return d.sliceDecoder.DecodePath(ctx, cursor, depth) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.floatDecoder.DecodePath(ctx, cursor, depth) + case '"': + return d.stringDecoder.DecodePath(ctx, cursor, depth) + case 't': + if err := 
validateTrue(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{truebytes}, cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 5 + return [][]byte{falsebytes}, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + } + return nil, cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/invalid.go b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go new file mode 100644 index 000000000000..4c9721b09892 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go @@ -0,0 +1,55 @@ +package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type invalidDecoder struct { + typ *runtime.Type + kind reflect.Kind + structName string + fieldName string +} + +func newInvalidDecoder(typ *runtime.Type, structName, fieldName string) *invalidDecoder { + return &invalidDecoder{ + typ: typ, + kind: typ.Kind(), + structName: structName, + fieldName: fieldName, + } +} + +func (d *invalidDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *invalidDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *invalidDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/map.go b/vendor/github.com/goccy/go-json/internal/decoder/map.go new file mode 100644 index 000000000000..07a9caea6513 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/map.go @@ -0,0 +1,280 @@ +package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type mapDecoder struct { + mapType *runtime.Type + keyType *runtime.Type + valueType *runtime.Type + canUseAssignFaststrType bool + keyDecoder Decoder + valueDecoder Decoder + structName string + fieldName string +} + +func newMapDecoder(mapType *runtime.Type, keyType *runtime.Type, keyDec Decoder, valueType *runtime.Type, valueDec Decoder, structName, fieldName string) *mapDecoder { + return &mapDecoder{ + mapType: mapType, + keyDecoder: keyDec, + keyType: keyType, + canUseAssignFaststrType: canUseAssignFaststrType(keyType, valueType), + valueType: valueType, + valueDecoder: valueDec, + structName: structName, + fieldName: fieldName, + } +} + +const ( + mapMaxElemSize = 128 +) + +// See detail: https://github.com/goccy/go-json/pull/283 +func canUseAssignFaststrType(key *runtime.Type, value *runtime.Type) bool { + indirectElem := value.Size() > mapMaxElemSize + if indirectElem { + return false + } + return key.Kind() == reflect.String +} + +//go:linkname makemap reflect.makemap +func makemap(*runtime.Type, int) unsafe.Pointer + 
+//nolint:golint +//go:linkname mapassign_faststr runtime.mapassign_faststr +//go:noescape +func mapassign_faststr(t *runtime.Type, m unsafe.Pointer, s string) unsafe.Pointer + +//go:linkname mapassign reflect.mapassign +//go:noescape +func mapassign(t *runtime.Type, m unsafe.Pointer, k, v unsafe.Pointer) + +func (d *mapDecoder) mapassign(t *runtime.Type, m, k, v unsafe.Pointer) { + if d.canUseAssignFaststrType { + mapV := mapassign_faststr(t, m, *(*string)(k)) + typedmemmove(d.valueType, mapV, v) + } else { + mapassign(t, m, k, v) + } +} + +func (d *mapDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + switch s.skipWhiteSpace() { + case 'n': + if err := nullBytes(s); err != nil { + return err + } + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil + return nil + case '{': + default: + return errors.ErrExpected("{ character for map value", s.totalOffset()) + } + mapValue := *(*unsafe.Pointer)(p) + if mapValue == nil { + mapValue = makemap(d.mapType, 0) + } + s.cursor++ + if s.skipWhiteSpace() == '}' { + *(*unsafe.Pointer)(p) = mapValue + s.cursor++ + return nil + } + for { + k := unsafe_New(d.keyType) + if err := d.keyDecoder.DecodeStream(s, depth, k); err != nil { + return err + } + s.skipWhiteSpace() + if !s.equalChar(':') { + return errors.ErrExpected("colon after object key", s.totalOffset()) + } + s.cursor++ + v := unsafe_New(d.valueType) + if err := d.valueDecoder.DecodeStream(s, depth, v); err != nil { + return err + } + d.mapassign(d.mapType, mapValue, k, v) + s.skipWhiteSpace() + if s.equalChar('}') { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + s.cursor++ + return nil + } + if !s.equalChar(',') { + return errors.ErrExpected("comma after object value", s.totalOffset()) + } + s.cursor++ + } +} + +func (d *mapDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + cursor = skipWhiteSpace(buf, cursor) + buflen := int64(len(buf)) + if buflen < 2 { + return 0, errors.ErrExpected("{} for map", cursor) + } + switch buf[cursor] { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil + return cursor, nil + case '{': + default: + return 0, errors.ErrExpected("{ character for map value", cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + mapValue := *(*unsafe.Pointer)(p) + if mapValue == nil { + mapValue = makemap(d.mapType, 0) + } + if buf[cursor] == '}' { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + cursor++ + return cursor, nil + } + for { + k := unsafe_New(d.keyType) + keyCursor, err := d.keyDecoder.Decode(ctx, cursor, depth, k) + if err != nil { + return 0, err + } + cursor = skipWhiteSpace(buf, keyCursor) + if buf[cursor] != ':' { + return 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + v := unsafe_New(d.valueType) + valueCursor, err := d.valueDecoder.Decode(ctx, cursor, depth, v) + if err != nil { + return 0, err + } + d.mapassign(d.mapType, mapValue, k, v) + cursor = skipWhiteSpace(buf, valueCursor) + if buf[cursor] == '}' { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + cursor++ + return cursor, nil + } + if buf[cursor] != ',' { + return 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} + +func (d 
*mapDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + cursor = skipWhiteSpace(buf, cursor) + buflen := int64(len(buf)) + if buflen < 2 { + return nil, 0, errors.ErrExpected("{} for map", cursor) + } + switch buf[cursor] { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + case '{': + default: + return nil, 0, errors.ErrExpected("{ character for map value", cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return nil, cursor, nil + } + keyDecoder, ok := d.keyDecoder.(*stringDecoder) + if !ok { + return nil, 0, &errors.UnmarshalTypeError{ + Value: "string", + Type: reflect.TypeOf(""), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } + } + ret := [][]byte{} + for { + key, keyCursor, err := keyDecoder.decodeByte(buf, cursor) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(buf, keyCursor) + if buf[cursor] != ':' { + return nil, 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + child, found, err := ctx.Option.Path.Field(string(key)) + if err != nil { + return nil, 0, err + } + if found { + if child != nil { + oldPath := ctx.Option.Path.node + ctx.Option.Path.node = child + paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth) + if err != nil { + return nil, 0, err + } + ctx.Option.Path.node = oldPath + ret = append(ret, paths...) + cursor = c + } else { + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + ret = append(ret, buf[start:end]) + cursor = end + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return ret, cursor, nil + } + if buf[cursor] != ',' { + return nil, 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/number.go b/vendor/github.com/goccy/go-json/internal/decoder/number.go new file mode 100644 index 000000000000..10e5435e6ce2 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/number.go @@ -0,0 +1,123 @@ +package decoder + +import ( + "encoding/json" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type numberDecoder struct { + stringDecoder *stringDecoder + op func(unsafe.Pointer, json.Number) + structName string + fieldName string +} + +func newNumberDecoder(structName, fieldName string, op func(unsafe.Pointer, json.Number)) *numberDecoder { + return &numberDecoder{ + stringDecoder: newStringDecoder(structName, fieldName), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *numberDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { + return errors.ErrSyntax(err.Error(), s.totalOffset()) + } + d.op(p, json.Number(string(bytes))) + s.reset() + return nil +} + +func (d *numberDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if _, 
err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { + return 0, errors.ErrSyntax(err.Error(), c) + } + cursor = c + s := *(*string)(unsafe.Pointer(&bytes)) + d.op(p, json.Number(s)) + return cursor, nil +} + +func (d *numberDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} + +func (d *numberDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + start := s.cursor + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return floatBytes(s), nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case '"': + return d.stringDecoder.decodeStreamByte(s) + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + if s.cursor == start { + return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) + } + return nil, errors.ErrUnexpectedEndOfJSON("json.Number", s.totalOffset()) +} + +func (d *numberDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for floatTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + case '"': + return d.stringDecoder.decodeByte(buf, cursor) + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("json.Number", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/option.go b/vendor/github.com/goccy/go-json/internal/decoder/option.go new file mode 100644 index 000000000000..502f772eba0b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/option.go @@ -0,0 +1,17 @@ +package decoder + +import "context" + +type OptionFlags uint8 + +const ( + FirstWinOption OptionFlags = 1 << iota + ContextOption + PathOption +) + +type Option struct { + Flags OptionFlags + Context context.Context + Path *Path +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/path.go b/vendor/github.com/goccy/go-json/internal/decoder/path.go new file mode 100644 index 000000000000..a15ff69e3cd8 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/path.go @@ -0,0 +1,670 @@ +package decoder + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type PathString string + +func (s PathString) Build() (*Path, error) { + builder := new(PathBuilder) + return builder.Build([]rune(s)) +} + +type PathBuilder struct { + root PathNode + node PathNode + singleQuotePathSelector bool + doubleQuotePathSelector bool +} + +func (b *PathBuilder) Build(buf []rune) (*Path, error) { + node, err := b.build(buf) + if err != nil { + return nil, err + } + return &Path{ + node: node, + RootSelectorOnly: node == nil, + SingleQuotePathSelector: b.singleQuotePathSelector, + DoubleQuotePathSelector: b.doubleQuotePathSelector, + }, nil +} + +func (b *PathBuilder) build(buf []rune) (PathNode, error) { + if len(buf) == 0 { + return nil, errors.ErrEmptyPath() + } + if buf[0] != '$' { + return nil, 
errors.ErrInvalidPath("JSON Path must start with a $ character") + } + if len(buf) == 1 { + return nil, nil + } + buf = buf[1:] + offset, err := b.buildNext(buf) + if err != nil { + return nil, err + } + if len(buf) > offset { + return nil, errors.ErrInvalidPath("remain invalid path %q", buf[offset:]) + } + return b.root, nil +} + +func (b *PathBuilder) buildNextCharIfExists(buf []rune, cursor int) (int, error) { + if len(buf) > cursor { + offset, err := b.buildNext(buf[cursor:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + return cursor, nil +} + +func (b *PathBuilder) buildNext(buf []rune) (int, error) { + switch buf[0] { + case '.': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + offset, err := b.buildSelector(buf[1:]) + if err != nil { + return 0, err + } + return offset + 1, nil + case '[': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + offset, err := b.buildIndex(buf[1:]) + if err != nil { + return 0, err + } + return offset + 1, nil + default: + return 0, errors.ErrInvalidPath("expect dot or left bracket character. but found %c character", buf[0]) + } +} + +func (b *PathBuilder) buildSelector(buf []rune) (int, error) { + switch buf[0] { + case '.': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with double dot character") + } + offset, err := b.buildPathRecursive(buf[1:]) + if err != nil { + return 0, err + } + return 1 + offset, nil + case '[', ']', '$', '*': + return 0, errors.ErrInvalidPath("found invalid path character %c after dot", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '$', '*', ']': + return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) + case '.': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + offset, err := b.buildSelector(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '[': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + offset, err := b.buildIndex(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '"': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with double quote character") + } + offset, err := b.buildQuoteSelector(buf[cursor+1:], DoubleQuotePathSelector) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + } + b.addSelectorNode(string(buf)) + return len(buf), nil +} + +func (b *PathBuilder) buildQuoteSelector(buf []rune, sel QuotePathSelector) (int, error) { + switch buf[0] { + case '[', ']', '$', '.', '*', '\'', '"': + return 0, errors.ErrInvalidPath("found invalid path character %c after quote", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '\'': + if sel != SingleQuotePathSelector { + return 0, errors.ErrInvalidPath("found double quote character in field selector with single quote context") + } + if len(buf) <= cursor+1 { + return 0, errors.ErrInvalidPath("JSON Path ends with single quote character in field selector context") + } + if buf[cursor+1] != ']' { + return 0, errors.ErrInvalidPath("expect right bracket for field selector with single quote but found %c", 
buf[cursor+1]) + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + b.singleQuotePathSelector = true + return b.buildNextCharIfExists(buf, cursor+2) + case '"': + if sel != DoubleQuotePathSelector { + return 0, errors.ErrInvalidPath("found single quote character in field selector with double quote context") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + b.doubleQuotePathSelector = true + return b.buildNextCharIfExists(buf, cursor+1) + } + } + return 0, errors.ErrInvalidPath("couldn't find quote character in selector quote path context") +} + +func (b *PathBuilder) buildPathRecursive(buf []rune) (int, error) { + switch buf[0] { + case '.', '[', ']', '$', '*': + return 0, errors.ErrInvalidPath("found invalid path character %c after double dot", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '$', '*', ']': + return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) + case '.': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + selector := buf[:cursor] + b.addRecursiveNode(string(selector)) + offset, err := b.buildSelector(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '[': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + selector := buf[:cursor] + b.addRecursiveNode(string(selector)) + offset, err := b.buildIndex(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + } + b.addRecursiveNode(string(buf)) + return len(buf), nil +} + +func (b *PathBuilder) buildIndex(buf []rune) (int, error) { + switch buf[0] { + case '.', '[', ']', '$': + return 0, errors.ErrInvalidPath("found invalid path character %c after left bracket", buf[0]) + case '\'': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with single quote character") + } + offset, err := b.buildQuoteSelector(buf[1:], SingleQuotePathSelector) + if err != nil { + return 0, err + } + return 1 + offset, nil + case '*': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with star character") + } + if buf[1] != ']' { + return 0, errors.ErrInvalidPath("expect right bracket character for index all path but found %c character", buf[1]) + } + b.addIndexAllNode() + offset := len("*]") + if len(buf) > 2 { + buildOffset, err := b.buildNext(buf[2:]) + if err != nil { + return 0, err + } + return offset + buildOffset, nil + } + return offset, nil + } + + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case ']': + index, err := strconv.ParseInt(string(buf[:cursor]), 10, 64) + if err != nil { + return 0, errors.ErrInvalidPath("%q is unexpected index path", buf[:cursor]) + } + b.addIndexNode(int(index)) + return b.buildNextCharIfExists(buf, cursor+1) + } + } + return 0, errors.ErrInvalidPath("couldn't find right bracket character in index path context") +} + +func (b *PathBuilder) addIndexAllNode() { + node := newPathIndexAllNode() + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addRecursiveNode(selector string) { + node := newPathRecursiveNode(selector) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addSelectorNode(name string) { + node := newPathSelectorNode(name) + if b.root == nil { + b.root = node + b.node = node + 
} else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addIndexNode(idx int) { + node := newPathIndexNode(idx) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +type QuotePathSelector int + +const ( + SingleQuotePathSelector QuotePathSelector = 1 + DoubleQuotePathSelector QuotePathSelector = 2 +) + +type Path struct { + node PathNode + RootSelectorOnly bool + SingleQuotePathSelector bool + DoubleQuotePathSelector bool +} + +func (p *Path) Field(sel string) (PathNode, bool, error) { + if p.node == nil { + return nil, false, nil + } + return p.node.Field(sel) +} + +func (p *Path) Get(src, dst reflect.Value) error { + if p.node == nil { + return nil + } + return p.node.Get(src, dst) +} + +func (p *Path) String() string { + if p.node == nil { + return "$" + } + return p.node.String() +} + +type PathNode interface { + fmt.Stringer + Index(idx int) (PathNode, bool, error) + Field(fieldName string) (PathNode, bool, error) + Get(src, dst reflect.Value) error + chain(PathNode) PathNode + target() bool + single() bool +} + +type BasePathNode struct { + child PathNode +} + +func (n *BasePathNode) chain(node PathNode) PathNode { + n.child = node + return node +} + +func (n *BasePathNode) target() bool { + return n.child == nil +} + +func (n *BasePathNode) single() bool { + return true +} + +type PathSelectorNode struct { + *BasePathNode + selector string +} + +func newPathSelectorNode(selector string) *PathSelectorNode { + return &PathSelectorNode{ + BasePathNode: &BasePathNode{}, + selector: selector, + } +} + +func (n *PathSelectorNode) Index(idx int) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathSelectorNode) Field(fieldName string) (PathNode, bool, error) { + if n.selector == fieldName { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathSelectorNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Map: + iter := src.MapRange() + for iter.Next() { + key, ok := iter.Key().Interface().(string) + if !ok { + return fmt.Errorf("invalid map key type %T", src.Type().Key()) + } + child, found, err := n.Field(key) + if err != nil { + return err + } + if found { + if child != nil { + return child.Get(iter.Value(), dst) + } + return AssignValue(iter.Value(), dst) + } + } + case reflect.Struct: + typ := src.Type() + for i := 0; i < typ.Len(); i++ { + tag := runtime.StructTagFromField(typ.Field(i)) + child, found, err := n.Field(tag.Key) + if err != nil { + return err + } + if found { + if child != nil { + return child.Get(src.Field(i), dst) + } + return AssignValue(src.Field(i), dst) + } + } + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + case reflect.Float64, reflect.String, reflect.Bool: + return AssignValue(src, dst) + } + return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) +} + +func (n *PathSelectorNode) String() string { + s := fmt.Sprintf(".%s", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathIndexNode struct { + *BasePathNode + selector int +} + +func newPathIndexNode(selector int) *PathIndexNode { + return &PathIndexNode{ + BasePathNode: &BasePathNode{}, + selector: selector, + } +} + +func (n *PathIndexNode) Index(idx int) (PathNode, bool, error) { + if n.selector == idx { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathIndexNode) Field(fieldName string) 
(PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathIndexNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Array, reflect.Slice: + if src.Len() > n.selector { + if n.child != nil { + return n.child.Get(src.Index(n.selector), dst) + } + return AssignValue(src.Index(n.selector), dst) + } + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get [%d] value from %s", n.selector, src.Type()) +} + +func (n *PathIndexNode) String() string { + s := fmt.Sprintf("[%d]", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathIndexAllNode struct { + *BasePathNode +} + +func newPathIndexAllNode() *PathIndexAllNode { + return &PathIndexAllNode{ + BasePathNode: &BasePathNode{}, + } +} + +func (n *PathIndexAllNode) Index(idx int) (PathNode, bool, error) { + return n.child, true, nil +} + +func (n *PathIndexAllNode) Field(fieldName string) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathIndexAllNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Array, reflect.Slice: + var arr []interface{} + for i := 0; i < src.Len(); i++ { + var v interface{} + rv := reflect.ValueOf(&v) + if n.child != nil { + if err := n.child.Get(src.Index(i), rv); err != nil { + return err + } + } else { + if err := AssignValue(src.Index(i), rv); err != nil { + return err + } + } + arr = append(arr, v) + } + if err := AssignValue(reflect.ValueOf(arr), dst); err != nil { + return err + } + return nil + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get all value from %s", src.Type()) +} + +func (n *PathIndexAllNode) String() string { + s := "[*]" + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathRecursiveNode struct { + *BasePathNode + selector string +} + +func newPathRecursiveNode(selector string) *PathRecursiveNode { + node := newPathSelectorNode(selector) + return &PathRecursiveNode{ + BasePathNode: &BasePathNode{ + child: node, + }, + selector: selector, + } +} + +func (n *PathRecursiveNode) Field(fieldName string) (PathNode, bool, error) { + if n.selector == fieldName { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathRecursiveNode) Index(_ int) (PathNode, bool, error) { + return n, true, nil +} + +func valueToSliceValue(v interface{}) []interface{} { + rv := reflect.ValueOf(v) + ret := []interface{}{} + if rv.Type().Kind() == reflect.Slice || rv.Type().Kind() == reflect.Array { + for i := 0; i < rv.Len(); i++ { + ret = append(ret, rv.Index(i).Interface()) + } + return ret + } + return []interface{}{v} +} + +func (n *PathRecursiveNode) Get(src, dst reflect.Value) error { + if n.child == nil { + return fmt.Errorf("failed to get by recursive path ..%s", n.selector) + } + var arr []interface{} + switch src.Type().Kind() { + case reflect.Map: + iter := src.MapRange() + for iter.Next() { + key, ok := iter.Key().Interface().(string) + if !ok { + return fmt.Errorf("invalid map key type %T", src.Type().Key()) + } + child, found, err := n.Field(key) + if err != nil { + return err + } + if found { + var v interface{} + rv := reflect.ValueOf(&v) + _ = child.Get(iter.Value(), rv) + arr = append(arr, valueToSliceValue(v)...) 
+ } else { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(iter.Value(), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Struct: + typ := src.Type() + for i := 0; i < typ.Len(); i++ { + tag := runtime.StructTagFromField(typ.Field(i)) + child, found, err := n.Field(tag.Key) + if err != nil { + return err + } + if found { + var v interface{} + rv := reflect.ValueOf(&v) + _ = child.Get(src.Field(i), rv) + arr = append(arr, valueToSliceValue(v)...) + } else { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(src.Field(i), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Array, reflect.Slice: + for i := 0; i < src.Len(); i++ { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(src.Index(i), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) +} + +func (n *PathRecursiveNode) String() string { + s := fmt.Sprintf("..%s", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go new file mode 100644 index 000000000000..ae2299466af5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go @@ -0,0 +1,97 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type ptrDecoder struct { + dec Decoder + typ *runtime.Type + structName string + fieldName string +} + +func newPtrDecoder(dec Decoder, typ *runtime.Type, structName, fieldName string) *ptrDecoder { + return &ptrDecoder{ + dec: dec, + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *ptrDecoder) contentDecoder() Decoder { + dec, ok := d.dec.(*ptrDecoder) + if !ok { + return d.dec + } + return dec.contentDecoder() +} + +//nolint:golint +//go:linkname unsafe_New reflect.unsafe_New +func unsafe_New(*runtime.Type) unsafe.Pointer + +func UnsafeNew(t *runtime.Type) unsafe.Pointer { + return unsafe_New(t) +} + +func (d *ptrDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + if s.skipWhiteSpace() == nul { + s.read() + } + if s.char() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*unsafe.Pointer)(p) = nil + return nil + } + var newptr unsafe.Pointer + if *(*unsafe.Pointer)(p) == nil { + newptr = unsafe_New(d.typ) + *(*unsafe.Pointer)(p) = newptr + } else { + newptr = *(*unsafe.Pointer)(p) + } + if err := d.dec.DecodeStream(s, depth, newptr); err != nil { + return err + } + return nil +} + +func (d *ptrDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + if p != nil { + *(*unsafe.Pointer)(p) = nil + } + cursor += 4 + return cursor, nil + } + var newptr unsafe.Pointer + if *(*unsafe.Pointer)(p) == nil { + newptr = unsafe_New(d.typ) + *(*unsafe.Pointer)(p) = newptr + } else { + newptr = *(*unsafe.Pointer)(p) + } + c, err := d.dec.Decode(ctx, cursor, depth, newptr) + 
if err != nil { + *(*unsafe.Pointer)(p) = nil + return 0, err + } + cursor = c + return cursor, nil +} + +func (d *ptrDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: ptr decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/slice.go b/vendor/github.com/goccy/go-json/internal/decoder/slice.go new file mode 100644 index 000000000000..30a23e4b51ec --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/slice.go @@ -0,0 +1,380 @@ +package decoder + +import ( + "reflect" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +var ( + sliceType = runtime.Type2RType( + reflect.TypeOf((*sliceHeader)(nil)).Elem(), + ) + nilSlice = unsafe.Pointer(&sliceHeader{}) +) + +type sliceDecoder struct { + elemType *runtime.Type + isElemPointerType bool + valueDecoder Decoder + size uintptr + arrayPool sync.Pool + structName string + fieldName string +} + +// If use reflect.SliceHeader, data type is uintptr. +// In this case, Go compiler cannot trace reference created by newArray(). +// So, define using unsafe.Pointer as data type +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +const ( + defaultSliceCapacity = 2 +) + +func newSliceDecoder(dec Decoder, elemType *runtime.Type, size uintptr, structName, fieldName string) *sliceDecoder { + return &sliceDecoder{ + valueDecoder: dec, + elemType: elemType, + isElemPointerType: elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map, + size: size, + arrayPool: sync.Pool{ + New: func() interface{} { + return &sliceHeader{ + data: newArray(elemType, defaultSliceCapacity), + len: 0, + cap: defaultSliceCapacity, + } + }, + }, + structName: structName, + fieldName: fieldName, + } +} + +func (d *sliceDecoder) newSlice(src *sliceHeader) *sliceHeader { + slice := d.arrayPool.Get().(*sliceHeader) + if src.len > 0 { + // copy original elem + if slice.cap < src.cap { + data := newArray(d.elemType, src.cap) + slice = &sliceHeader{data: data, len: src.len, cap: src.cap} + } else { + slice.len = src.len + } + copySlice(d.elemType, *slice, *src) + } else { + slice.len = 0 + } + return slice +} + +func (d *sliceDecoder) releaseSlice(p *sliceHeader) { + d.arrayPool.Put(p) +} + +//go:linkname copySlice reflect.typedslicecopy +func copySlice(elemType *runtime.Type, dst, src sliceHeader) int + +//go:linkname newArray reflect.unsafe_NewArray +func newArray(*runtime.Type, int) unsafe.Pointer + +//go:linkname typedmemmove reflect.typedmemmove +func typedmemmove(t *runtime.Type, dst, src unsafe.Pointer) + +func (d *sliceDecoder) errNumber(offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: "number", + Type: reflect.SliceOf(runtime.RType2Type(d.elemType)), + Struct: d.structName, + Field: d.fieldName, + Offset: offset, + } +} + +func (d *sliceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case 'n': + if err := nullBytes(s); err != nil { + return err + } + typedmemmove(sliceType, p, nilSlice) + return nil + case '[': + s.cursor++ + if s.skipWhiteSpace() == ']' { + dst := (*sliceHeader)(p) + if dst.data == nil { + dst.data = newArray(d.elemType, 0) + } else { + dst.len = 0 + } + s.cursor++ + return nil + } + 
idx := 0 + slice := d.newSlice((*sliceHeader)(p)) + srcLen := slice.len + capacity := slice.cap + data := slice.data + for { + if capacity <= idx { + src := sliceHeader{data: data, len: idx, cap: capacity} + capacity *= 2 + data = newArray(d.elemType, capacity) + dst := sliceHeader{data: data, len: idx, cap: capacity} + copySlice(d.elemType, dst, src) + } + ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size) + + // if srcLen is greater than idx, keep the original reference + if srcLen <= idx { + if d.isElemPointerType { + **(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer + } else { + // assign new element to the slice + typedmemmove(d.elemType, ep, unsafe_New(d.elemType)) + } + } + + if err := d.valueDecoder.DecodeStream(s, depth, ep); err != nil { + return err + } + s.skipWhiteSpace() + RETRY: + switch s.char() { + case ']': + slice.cap = capacity + slice.len = idx + 1 + slice.data = data + dst := (*sliceHeader)(p) + dst.len = idx + 1 + if dst.len > dst.cap { + dst.data = newArray(d.elemType, dst.len) + dst.cap = dst.len + } + copySlice(d.elemType, *dst, *slice) + d.releaseSlice(slice) + s.cursor++ + return nil + case ',': + idx++ + case nul: + if s.read() { + goto RETRY + } + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + goto ERROR + default: + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + goto ERROR + } + s.cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.errNumber(s.totalOffset()) + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("slice", s.totalOffset()) +} + +func (d *sliceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + typedmemmove(sliceType, p, nilSlice) + return cursor, nil + case '[': + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + dst := (*sliceHeader)(p) + if dst.data == nil { + dst.data = newArray(d.elemType, 0) + } else { + dst.len = 0 + } + cursor++ + return cursor, nil + } + idx := 0 + slice := d.newSlice((*sliceHeader)(p)) + srcLen := slice.len + capacity := slice.cap + data := slice.data + for { + if capacity <= idx { + src := sliceHeader{data: data, len: idx, cap: capacity} + capacity *= 2 + data = newArray(d.elemType, capacity) + dst := sliceHeader{data: data, len: idx, cap: capacity} + copySlice(d.elemType, dst, src) + } + ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size) + // if srcLen is greater than idx, keep the original reference + if srcLen <= idx { + if d.isElemPointerType { + **(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer + } else { + // assign new element to the slice + typedmemmove(d.elemType, ep, unsafe_New(d.elemType)) + } + } + c, err := d.valueDecoder.Decode(ctx, cursor, depth, ep) + if err != nil { + return 0, err + } + cursor = c + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + slice.cap = capacity + slice.len = idx + 1 + slice.data = data + dst := (*sliceHeader)(p) + dst.len = idx + 1 + if dst.len > dst.cap { + dst.data = newArray(d.elemType, dst.len) + dst.cap = dst.len + } + copySlice(d.elemType, *dst, *slice) + 
d.releaseSlice(slice) + cursor++ + return cursor, nil + case ',': + idx++ + default: + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + return 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor) + } + cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, d.errNumber(cursor) + default: + return 0, errors.ErrUnexpectedEndOfJSON("slice", cursor) + } + } +} + +func (d *sliceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + ret := [][]byte{} + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + case '[': + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + cursor++ + return ret, cursor, nil + } + idx := 0 + for { + child, found, err := ctx.Option.Path.node.Index(idx) + if err != nil { + return nil, 0, err + } + if found { + if child != nil { + oldPath := ctx.Option.Path.node + ctx.Option.Path.node = child + paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth) + if err != nil { + return nil, 0, err + } + ctx.Option.Path.node = oldPath + ret = append(ret, paths...) + cursor = c + } else { + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + ret = append(ret, buf[start:end]) + cursor = end + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + cursor++ + return ret, cursor, nil + case ',': + idx++ + default: + return nil, 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor) + } + cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, 0, d.errNumber(cursor) + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("slice", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/stream.go b/vendor/github.com/goccy/go-json/internal/decoder/stream.go new file mode 100644 index 000000000000..a383f7259694 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/stream.go @@ -0,0 +1,556 @@ +package decoder + +import ( + "bytes" + "encoding/json" + "io" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +const ( + initBufSize = 512 +) + +type Stream struct { + buf []byte + bufSize int64 + length int64 + r io.Reader + offset int64 + cursor int64 + filledBuffer bool + allRead bool + UseNumber bool + DisallowUnknownFields bool + Option *Option +} + +func NewStream(r io.Reader) *Stream { + return &Stream{ + r: r, + bufSize: initBufSize, + buf: make([]byte, initBufSize), + Option: &Option{}, + } +} + +func (s *Stream) TotalOffset() int64 { + return s.totalOffset() +} + +func (s *Stream) Buffered() io.Reader { + buflen := int64(len(s.buf)) + for i := s.cursor; i < buflen; i++ { + if s.buf[i] == nul { + return bytes.NewReader(s.buf[s.cursor:i]) + } + } + return bytes.NewReader(s.buf[s.cursor:]) +} + +func (s *Stream) PrepareForDecode() error { + for { + switch s.char() { + case ' ', '\t', '\r', '\n': + s.cursor++ + continue + case ',', ':': + s.cursor++ + return nil + case nul: + if s.read() { + continue + } + return io.EOF + } + break + } + return nil +} + +func (s *Stream) 
totalOffset() int64 { + return s.offset + s.cursor +} + +func (s *Stream) char() byte { + return s.buf[s.cursor] +} + +func (s *Stream) equalChar(c byte) bool { + cur := s.buf[s.cursor] + if cur == nul { + s.read() + cur = s.buf[s.cursor] + } + return cur == c +} + +func (s *Stream) stat() ([]byte, int64, unsafe.Pointer) { + return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) bufptr() unsafe.Pointer { + return (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) statForRetry() ([]byte, int64, unsafe.Pointer) { + s.cursor-- // for retry ( because caller progress cursor position in each loop ) + return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) Reset() { + s.reset() + s.bufSize = int64(len(s.buf)) +} + +func (s *Stream) More() bool { + for { + switch s.char() { + case ' ', '\n', '\r', '\t': + s.cursor++ + continue + case '}', ']': + return false + case nul: + if s.read() { + continue + } + return false + } + break + } + return true +} + +func (s *Stream) Token() (interface{}, error) { + for { + c := s.char() + switch c { + case ' ', '\n', '\r', '\t': + s.cursor++ + case '{', '[', ']', '}': + s.cursor++ + return json.Delim(c), nil + case ',', ':': + s.cursor++ + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + bytes := floatBytes(s) + str := *(*string)(unsafe.Pointer(&bytes)) + if s.UseNumber { + return json.Number(str), nil + } + f64, err := strconv.ParseFloat(str, 64) + if err != nil { + return nil, err + } + return f64, nil + case '"': + bytes, err := stringBytes(s) + if err != nil { + return nil, err + } + return string(bytes), nil + case 't': + if err := trueBytes(s); err != nil { + return nil, err + } + return true, nil + case 'f': + if err := falseBytes(s); err != nil { + return nil, err + } + return false, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto END + default: + return nil, errors.ErrInvalidCharacter(s.char(), "token", s.totalOffset()) + } + } +END: + return nil, io.EOF +} + +func (s *Stream) reset() { + s.offset += s.cursor + s.buf = s.buf[s.cursor:] + s.length -= s.cursor + s.cursor = 0 +} + +func (s *Stream) readBuf() []byte { + if s.filledBuffer { + s.bufSize *= 2 + remainBuf := s.buf + s.buf = make([]byte, s.bufSize) + copy(s.buf, remainBuf) + } + remainLen := s.length - s.cursor + remainNotNulCharNum := int64(0) + for i := int64(0); i < remainLen; i++ { + if s.buf[s.cursor+i] == nul { + break + } + remainNotNulCharNum++ + } + s.length = s.cursor + remainNotNulCharNum + return s.buf[s.cursor+remainNotNulCharNum:] +} + +func (s *Stream) read() bool { + if s.allRead { + return false + } + buf := s.readBuf() + last := len(buf) - 1 + buf[last] = nul + n, err := s.r.Read(buf[:last]) + s.length += int64(n) + if n == last { + s.filledBuffer = true + } else { + s.filledBuffer = false + } + if err == io.EOF { + s.allRead = true + } else if err != nil { + return false + } + return true +} + +func (s *Stream) skipWhiteSpace() byte { + p := s.bufptr() +LOOP: + c := char(p, s.cursor) + switch c { + case ' ', '\n', '\t', '\r': + s.cursor++ + goto LOOP + case nul: + if s.read() { + p = s.bufptr() + goto LOOP + } + } + return c +} + +func (s *Stream) skipObject(depth int64) error { + braceCount := 1 + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case '{': + braceCount++ + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) 
+ } + case '}': + braceCount-- + depth-- + if braceCount == 0 { + s.cursor = cursor + 1 + return nil + } + case '[': + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case ']': + depth-- + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("object of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func (s *Stream) skipArray(depth int64) error { + bracketCount := 1 + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case '[': + bracketCount++ + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case ']': + bracketCount-- + depth-- + if bracketCount == 0 { + s.cursor = cursor + 1 + return nil + } + case '{': + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case '}': + depth-- + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("array of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func (s *Stream) skipValue(depth int64) error { + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of object", s.totalOffset()) + case '{': + s.cursor = cursor + 1 + return s.skipObject(depth + 1) + case '[': + s.cursor = cursor + 1 + return s.skipArray(depth + 1) + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) + } + case '"': + s.cursor = cursor + 1 + return nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + for { + cursor++ + c := char(p, cursor) + if floatTable[c] { + continue + } else if c == nul { + if s.read() { + _, cursor, p = s.stat() + continue + } + } + s.cursor = cursor + return nil + } + case 't': + s.cursor = cursor + if err := trueBytes(s); err != nil { + return err + } + return nil + case 'f': + s.cursor = cursor + if err := falseBytes(s); err != nil { + return err + } + return nil + case 'n': + s.cursor = cursor + if err := nullBytes(s); err 
!= nil { + return err + } + return nil + } + cursor++ + } +} + +func nullBytes(s *Stream) error { + // current cursor's character is 'n' + s.cursor++ + if s.char() != 'u' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadNull(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "null", s.totalOffset()) +} + +func trueBytes(s *Stream) error { + // current cursor's character is 't' + s.cursor++ + if s.char() != 'r' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'u' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'e' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadTrue(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "bool(true)", s.totalOffset()) +} + +func falseBytes(s *Stream) error { + // current cursor's character is 'f' + s.cursor++ + if s.char() != 'a' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 's' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'e' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadFalse(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "bool(false)", s.totalOffset()) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/string.go b/vendor/github.com/goccy/go-json/internal/decoder/string.go new file mode 100644 index 000000000000..32602c908ae4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/string.go @@ -0,0 +1,452 @@ +package decoder + +import ( + "bytes" + "fmt" + "reflect" + "unicode" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type stringDecoder struct { + structName string + fieldName string +} + +func newStringDecoder(structName, fieldName string) *stringDecoder { + return &stringDecoder{ + structName: structName, + fieldName: fieldName, + } +} + +func (d *stringDecoder) errUnmarshalType(typeName string, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: typeName, + Type: reflect.TypeOf(""), + Offset: offset, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *stringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) + s.reset() + return nil +} + +func (d *stringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) + return cursor, nil +} + +func (d *stringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth 
int64) ([][]byte, int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} + +var ( + hexToInt = [256]int{ + '0': 0, + '1': 1, + '2': 2, + '3': 3, + '4': 4, + '5': 5, + '6': 6, + '7': 7, + '8': 8, + '9': 9, + 'A': 10, + 'B': 11, + 'C': 12, + 'D': 13, + 'E': 14, + 'F': 15, + 'a': 10, + 'b': 11, + 'c': 12, + 'd': 13, + 'e': 14, + 'f': 15, + } +) + +func unicodeToRune(code []byte) rune { + var r rune + for i := 0; i < len(code); i++ { + r = r*16 + rune(hexToInt[code[i]]) + } + return r +} + +func readAtLeast(s *Stream, n int64, p *unsafe.Pointer) bool { + for s.cursor+n >= s.length { + if !s.read() { + return false + } + *p = s.bufptr() + } + return true +} + +func decodeUnicodeRune(s *Stream, p unsafe.Pointer) (rune, int64, unsafe.Pointer, error) { + const defaultOffset = 5 + const surrogateOffset = 11 + + if !readAtLeast(s, defaultOffset, &p) { + return rune(0), 0, nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) + } + + r := unicodeToRune(s.buf[s.cursor+1 : s.cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + if !readAtLeast(s, surrogateOffset, &p) { + return unicode.ReplacementChar, defaultOffset, p, nil + } + if s.buf[s.cursor+defaultOffset] != '\\' || s.buf[s.cursor+defaultOffset+1] != 'u' { + return unicode.ReplacementChar, defaultOffset, p, nil + } + r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + return r, surrogateOffset, p, nil + } + } + return r, defaultOffset, p, nil +} + +func decodeUnicode(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { + const backSlashAndULen = 2 // length of \u + + r, offset, pp, err := decodeUnicodeRune(s, p) + if err != nil { + return nil, err + } + unicode := []byte(string(r)) + unicodeLen := int64(len(unicode)) + s.buf = append(append(s.buf[:s.cursor-1], unicode...), s.buf[s.cursor+offset:]...) + unicodeOrgLen := offset - 1 + s.length = s.length - (backSlashAndULen + (unicodeOrgLen - unicodeLen)) + s.cursor = s.cursor - backSlashAndULen + unicodeLen + return pp, nil +} + +func decodeEscapeString(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { + s.cursor++ +RETRY: + switch s.buf[s.cursor] { + case '"': + s.buf[s.cursor] = '"' + case '\\': + s.buf[s.cursor] = '\\' + case '/': + s.buf[s.cursor] = '/' + case 'b': + s.buf[s.cursor] = '\b' + case 'f': + s.buf[s.cursor] = '\f' + case 'n': + s.buf[s.cursor] = '\n' + case 'r': + s.buf[s.cursor] = '\r' + case 't': + s.buf[s.cursor] = '\t' + case 'u': + return decodeUnicode(s, p) + case nul: + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) + } + p = s.bufptr() + goto RETRY + default: + return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + s.buf = append(s.buf[:s.cursor-1], s.buf[s.cursor:]...) 
+ s.length-- + s.cursor-- + p = s.bufptr() + return p, nil +} + +var ( + runeErrBytes = []byte(string(utf8.RuneError)) + runeErrBytesLen = int64(len(runeErrBytes)) +) + +func stringBytes(s *Stream) ([]byte, error) { + _, cursor, p := s.stat() + cursor++ // skip double quote char + start := cursor + for { + switch char(p, cursor) { + case '\\': + s.cursor = cursor + pp, err := decodeEscapeString(s, p) + if err != nil { + return nil, err + } + p = pp + cursor = s.cursor + case '"': + literal := s.buf[start:cursor] + cursor++ + s.cursor = cursor + return literal, nil + case + // 0x00 is nul, 0x5c is '\\', 0x22 is '"' . + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, // 0x10-0x1F + 0x20, 0x21 /*0x22,*/, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, // 0x20-0x2F + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, // 0x30-0x3F + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, // 0x40-0x4F + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B /*0x5C,*/, 0x5D, 0x5E, 0x5F, // 0x50-0x5F + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, // 0x60-0x6F + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F: // 0x70-0x7F + // character is ASCII. skip to next char + case + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, // 0x80-0x8F + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, // 0x90-0x9F + 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, // 0xA0-0xAF + 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, // 0xB0-0xBF + 0xC0, 0xC1, // 0xC0-0xC1 + 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF: // 0xF5-0xFE + // character is invalid + s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) + _, _, p = s.stat() + cursor += runeErrBytesLen + s.length += runeErrBytesLen + continue + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + goto ERROR + case 0xEF: + // RuneError is {0xEF, 0xBF, 0xBD} + if s.buf[cursor+1] == 0xBF && s.buf[cursor+2] == 0xBD { + // found RuneError: skip + cursor += 2 + break + } + fallthrough + default: + // multi bytes character + if !utf8.FullRune(s.buf[cursor : len(s.buf)-1]) { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + goto ERROR + } + r, size := utf8.DecodeRune(s.buf[cursor:]) + if r == utf8.RuneError { + s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) 
+ cursor += runeErrBytesLen + s.length += runeErrBytesLen + _, _, p = s.stat() + } else { + cursor += int64(size) + } + continue + } + cursor++ + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) +} + +func (d *stringDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '[': + return nil, d.errUnmarshalType("array", s.totalOffset()) + case '{': + return nil, d.errUnmarshalType("object", s.totalOffset()) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, d.errUnmarshalType("number", s.totalOffset()) + case '"': + return stringBytes(s) + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + } + break + } + return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) +} + +func (d *stringDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + case '[': + return nil, 0, d.errUnmarshalType("array", cursor) + case '{': + return nil, 0, d.errUnmarshalType("object", cursor) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, 0, d.errUnmarshalType("number", cursor) + case '"': + cursor++ + start := cursor + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + escaped := 0 + for { + switch char(b, cursor) { + case '\\': + escaped++ + cursor++ + switch char(b, cursor) { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + cursor++ + case 'u': + buflen := int64(len(buf)) + if cursor+5 >= buflen { + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + for i := int64(1); i <= 4; i++ { + c := char(b, cursor+i) + if !(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')) { + return nil, 0, errors.ErrSyntax(fmt.Sprintf("json: invalid character %c in \\u hexadecimal character escape", c), cursor+i) + } + } + cursor += 5 + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + continue + case '"': + literal := buf[start:cursor] + if escaped > 0 { + literal = literal[:unescapeString(literal)] + } + cursor++ + return literal, cursor, nil + case nul: + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + cursor++ + } + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) + } + } +} + +var unescapeMap = [256]byte{ + '"': '"', + '\\': '\\', + '/': '/', + 'b': '\b', + 'f': '\f', + 'n': '\n', + 'r': '\r', + 't': '\t', +} + +func unsafeAdd(ptr unsafe.Pointer, offset int) unsafe.Pointer { + return unsafe.Pointer(uintptr(ptr) + uintptr(offset)) +} + +func unescapeString(buf []byte) int { + p := (*sliceHeader)(unsafe.Pointer(&buf)).data + end := unsafeAdd(p, len(buf)) + src := unsafeAdd(p, bytes.IndexByte(buf, '\\')) + dst := src + for src != end { + c := char(src, 0) + if c == '\\' { + escapeChar := char(src, 1) + if escapeChar != 'u' { + *(*byte)(dst) = unescapeMap[escapeChar] + src = unsafeAdd(src, 2) + dst = unsafeAdd(dst, 1) + } else { + v1 := hexToInt[char(src, 2)] + v2 := hexToInt[char(src, 3)] + v3 := hexToInt[char(src, 4)] + v4 := hexToInt[char(src, 5)] + code := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) + if code >= 0xd800 && code < 0xdc00 && uintptr(unsafeAdd(src, 11)) < uintptr(end) { + if char(src, 6) == '\\' && 
char(src, 7) == 'u' { + v1 := hexToInt[char(src, 8)] + v2 := hexToInt[char(src, 9)] + v3 := hexToInt[char(src, 10)] + v4 := hexToInt[char(src, 11)] + lo := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) + if lo >= 0xdc00 && lo < 0xe000 { + code = (code-0xd800)<<10 | (lo - 0xdc00) + 0x10000 + src = unsafeAdd(src, 6) + } + } + } + var b [utf8.UTFMax]byte + n := utf8.EncodeRune(b[:], code) + switch n { + case 4: + *(*byte)(unsafeAdd(dst, 3)) = b[3] + fallthrough + case 3: + *(*byte)(unsafeAdd(dst, 2)) = b[2] + fallthrough + case 2: + *(*byte)(unsafeAdd(dst, 1)) = b[1] + fallthrough + case 1: + *(*byte)(unsafeAdd(dst, 0)) = b[0] + } + src = unsafeAdd(src, 6) + dst = unsafeAdd(dst, n) + } + } else { + *(*byte)(dst) = c + src = unsafeAdd(src, 1) + dst = unsafeAdd(dst, 1) + } + } + return int(uintptr(dst) - uintptr(p)) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/struct.go b/vendor/github.com/goccy/go-json/internal/decoder/struct.go new file mode 100644 index 000000000000..313da153b36e --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/struct.go @@ -0,0 +1,845 @@ +package decoder + +import ( + "fmt" + "math" + "math/bits" + "sort" + "strings" + "unicode" + "unicode/utf16" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type structFieldSet struct { + dec Decoder + offset uintptr + isTaggedKey bool + fieldIdx int + key string + keyLen int64 + err error +} + +type structDecoder struct { + fieldMap map[string]*structFieldSet + fieldUniqueNameNum int + stringDecoder *stringDecoder + structName string + fieldName string + isTriedOptimize bool + keyBitmapUint8 [][256]uint8 + keyBitmapUint16 [][256]uint16 + sortedFieldSets []*structFieldSet + keyDecoder func(*structDecoder, []byte, int64) (int64, *structFieldSet, error) + keyStreamDecoder func(*structDecoder, *Stream) (*structFieldSet, string, error) +} + +var ( + largeToSmallTable [256]byte +) + +func init() { + for i := 0; i < 256; i++ { + c := i + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + largeToSmallTable[i] = byte(c) + } +} + +func toASCIILower(s string) string { + b := []byte(s) + for i := range b { + b[i] = largeToSmallTable[b[i]] + } + return string(b) +} + +func newStructDecoder(structName, fieldName string, fieldMap map[string]*structFieldSet) *structDecoder { + return &structDecoder{ + fieldMap: fieldMap, + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + keyDecoder: decodeKey, + keyStreamDecoder: decodeKeyStream, + } +} + +const ( + allowOptimizeMaxKeyLen = 64 + allowOptimizeMaxFieldLen = 16 +) + +func (d *structDecoder) tryOptimize() { + fieldUniqueNameMap := map[string]int{} + fieldIdx := -1 + for k, v := range d.fieldMap { + lower := strings.ToLower(k) + idx, exists := fieldUniqueNameMap[lower] + if exists { + v.fieldIdx = idx + } else { + fieldIdx++ + v.fieldIdx = fieldIdx + } + fieldUniqueNameMap[lower] = fieldIdx + } + d.fieldUniqueNameNum = len(fieldUniqueNameMap) + + if d.isTriedOptimize { + return + } + fieldMap := map[string]*structFieldSet{} + conflicted := map[string]struct{}{} + for k, v := range d.fieldMap { + key := strings.ToLower(k) + if key != k { + if key != toASCIILower(k) { + d.isTriedOptimize = true + return + } + // already exists same key (e.g. 
Hello and HELLO has same lower case key + if _, exists := conflicted[key]; exists { + d.isTriedOptimize = true + return + } + conflicted[key] = struct{}{} + } + if field, exists := fieldMap[key]; exists { + if field != v { + d.isTriedOptimize = true + return + } + } + fieldMap[key] = v + } + + if len(fieldMap) > allowOptimizeMaxFieldLen { + d.isTriedOptimize = true + return + } + + var maxKeyLen int + sortedKeys := []string{} + for key := range fieldMap { + keyLen := len(key) + if keyLen > allowOptimizeMaxKeyLen { + d.isTriedOptimize = true + return + } + if maxKeyLen < keyLen { + maxKeyLen = keyLen + } + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + + // By allocating one extra capacity than `maxKeyLen`, + // it is possible to avoid the process of comparing the index of the key with the length of the bitmap each time. + bitmapLen := maxKeyLen + 1 + if len(sortedKeys) <= 8 { + keyBitmap := make([][256]uint8, bitmapLen) + for i, key := range sortedKeys { + for j := 0; j < len(key); j++ { + c := key[j] + keyBitmap[j][c] |= (1 << uint(i)) + } + d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) + } + d.keyBitmapUint8 = keyBitmap + d.keyDecoder = decodeKeyByBitmapUint8 + d.keyStreamDecoder = decodeKeyByBitmapUint8Stream + } else { + keyBitmap := make([][256]uint16, bitmapLen) + for i, key := range sortedKeys { + for j := 0; j < len(key); j++ { + c := key[j] + keyBitmap[j][c] |= (1 << uint(i)) + } + d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) + } + d.keyBitmapUint16 = keyBitmap + d.keyDecoder = decodeKeyByBitmapUint16 + d.keyStreamDecoder = decodeKeyByBitmapUint16Stream + } +} + +// decode from '\uXXXX' +func decodeKeyCharByUnicodeRune(buf []byte, cursor int64) ([]byte, int64, error) { + const defaultOffset = 4 + const surrogateOffset = 6 + + if cursor+defaultOffset >= int64(len(buf)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + + r := unicodeToRune(buf[cursor : cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + cursor += defaultOffset + if cursor+surrogateOffset >= int64(len(buf)) || buf[cursor] != '\\' || buf[cursor+1] != 'u' { + return []byte(string(unicode.ReplacementChar)), cursor + defaultOffset - 1, nil + } + cursor += 2 + r2 := unicodeToRune(buf[cursor : cursor+defaultOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + return []byte(string(r)), cursor + defaultOffset - 1, nil + } + } + return []byte(string(r)), cursor + defaultOffset - 1, nil +} + +func decodeKeyCharByEscapedChar(buf []byte, cursor int64) ([]byte, int64, error) { + c := buf[cursor] + cursor++ + switch c { + case '"': + return []byte{'"'}, cursor, nil + case '\\': + return []byte{'\\'}, cursor, nil + case '/': + return []byte{'/'}, cursor, nil + case 'b': + return []byte{'\b'}, cursor, nil + case 'f': + return []byte{'\f'}, cursor, nil + case 'n': + return []byte{'\n'}, cursor, nil + case 'r': + return []byte{'\r'}, cursor, nil + case 't': + return []byte{'\t'}, cursor, nil + case 'u': + return decodeKeyCharByUnicodeRune(buf, cursor) + } + return nil, cursor, nil +} + +func decodeKeyByBitmapUint8(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + var ( + curBit uint8 = math.MaxUint8 + ) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case '"': + cursor++ + c := char(b, cursor) + switch c { + case '"': + cursor++ + return cursor, nil, nil + case nul: + return 0, nil, 
errors.ErrUnexpectedEndOfJSON("string", cursor) + } + keyIdx := 0 + bitmap := d.keyBitmapUint8 + start := cursor + for { + c := char(b, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros8(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + if keyLen < field.keyLen { + // early match + return cursor, nil, nil + } + return cursor, field, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + case '\\': + cursor++ + chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) + if err != nil { + return 0, nil, err + } + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor = nextCursor + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor++ + } + default: + return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + } +} + +func decodeKeyByBitmapUint16(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + var ( + curBit uint16 = math.MaxUint16 + ) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case '"': + cursor++ + c := char(b, cursor) + switch c { + case '"': + cursor++ + return cursor, nil, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + keyIdx := 0 + bitmap := d.keyBitmapUint16 + start := cursor + for { + c := char(b, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros16(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + if keyLen < field.keyLen { + // early match + return cursor, nil, nil + } + return cursor, field, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + case '\\': + cursor++ + chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) + if err != nil { + return 0, nil, err + } + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor = nextCursor + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor++ + } + default: + return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + } +} + +func decodeKeyNotFound(b unsafe.Pointer, cursor int64) (int64, *structFieldSet, error) { + for { + cursor++ + switch char(b, cursor) { + case '"': + cursor++ + return cursor, nil, nil + case '\\': + cursor++ + if char(b, cursor) == nul { + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + } +} + +func decodeKey(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + key, c, err := d.stringDecoder.decodeByte(buf, cursor) + if err != nil { + return 0, nil, err + } + cursor = c + k := *(*string)(unsafe.Pointer(&key)) + field, exists := d.fieldMap[k] + if !exists { + return cursor, nil, nil + } + return cursor, field, nil +} + +func decodeKeyByBitmapUint8Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + var ( + curBit uint8 = math.MaxUint8 + ) + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case nul: + s.cursor = cursor + if s.read() 
{ + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + case '"': + cursor++ + FIRST_CHAR: + start := cursor + switch char(p, cursor) { + case '"': + cursor++ + s.cursor = cursor + return nil, "", nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + goto FIRST_CHAR + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + keyIdx := 0 + bitmap := d.keyBitmapUint8 + for { + c := char(p, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros8(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + s.cursor = cursor + if keyLen < field.keyLen { + // early match + return nil, field.key, nil + } + return field, field.key, nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + case '\\': + s.cursor = cursor + 1 // skip '\' char + chars, err := decodeKeyCharByEscapeCharStream(s) + if err != nil { + return nil, "", err + } + cursor = s.cursor + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + cursor++ + } + default: + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + } + } +} + +func decodeKeyByBitmapUint16Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + var ( + curBit uint16 = math.MaxUint16 + ) + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + case '"': + cursor++ + FIRST_CHAR: + start := cursor + switch char(p, cursor) { + case '"': + cursor++ + s.cursor = cursor + return nil, "", nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + goto FIRST_CHAR + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + keyIdx := 0 + bitmap := d.keyBitmapUint16 + for { + c := char(p, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros16(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + s.cursor = cursor + if keyLen < field.keyLen { + // early match + return nil, field.key, nil + } + return field, field.key, nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + case '\\': + s.cursor = cursor + 1 // skip '\' char + chars, err := decodeKeyCharByEscapeCharStream(s) + if err != nil { + return nil, "", err + } + cursor = s.cursor + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + cursor++ + } + default: + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + } + } +} + +// decode from '\uXXXX' +func 
decodeKeyCharByUnicodeRuneStream(s *Stream) ([]byte, error) { + const defaultOffset = 4 + const surrogateOffset = 6 + + if s.cursor+defaultOffset >= s.length { + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped unicode char", s.totalOffset()) + } + } + + r := unicodeToRune(s.buf[s.cursor : s.cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + s.cursor += defaultOffset + if s.cursor+surrogateOffset >= s.length { + s.read() + } + if s.cursor+surrogateOffset >= s.length || s.buf[s.cursor] != '\\' || s.buf[s.cursor+1] != 'u' { + s.cursor += defaultOffset - 1 + return []byte(string(unicode.ReplacementChar)), nil + } + r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + s.cursor += defaultOffset - 1 + return []byte(string(r)), nil + } + } + s.cursor += defaultOffset - 1 + return []byte(string(r)), nil +} + +func decodeKeyCharByEscapeCharStream(s *Stream) ([]byte, error) { + c := s.buf[s.cursor] + s.cursor++ +RETRY: + switch c { + case '"': + return []byte{'"'}, nil + case '\\': + return []byte{'\\'}, nil + case '/': + return []byte{'/'}, nil + case 'b': + return []byte{'\b'}, nil + case 'f': + return []byte{'\f'}, nil + case 'n': + return []byte{'\n'}, nil + case 'r': + return []byte{'\r'}, nil + case 't': + return []byte{'\t'}, nil + case 'u': + return decodeKeyCharByUnicodeRuneStream(s) + case nul: + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped char", s.totalOffset()) + } + goto RETRY + default: + return nil, errors.ErrUnexpectedEndOfJSON("struct field", s.totalOffset()) + } +} + +func decodeKeyNotFoundStream(s *Stream, start int64) (*structFieldSet, string, error) { + buf, cursor, p := s.stat() + for { + cursor++ + switch char(p, cursor) { + case '"': + b := buf[start:cursor] + key := *(*string)(unsafe.Pointer(&b)) + cursor++ + s.cursor = cursor + return nil, key, nil + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if !s.read() { + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + buf, cursor, p = s.statForRetry() + } + case nul: + s.cursor = cursor + if !s.read() { + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + buf, cursor, p = s.statForRetry() + } + } +} + +func decodeKeyStream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + key, err := d.stringDecoder.decodeStreamByte(s) + if err != nil { + return nil, "", err + } + k := *(*string)(unsafe.Pointer(&key)) + return d.fieldMap[k], k, nil +} + +func (d *structDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + c := s.skipWhiteSpace() + switch c { + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + default: + if s.char() != '{' { + return errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) + } + } + s.cursor++ + if s.skipWhiteSpace() == '}' { + s.cursor++ + return nil + } + var ( + seenFields map[int]struct{} + seenFieldNum int + ) + firstWin := (s.Option.Flags & FirstWinOption) != 0 + if firstWin { + seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) + } + for { + s.reset() + field, key, err := d.keyStreamDecoder(d, s) + if err != nil { + return err + } + if s.skipWhiteSpace() != ':' { + return errors.ErrExpected("colon after object key", s.totalOffset()) + } + s.cursor++ + if field != nil { + if field.err != nil 
{ + return field.err + } + if firstWin { + if _, exists := seenFields[field.fieldIdx]; exists { + if err := s.skipValue(depth); err != nil { + return err + } + } else { + if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { + return err + } + seenFieldNum++ + if d.fieldUniqueNameNum <= seenFieldNum { + return s.skipObject(depth) + } + seenFields[field.fieldIdx] = struct{}{} + } + } else { + if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { + return err + } + } + } else if s.DisallowUnknownFields { + return fmt.Errorf("json: unknown field %q", key) + } else { + if err := s.skipValue(depth); err != nil { + return err + } + } + c := s.skipWhiteSpace() + if c == '}' { + s.cursor++ + return nil + } + if c != ',' { + return errors.ErrExpected("comma after object element", s.totalOffset()) + } + s.cursor++ + } +} + +func (d *structDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + buflen := int64(len(buf)) + cursor = skipWhiteSpace(buf, cursor) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + switch char(b, cursor) { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case '{': + default: + return 0, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return cursor, nil + } + var ( + seenFields map[int]struct{} + seenFieldNum int + ) + firstWin := (ctx.Option.Flags & FirstWinOption) != 0 + if firstWin { + seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) + } + for { + c, field, err := d.keyDecoder(d, buf, cursor) + if err != nil { + return 0, err + } + cursor = skipWhiteSpace(buf, c) + if char(b, cursor) != ':' { + return 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + if cursor >= buflen { + return 0, errors.ErrExpected("object value after colon", cursor) + } + if field != nil { + if field.err != nil { + return 0, field.err + } + if firstWin { + if _, exists := seenFields[field.fieldIdx]; exists { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } else { + c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) + if err != nil { + return 0, err + } + cursor = c + seenFieldNum++ + if d.fieldUniqueNameNum <= seenFieldNum { + return skipObject(buf, cursor, depth) + } + seenFields[field.fieldIdx] = struct{}{} + } + } else { + c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) + if err != nil { + return 0, err + } + cursor = c + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + if char(b, cursor) == '}' { + cursor++ + return cursor, nil + } + if char(b, cursor) != ',' { + return 0, errors.ErrExpected("comma after object element", cursor) + } + cursor++ + } +} + +func (d *structDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: struct decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/type.go b/vendor/github.com/goccy/go-json/internal/decoder/type.go new file mode 100644 index 000000000000..beaf3ab866be 
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/type.go
@@ -0,0 +1,30 @@
+package decoder
+
+import (
+	"context"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"unsafe"
+)
+
+type Decoder interface {
+	Decode(*RuntimeContext, int64, int64, unsafe.Pointer) (int64, error)
+	DecodePath(*RuntimeContext, int64, int64) ([][]byte, int64, error)
+	DecodeStream(*Stream, int64, unsafe.Pointer) error
+}
+
+const (
+	nul                   = '\000'
+	maxDecodeNestingDepth = 10000
+)
+
+type unmarshalerContext interface {
+	UnmarshalJSON(context.Context, []byte) error
+}
+
+var (
+	unmarshalJSONType        = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
+	unmarshalJSONContextType = reflect.TypeOf((*unmarshalerContext)(nil)).Elem()
+	unmarshalTextType        = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/uint.go b/vendor/github.com/goccy/go-json/internal/decoder/uint.go
new file mode 100644
index 000000000000..4131731b8e4d
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/uint.go
@@ -0,0 +1,194 @@
+package decoder
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type uintDecoder struct {
+	typ        *runtime.Type
+	kind       reflect.Kind
+	op         func(unsafe.Pointer, uint64)
+	structName string
+	fieldName  string
+}
+
+func newUintDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, uint64)) *uintDecoder {
+	return &uintDecoder{
+		typ:        typ,
+		kind:       typ.Kind(),
+		op:         op,
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
+func (d *uintDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError {
+	return &errors.UnmarshalTypeError{
+		Value:  fmt.Sprintf("number %s", string(buf)),
+		Type:   runtime.RType2Type(d.typ),
+		Offset: offset,
+	}
+}
+
+var (
+	pow10u64 = [...]uint64{
+		1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
+		1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+	}
+	pow10u64Len = len(pow10u64)
+)
+
+func (d *uintDecoder) parseUint(b []byte) (uint64, error) {
+	maxDigit := len(b)
+	if maxDigit > pow10u64Len {
+		return 0, fmt.Errorf("invalid length of number")
+	}
+	sum := uint64(0)
+	for i := 0; i < maxDigit; i++ {
+		c := uint64(b[i]) - 48
+		digitValue := pow10u64[maxDigit-i-1]
+		sum += c * digitValue
+	}
+	return sum, nil
+}
+
+func (d *uintDecoder) decodeStreamByte(s *Stream) ([]byte, error) {
+	for {
+		switch s.char() {
+		case ' ', '\n', '\t', '\r':
+			s.cursor++
+			continue
+		case '0':
+			s.cursor++
+			return numZeroBuf, nil
+		case '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			start := s.cursor
+			for {
+				s.cursor++
+				if numTable[s.char()] {
+					continue
+				} else if s.char() == nul {
+					if s.read() {
+						s.cursor-- // for retry current character
+						continue
+					}
+				}
+				break
+			}
+			num := s.buf[start:s.cursor]
+			return num, nil
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return nil, err
+			}
+			return nil, nil
+		case nul:
+			if s.read() {
+				continue
+			}
+		default:
+			return nil, d.typeError([]byte{s.char()}, s.totalOffset())
+		}
+		break
+	}
+	return nil, errors.ErrUnexpectedEndOfJSON("number(unsigned integer)", s.totalOffset())
+}
+
+func (d *uintDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
+	for {
+		switch buf[cursor] {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case '0':
+			cursor++
+			return numZeroBuf, cursor, nil
+		case '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			start := cursor
+			cursor++
+			for numTable[buf[cursor]] { +
cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, d.typeError([]byte{buf[cursor]}, cursor) + } + } +} + +func (d *uintDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + u64, err := d.parseUint(bytes) + if err != nil { + return d.typeError(bytes, s.totalOffset()) + } + switch d.kind { + case reflect.Uint8: + if (1 << 8) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Uint16: + if (1 << 16) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Uint32: + if (1 << 32) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + } + d.op(p, u64) + return nil +} + +func (d *uintDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + u64, err := d.parseUint(bytes) + if err != nil { + return 0, d.typeError(bytes, cursor) + } + switch d.kind { + case reflect.Uint8: + if (1 << 8) <= u64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Uint16: + if (1 << 16) <= u64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Uint32: + if (1 << 32) <= u64 { + return 0, d.typeError(bytes, cursor) + } + } + d.op(p, u64) + return cursor, nil +} + +func (d *uintDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: uint decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go new file mode 100644 index 000000000000..4cd6dbd573ff --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go @@ -0,0 +1,104 @@ +package decoder + +import ( + "context" + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type unmarshalJSONDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newUnmarshalJSONDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalJSONDecoder { + return &unmarshalJSONDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *unmarshalJSONDecoder) annotateError(cursor int64, err error) { + switch e := err.(type) { + case *errors.UnmarshalTypeError: + e.Struct = d.structName + e.Field = d.fieldName + case *errors.SyntaxError: + e.Offset = cursor + } +} + +func (d *unmarshalJSONDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + switch v := v.(type) { + case unmarshalerContext: + var ctx context.Context + if (s.Option.Flags & ContextOption) != 0 { + ctx = s.Option.Context + } else { + ctx = context.Background() + } + if err := v.UnmarshalJSON(ctx, dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + case json.Unmarshaler: + if err := v.UnmarshalJSON(dst); err != nil { + 
d.annotateError(s.cursor, err) + return err + } + } + return nil +} + +func (d *unmarshalJSONDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + if (ctx.Option.Flags & ContextOption) != 0 { + if err := v.(unmarshalerContext).UnmarshalJSON(ctx.Option.Context, dst); err != nil { + d.annotateError(cursor, err) + return 0, err + } + } else { + if err := v.(json.Unmarshaler).UnmarshalJSON(dst); err != nil { + d.annotateError(cursor, err) + return 0, err + } + } + return end, nil +} + +func (d *unmarshalJSONDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: unmarshal json decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go new file mode 100644 index 000000000000..d711d0f85f06 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go @@ -0,0 +1,285 @@ +package decoder + +import ( + "bytes" + "encoding" + "fmt" + "unicode" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type unmarshalTextDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newUnmarshalTextDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalTextDecoder { + return &unmarshalTextDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *unmarshalTextDecoder) annotateError(cursor int64, err error) { + switch e := err.(type) { + case *errors.UnmarshalTypeError: + e.Struct = d.structName + e.Field = d.fieldName + case *errors.SyntaxError: + e.Offset = cursor + } +} + +var ( + nullbytes = []byte(`null`) +) + +func (d *unmarshalTextDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if len(src) > 0 { + switch src[0] { + case '[': + return &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '{': + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return nil + } + } + } + dst := make([]byte, len(src)) + copy(dst, src) + + if b, ok := unquoteBytes(dst); ok { + dst = b + } + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + if err := v.(encoding.TextUnmarshaler).UnmarshalText(dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + return nil +} + +func (d *unmarshalTextDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err 
!= nil { + return 0, err + } + src := buf[start:end] + if len(src) > 0 { + switch src[0] { + case '[': + return 0, &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '{': + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + } + } + + if s, ok := unquoteBytes(src); ok { + src = s + } + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) + if err := v.(encoding.TextUnmarshaler).UnmarshalText(src); err != nil { + d.annotateError(cursor, err) + return 0, err + } + return end, nil +} + +func (d *unmarshalTextDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: unmarshal text decoder does not support decode path") +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { //nolint: nonamedreturns + length := len(s) + if length < 2 || s[0] != '"' || s[length-1] != '"' { + return + } + s = s[1 : length-1] + length -= 2 + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < length { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == length { + return s, true + } + + b := make([]byte, length+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < length { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= length { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} + +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go new file mode 100644 index 000000000000..0c4e2e6eacfc --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go @@ -0,0 +1,73 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type wrappedStringDecoder struct { + typ *runtime.Type + dec Decoder + stringDecoder *stringDecoder + structName string + fieldName string + isPtrType bool +} + +func newWrappedStringDecoder(typ *runtime.Type, dec Decoder, structName, fieldName string) *wrappedStringDecoder { + return &wrappedStringDecoder{ + typ: typ, + dec: dec, + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + isPtrType: typ.Kind() == reflect.Ptr, + } +} + +func (d *wrappedStringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.stringDecoder.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + if d.isPtrType { + *(*unsafe.Pointer)(p) = nil + } + return nil + } + b := make([]byte, len(bytes)+1) + copy(b, bytes) + if _, err := d.dec.Decode(&RuntimeContext{Buf: b}, 0, depth, p); err != nil { + return err + } + return nil +} + +func (d *wrappedStringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.stringDecoder.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + if d.isPtrType { + *(*unsafe.Pointer)(p) = nil + } + return c, nil + } + bytes = append(bytes, nul) + oldBuf := ctx.Buf + ctx.Buf = bytes + if _, err := d.dec.Decode(ctx, 0, depth, p); err != nil { + return 0, err + } + ctx.Buf = oldBuf + return c, nil +} + +func (d *wrappedStringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: wrapped string decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/code.go b/vendor/github.com/goccy/go-json/internal/encoder/code.go new file mode 100644 index 000000000000..5b08faefc738 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/code.go @@ -0,0 +1,1023 @@ +package encoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type Code interface { + Kind() CodeKind + ToOpcode(*compileContext) Opcodes + Filter(*FieldQuery) Code +} + +type AnonymousCode interface { + ToAnonymousOpcode(*compileContext) Opcodes +} + +type Opcodes []*Opcode + +func (o Opcodes) First() *Opcode { + if len(o) == 0 { + return nil + } + return o[0] +} + +func (o Opcodes) Last() *Opcode { + if len(o) == 0 { + return nil + } + return o[len(o)-1] +} + +func (o Opcodes) Add(codes ...*Opcode) Opcodes { + return append(o, codes...) 
+} + +type CodeKind int + +const ( + CodeKindInterface CodeKind = iota + CodeKindPtr + CodeKindInt + CodeKindUint + CodeKindFloat + CodeKindString + CodeKindBool + CodeKindStruct + CodeKindMap + CodeKindSlice + CodeKindArray + CodeKindBytes + CodeKindMarshalJSON + CodeKindMarshalText + CodeKindRecursive +) + +type IntCode struct { + typ *runtime.Type + bitSize uint8 + isString bool + isPtr bool +} + +func (c *IntCode) Kind() CodeKind { + return CodeKindInt +} + +func (c *IntCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpIntPtr) + case c.isString: + code = newOpCode(ctx, c.typ, OpIntString) + default: + code = newOpCode(ctx, c.typ, OpInt) + } + code.NumBitSize = c.bitSize + ctx.incIndex() + return Opcodes{code} +} + +func (c *IntCode) Filter(_ *FieldQuery) Code { + return c +} + +type UintCode struct { + typ *runtime.Type + bitSize uint8 + isString bool + isPtr bool +} + +func (c *UintCode) Kind() CodeKind { + return CodeKindUint +} + +func (c *UintCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpUintPtr) + case c.isString: + code = newOpCode(ctx, c.typ, OpUintString) + default: + code = newOpCode(ctx, c.typ, OpUint) + } + code.NumBitSize = c.bitSize + ctx.incIndex() + return Opcodes{code} +} + +func (c *UintCode) Filter(_ *FieldQuery) Code { + return c +} + +type FloatCode struct { + typ *runtime.Type + bitSize uint8 + isPtr bool +} + +func (c *FloatCode) Kind() CodeKind { + return CodeKindFloat +} + +func (c *FloatCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + switch c.bitSize { + case 32: + code = newOpCode(ctx, c.typ, OpFloat32Ptr) + default: + code = newOpCode(ctx, c.typ, OpFloat64Ptr) + } + default: + switch c.bitSize { + case 32: + code = newOpCode(ctx, c.typ, OpFloat32) + default: + code = newOpCode(ctx, c.typ, OpFloat64) + } + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *FloatCode) Filter(_ *FieldQuery) Code { + return c +} + +type StringCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *StringCode) Kind() CodeKind { + return CodeKindString +} + +func (c *StringCode) ToOpcode(ctx *compileContext) Opcodes { + isJSONNumberType := c.typ == runtime.Type2RType(jsonNumberType) + var code *Opcode + if c.isPtr { + if isJSONNumberType { + code = newOpCode(ctx, c.typ, OpNumberPtr) + } else { + code = newOpCode(ctx, c.typ, OpStringPtr) + } + } else { + if isJSONNumberType { + code = newOpCode(ctx, c.typ, OpNumber) + } else { + code = newOpCode(ctx, c.typ, OpString) + } + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *StringCode) Filter(_ *FieldQuery) Code { + return c +} + +type BoolCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *BoolCode) Kind() CodeKind { + return CodeKindBool +} + +func (c *BoolCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpBoolPtr) + default: + code = newOpCode(ctx, c.typ, OpBool) + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *BoolCode) Filter(_ *FieldQuery) Code { + return c +} + +type BytesCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *BytesCode) Kind() CodeKind { + return CodeKindBytes +} + +func (c *BytesCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpBytesPtr) + default: + code = newOpCode(ctx, c.typ, OpBytes) + } + ctx.incIndex() + return 
Opcodes{code} +} + +func (c *BytesCode) Filter(_ *FieldQuery) Code { + return c +} + +type SliceCode struct { + typ *runtime.Type + value Code +} + +func (c *SliceCode) Kind() CodeKind { + return CodeKindSlice +} + +func (c *SliceCode) ToOpcode(ctx *compileContext) Opcodes { + // header => opcode => elem => end + // ^ | + // |________| + size := c.typ.Elem().Size() + header := newSliceHeaderCode(ctx, c.typ) + ctx.incIndex() + + ctx.incIndent() + codes := c.value.ToOpcode(ctx) + ctx.decIndent() + + codes.First().Flags |= IndirectFlags + elemCode := newSliceElemCode(ctx, c.typ.Elem(), header, size) + ctx.incIndex() + end := newOpCode(ctx, c.typ, OpSliceEnd) + ctx.incIndex() + header.End = end + header.Next = codes.First() + codes.Last().Next = elemCode + elemCode.Next = codes.First() + elemCode.End = end + return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) +} + +func (c *SliceCode) Filter(_ *FieldQuery) Code { + return c +} + +type ArrayCode struct { + typ *runtime.Type + value Code +} + +func (c *ArrayCode) Kind() CodeKind { + return CodeKindArray +} + +func (c *ArrayCode) ToOpcode(ctx *compileContext) Opcodes { + // header => opcode => elem => end + // ^ | + // |________| + elem := c.typ.Elem() + alen := c.typ.Len() + size := elem.Size() + + header := newArrayHeaderCode(ctx, c.typ, alen) + ctx.incIndex() + + ctx.incIndent() + codes := c.value.ToOpcode(ctx) + ctx.decIndent() + + codes.First().Flags |= IndirectFlags + + elemCode := newArrayElemCode(ctx, elem, header, alen, size) + ctx.incIndex() + + end := newOpCode(ctx, c.typ, OpArrayEnd) + ctx.incIndex() + + header.End = end + header.Next = codes.First() + codes.Last().Next = elemCode + elemCode.Next = codes.First() + elemCode.End = end + + return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) +} + +func (c *ArrayCode) Filter(_ *FieldQuery) Code { + return c +} + +type MapCode struct { + typ *runtime.Type + key Code + value Code +} + +func (c *MapCode) Kind() CodeKind { + return CodeKindMap +} + +func (c *MapCode) ToOpcode(ctx *compileContext) Opcodes { + // header => code => value => code => key => code => value => code => end + // ^ | + // |_______________________| + header := newMapHeaderCode(ctx, c.typ) + ctx.incIndex() + + keyCodes := c.key.ToOpcode(ctx) + + value := newMapValueCode(ctx, c.typ.Elem(), header) + ctx.incIndex() + + ctx.incIndent() + valueCodes := c.value.ToOpcode(ctx) + ctx.decIndent() + + valueCodes.First().Flags |= IndirectFlags + + key := newMapKeyCode(ctx, c.typ.Key(), header) + ctx.incIndex() + + end := newMapEndCode(ctx, c.typ, header) + ctx.incIndex() + + header.Next = keyCodes.First() + keyCodes.Last().Next = value + value.Next = valueCodes.First() + valueCodes.Last().Next = key + key.Next = keyCodes.First() + + header.End = end + key.End = end + value.End = end + return Opcodes{header}.Add(keyCodes...).Add(value).Add(valueCodes...).Add(key).Add(end) +} + +func (c *MapCode) Filter(_ *FieldQuery) Code { + return c +} + +type StructCode struct { + typ *runtime.Type + fields []*StructFieldCode + isPtr bool + disableIndirectConversion bool + isIndirect bool + isRecursive bool +} + +func (c *StructCode) Kind() CodeKind { + return CodeKindStruct +} + +func (c *StructCode) lastFieldCode(field *StructFieldCode, firstField *Opcode) *Opcode { + if isEmbeddedStruct(field) { + return c.lastAnonymousFieldCode(firstField) + } + lastField := firstField + for lastField.NextField != nil { + lastField = lastField.NextField + } + return lastField +} + +func (c *StructCode) lastAnonymousFieldCode(firstField *Opcode) 
*Opcode { + // firstField is special StructHead operation for anonymous structure. + // So, StructHead's next operation is truly struct head operation. + for firstField.Op == OpStructHead || firstField.Op == OpStructField { + firstField = firstField.Next + } + lastField := firstField + for lastField.NextField != nil { + lastField = lastField.NextField + } + return lastField +} + +func (c *StructCode) ToOpcode(ctx *compileContext) Opcodes { + // header => code => structField => code => end + // ^ | + // |__________| + if c.isRecursive { + recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) + recursive.Type = c.typ + ctx.incIndex() + *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) + return Opcodes{recursive} + } + codes := Opcodes{} + var prevField *Opcode + ctx.incIndent() + for idx, field := range c.fields { + isFirstField := idx == 0 + isEndField := idx == len(c.fields)-1 + fieldCodes := field.ToOpcode(ctx, isFirstField, isEndField) + for _, code := range fieldCodes { + if c.isIndirect { + code.Flags |= IndirectFlags + } + } + firstField := fieldCodes.First() + if len(codes) > 0 { + codes.Last().Next = firstField + firstField.Idx = codes.First().Idx + } + if prevField != nil { + prevField.NextField = firstField + } + if isEndField { + endField := fieldCodes.Last() + if len(codes) > 0 { + codes.First().End = endField + } else { + firstField.End = endField + } + codes = codes.Add(fieldCodes...) + break + } + prevField = c.lastFieldCode(field, firstField) + codes = codes.Add(fieldCodes...) + } + if len(codes) == 0 { + head := &Opcode{ + Op: OpStructHead, + Idx: opcodeOffset(ctx.ptrIndex), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + ctx.incOpcodeIndex() + end := &Opcode{ + Op: OpStructEnd, + Idx: opcodeOffset(ctx.ptrIndex), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + head.NextField = end + head.Next = end + head.End = end + codes = codes.Add(head, end) + ctx.incIndex() + } + ctx.decIndent() + ctx.structTypeToCodes[uintptr(unsafe.Pointer(c.typ))] = codes + return codes +} + +func (c *StructCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { + // header => code => structField => code => end + // ^ | + // |__________| + if c.isRecursive { + recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) + recursive.Type = c.typ + ctx.incIndex() + *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) + return Opcodes{recursive} + } + codes := Opcodes{} + var prevField *Opcode + for idx, field := range c.fields { + isFirstField := idx == 0 + isEndField := idx == len(c.fields)-1 + fieldCodes := field.ToAnonymousOpcode(ctx, isFirstField, isEndField) + for _, code := range fieldCodes { + if c.isIndirect { + code.Flags |= IndirectFlags + } + } + firstField := fieldCodes.First() + if len(codes) > 0 { + codes.Last().Next = firstField + firstField.Idx = codes.First().Idx + } + if prevField != nil { + prevField.NextField = firstField + } + if isEndField { + lastField := fieldCodes.Last() + if len(codes) > 0 { + codes.First().End = lastField + } else { + firstField.End = lastField + } + } + prevField = firstField + codes = codes.Add(fieldCodes...) 
+ } + return codes +} + +func (c *StructCode) removeFieldsByTags(tags runtime.StructTags) { + fields := make([]*StructFieldCode, 0, len(c.fields)) + for _, field := range c.fields { + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil && !structCode.isRecursive { + structCode.removeFieldsByTags(tags) + if len(structCode.fields) > 0 { + fields = append(fields, field) + } + continue + } + } + if tags.ExistsKey(field.key) { + continue + } + fields = append(fields, field) + } + c.fields = fields +} + +func (c *StructCode) enableIndirect() { + if c.isIndirect { + return + } + c.isIndirect = true + if len(c.fields) == 0 { + return + } + structCode := c.fields[0].getStruct() + if structCode == nil { + return + } + structCode.enableIndirect() +} + +func (c *StructCode) Filter(query *FieldQuery) Code { + fieldMap := map[string]*FieldQuery{} + for _, field := range query.Fields { + fieldMap[field.Name] = field + } + fields := make([]*StructFieldCode, 0, len(c.fields)) + for _, field := range c.fields { + query, exists := fieldMap[field.key] + if !exists { + continue + } + fieldCode := &StructFieldCode{ + typ: field.typ, + key: field.key, + tag: field.tag, + value: field.value, + offset: field.offset, + isAnonymous: field.isAnonymous, + isTaggedKey: field.isTaggedKey, + isNilableType: field.isNilableType, + isNilCheck: field.isNilCheck, + isAddrForMarshaler: field.isAddrForMarshaler, + isNextOpPtrType: field.isNextOpPtrType, + } + if len(query.Fields) > 0 { + fieldCode.value = fieldCode.value.Filter(query) + } + fields = append(fields, fieldCode) + } + return &StructCode{ + typ: c.typ, + fields: fields, + isPtr: c.isPtr, + disableIndirectConversion: c.disableIndirectConversion, + isIndirect: c.isIndirect, + isRecursive: c.isRecursive, + } +} + +type StructFieldCode struct { + typ *runtime.Type + key string + tag *runtime.StructTag + value Code + offset uintptr + isAnonymous bool + isTaggedKey bool + isNilableType bool + isNilCheck bool + isAddrForMarshaler bool + isNextOpPtrType bool + isMarshalerContext bool +} + +func (c *StructFieldCode) getStruct() *StructCode { + value := c.value + ptr, ok := value.(*PtrCode) + if ok { + value = ptr.value + } + structCode, ok := value.(*StructCode) + if ok { + return structCode + } + return nil +} + +func (c *StructFieldCode) getAnonymousStruct() *StructCode { + if !c.isAnonymous { + return nil + } + return c.getStruct() +} + +func optimizeStructHeader(code *Opcode, tag *runtime.StructTag) OpType { + headType := code.ToHeaderType(tag.IsString) + if tag.IsOmitEmpty { + headType = headType.HeadToOmitEmptyHead() + } + return headType +} + +func optimizeStructField(code *Opcode, tag *runtime.StructTag) OpType { + fieldType := code.ToFieldType(tag.IsString) + if tag.IsOmitEmpty { + fieldType = fieldType.FieldToOmitEmptyField() + } + return fieldType +} + +func (c *StructFieldCode) headerOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { + value := valueCodes.First() + op := optimizeStructHeader(value, c.tag) + field.Op = op + if value.Flags&MarshalerContextFlags != 0 { + field.Flags |= MarshalerContextFlags + } + field.NumBitSize = value.NumBitSize + field.PtrNum = value.PtrNum + field.FieldQuery = value.FieldQuery + fieldCodes := Opcodes{field} + if op.IsMultipleOpHead() { + field.Next = value + fieldCodes = fieldCodes.Add(valueCodes...) 
+ } else { + ctx.decIndex() + } + return fieldCodes +} + +func (c *StructFieldCode) fieldOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { + value := valueCodes.First() + op := optimizeStructField(value, c.tag) + field.Op = op + if value.Flags&MarshalerContextFlags != 0 { + field.Flags |= MarshalerContextFlags + } + field.NumBitSize = value.NumBitSize + field.PtrNum = value.PtrNum + field.FieldQuery = value.FieldQuery + + fieldCodes := Opcodes{field} + if op.IsMultipleOpField() { + field.Next = value + fieldCodes = fieldCodes.Add(valueCodes...) + } else { + ctx.decIndex() + } + return fieldCodes +} + +func (c *StructFieldCode) addStructEndCode(ctx *compileContext, codes Opcodes) Opcodes { + end := &Opcode{ + Op: OpStructEnd, + Idx: opcodeOffset(ctx.ptrIndex), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + codes.Last().Next = end + code := codes.First() + for code.Op == OpStructField || code.Op == OpStructHead { + code = code.Next + } + for code.NextField != nil { + code = code.NextField + } + code.NextField = end + + codes = codes.Add(end) + ctx.incOpcodeIndex() + return codes +} + +func (c *StructFieldCode) structKey(ctx *compileContext) string { + if ctx.escapeKey { + rctx := &RuntimeContext{Option: &Option{Flag: HTMLEscapeOption}} + return fmt.Sprintf(`%s:`, string(AppendString(rctx, []byte{}, c.key))) + } + return fmt.Sprintf(`"%s":`, c.key) +} + +func (c *StructFieldCode) flags() OpFlags { + var flags OpFlags + if c.isTaggedKey { + flags |= IsTaggedKeyFlags + } + if c.isNilableType { + flags |= IsNilableTypeFlags + } + if c.isNilCheck { + flags |= NilCheckFlags + } + if c.isAddrForMarshaler { + flags |= AddrForMarshalerFlags + } + if c.isNextOpPtrType { + flags |= IsNextOpPtrTypeFlags + } + if c.isAnonymous { + flags |= AnonymousKeyFlags + } + if c.isMarshalerContext { + flags |= MarshalerContextFlags + } + return flags +} + +func (c *StructFieldCode) toValueOpcodes(ctx *compileContext) Opcodes { + if c.isAnonymous { + anonymCode, ok := c.value.(AnonymousCode) + if ok { + return anonymCode.ToAnonymousOpcode(ctx) + } + } + return c.value.ToOpcode(ctx) +} + +func (c *StructFieldCode) ToOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { + field := &Opcode{ + Idx: opcodeOffset(ctx.ptrIndex), + Flags: c.flags(), + Key: c.structKey(ctx), + Offset: uint32(c.offset), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + DisplayKey: c.key, + } + ctx.incIndex() + valueCodes := c.toValueOpcodes(ctx) + if isFirstField { + codes := c.headerOpcodes(ctx, field, valueCodes) + if isEndField { + codes = c.addStructEndCode(ctx, codes) + } + return codes + } + codes := c.fieldOpcodes(ctx, field, valueCodes) + if isEndField { + if isEnableStructEndOptimization(c.value) { + field.Op = field.Op.FieldToEnd() + } else { + codes = c.addStructEndCode(ctx, codes) + } + } + return codes +} + +func (c *StructFieldCode) ToAnonymousOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { + field := &Opcode{ + Idx: opcodeOffset(ctx.ptrIndex), + Flags: c.flags() | AnonymousHeadFlags, + Key: c.structKey(ctx), + Offset: uint32(c.offset), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + DisplayKey: c.key, + } + ctx.incIndex() + valueCodes := c.toValueOpcodes(ctx) + if isFirstField { + return c.headerOpcodes(ctx, field, valueCodes) + } + return c.fieldOpcodes(ctx, field, valueCodes) +} + +func isEnableStructEndOptimization(value Code) bool { + switch value.Kind() { + case CodeKindInt, + CodeKindUint, + CodeKindFloat, + 
CodeKindString, + CodeKindBool, + CodeKindBytes: + return true + case CodeKindPtr: + return isEnableStructEndOptimization(value.(*PtrCode).value) + default: + return false + } +} + +type InterfaceCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isPtr bool +} + +func (c *InterfaceCode) Kind() CodeKind { + return CodeKindInterface +} + +func (c *InterfaceCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpInterfacePtr) + default: + code = newOpCode(ctx, c.typ, OpInterface) + } + code.FieldQuery = c.fieldQuery + if c.typ.NumMethod() > 0 { + code.Flags |= NonEmptyInterfaceFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *InterfaceCode) Filter(query *FieldQuery) Code { + return &InterfaceCode{ + typ: c.typ, + fieldQuery: query, + isPtr: c.isPtr, + } +} + +type MarshalJSONCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isAddrForMarshaler bool + isNilableType bool + isMarshalerContext bool +} + +func (c *MarshalJSONCode) Kind() CodeKind { + return CodeKindMarshalJSON +} + +func (c *MarshalJSONCode) ToOpcode(ctx *compileContext) Opcodes { + code := newOpCode(ctx, c.typ, OpMarshalJSON) + code.FieldQuery = c.fieldQuery + if c.isAddrForMarshaler { + code.Flags |= AddrForMarshalerFlags + } + if c.isMarshalerContext { + code.Flags |= MarshalerContextFlags + } + if c.isNilableType { + code.Flags |= IsNilableTypeFlags + } else { + code.Flags &= ^IsNilableTypeFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *MarshalJSONCode) Filter(query *FieldQuery) Code { + return &MarshalJSONCode{ + typ: c.typ, + fieldQuery: query, + isAddrForMarshaler: c.isAddrForMarshaler, + isNilableType: c.isNilableType, + isMarshalerContext: c.isMarshalerContext, + } +} + +type MarshalTextCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isAddrForMarshaler bool + isNilableType bool +} + +func (c *MarshalTextCode) Kind() CodeKind { + return CodeKindMarshalText +} + +func (c *MarshalTextCode) ToOpcode(ctx *compileContext) Opcodes { + code := newOpCode(ctx, c.typ, OpMarshalText) + code.FieldQuery = c.fieldQuery + if c.isAddrForMarshaler { + code.Flags |= AddrForMarshalerFlags + } + if c.isNilableType { + code.Flags |= IsNilableTypeFlags + } else { + code.Flags &= ^IsNilableTypeFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *MarshalTextCode) Filter(query *FieldQuery) Code { + return &MarshalTextCode{ + typ: c.typ, + fieldQuery: query, + isAddrForMarshaler: c.isAddrForMarshaler, + isNilableType: c.isNilableType, + } +} + +type PtrCode struct { + typ *runtime.Type + value Code + ptrNum uint8 +} + +func (c *PtrCode) Kind() CodeKind { + return CodeKindPtr +} + +func (c *PtrCode) ToOpcode(ctx *compileContext) Opcodes { + codes := c.value.ToOpcode(ctx) + codes.First().Op = convertPtrOp(codes.First()) + codes.First().PtrNum = c.ptrNum + return codes +} + +func (c *PtrCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { + var codes Opcodes + anonymCode, ok := c.value.(AnonymousCode) + if ok { + codes = anonymCode.ToAnonymousOpcode(ctx) + } else { + codes = c.value.ToOpcode(ctx) + } + codes.First().Op = convertPtrOp(codes.First()) + codes.First().PtrNum = c.ptrNum + return codes +} + +func (c *PtrCode) Filter(query *FieldQuery) Code { + return &PtrCode{ + typ: c.typ, + value: c.value.Filter(query), + ptrNum: c.ptrNum, + } +} + +func convertPtrOp(code *Opcode) OpType { + ptrHeadOp := code.Op.HeadToPtrHead() + if code.Op != ptrHeadOp { + if code.PtrNum > 0 { + // ptr field and ptr head + 
code.PtrNum-- + } + return ptrHeadOp + } + switch code.Op { + case OpInt: + return OpIntPtr + case OpUint: + return OpUintPtr + case OpFloat32: + return OpFloat32Ptr + case OpFloat64: + return OpFloat64Ptr + case OpString: + return OpStringPtr + case OpBool: + return OpBoolPtr + case OpBytes: + return OpBytesPtr + case OpNumber: + return OpNumberPtr + case OpArray: + return OpArrayPtr + case OpSlice: + return OpSlicePtr + case OpMap: + return OpMapPtr + case OpMarshalJSON: + return OpMarshalJSONPtr + case OpMarshalText: + return OpMarshalTextPtr + case OpInterface: + return OpInterfacePtr + case OpRecursive: + return OpRecursivePtr + } + return code.Op +} + +func isEmbeddedStruct(field *StructFieldCode) bool { + if !field.isAnonymous { + return false + } + t := field.typ + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t.Kind() == reflect.Struct +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compact.go b/vendor/github.com/goccy/go-json/internal/encoder/compact.go new file mode 100644 index 000000000000..e287a6c03f43 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compact.go @@ -0,0 +1,286 @@ +package encoder + +import ( + "bytes" + "fmt" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +var ( + isWhiteSpace = [256]bool{ + ' ': true, + '\n': true, + '\t': true, + '\r': true, + } + isHTMLEscapeChar = [256]bool{ + '<': true, + '>': true, + '&': true, + } + nul = byte('\000') +) + +func Compact(buf *bytes.Buffer, src []byte, escape bool) error { + if len(src) == 0 { + return errors.ErrUnexpectedEndOfJSON("", 0) + } + buf.Grow(len(src)) + dst := buf.Bytes() + + ctx := TakeRuntimeContext() + ctxBuf := ctx.Buf[:0] + ctxBuf = append(append(ctxBuf, src...), nul) + ctx.Buf = ctxBuf + + if err := compactAndWrite(buf, dst, ctxBuf, escape); err != nil { + ReleaseRuntimeContext(ctx) + return err + } + ReleaseRuntimeContext(ctx) + return nil +} + +func compactAndWrite(buf *bytes.Buffer, dst []byte, src []byte, escape bool) error { + dst, err := compact(dst, src, escape) + if err != nil { + return err + } + if _, err := buf.Write(dst); err != nil { + return err + } + return nil +} + +func compact(dst, src []byte, escape bool) ([]byte, error) { + buf, cursor, err := compactValue(dst, src, 0, escape) + if err != nil { + return nil, err + } + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return buf, nil +} + +func validateEndBuf(src []byte, cursor int64) error { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case nul: + return nil + } + return errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]), + cursor+1, + ) + } +} + +func skipWhiteSpace(buf []byte, cursor int64) int64 { +LOOP: + if isWhiteSpace[buf[cursor]] { + cursor++ + goto LOOP + } + return cursor +} + +func compactValue(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return compactObject(dst, src, cursor, escape) + case '}': + return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor) + case '[': + return compactArray(dst, src, cursor, escape) + case ']': + return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor) + case '"': + return compactString(dst, src, cursor, escape) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return compactNumber(dst, src, cursor) + case 't': + return compactTrue(dst, src, cursor) + case 
'f': + return compactFalse(dst, src, cursor) + case 'n': + return compactNull(dst, src, cursor) + default: + return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor) + } + } +} + +func compactObject(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] == '{' { + dst = append(dst, '{') + } else { + return nil, 0, errors.ErrExpected("expected { character for object value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == '}' { + dst = append(dst, '}') + return dst, cursor + 1, nil + } + var err error + for { + cursor = skipWhiteSpace(src, cursor) + dst, cursor, err = compactString(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + if src[cursor] != ':' { + return nil, 0, errors.ErrExpected("colon after object key", cursor) + } + dst = append(dst, ':') + dst, cursor, err = compactValue(dst, src, cursor+1, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case '}': + dst = append(dst, '}') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} + +func compactArray(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] == '[' { + dst = append(dst, '[') + } else { + return nil, 0, errors.ErrExpected("expected [ character for array value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == ']' { + dst = append(dst, ']') + return dst, cursor + 1, nil + } + var err error + for { + dst, cursor, err = compactValue(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case ']': + dst = append(dst, ']') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrExpected("comma after array value", cursor) + } + cursor++ + } +} + +func compactString(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] != '"' { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "string", cursor) + } + start := cursor + for { + cursor++ + c := src[cursor] + if escape { + if isHTMLEscapeChar[c] { + dst = append(dst, src[start:cursor]...) + dst = append(dst, `\u00`...) + dst = append(dst, hex[c>>4], hex[c&0xF]) + start = cursor + 1 + } else if c == 0xE2 && cursor+2 < int64(len(src)) && src[cursor+1] == 0x80 && src[cursor+2]&^1 == 0xA8 { + dst = append(dst, src[start:cursor]...) + dst = append(dst, `\u202`...) + dst = append(dst, hex[src[cursor+2]&0xF]) + start = cursor + 3 + cursor += 2 + } + } + switch c { + case '\\': + cursor++ + if src[cursor] == nul { + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src))) + } + case '"': + cursor++ + return append(dst, src[start:cursor]...), cursor, nil + case nul: + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src))) + } + } +} + +func compactNumber(dst, src []byte, cursor int64) ([]byte, int64, error) { + start := cursor + for { + cursor++ + if floatTable[src[cursor]] { + continue + } + break + } + num := src[start:cursor] + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&num)), 64); err != nil { + return nil, 0, err + } + dst = append(dst, num...) 
+ return dst, cursor, nil +} + +func compactTrue(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+3 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("true", cursor) + } + if !bytes.Equal(src[cursor:cursor+4], []byte(`true`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "true", cursor) + } + dst = append(dst, "true"...) + cursor += 4 + return dst, cursor, nil +} + +func compactFalse(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+4 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("false", cursor) + } + if !bytes.Equal(src[cursor:cursor+5], []byte(`false`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "false", cursor) + } + dst = append(dst, "false"...) + cursor += 5 + return dst, cursor, nil +} + +func compactNull(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+3 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("null", cursor) + } + if !bytes.Equal(src[cursor:cursor+4], []byte(`null`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "null", cursor) + } + dst = append(dst, "null"...) + cursor += 4 + return dst, cursor, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go new file mode 100644 index 000000000000..b107636890af --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -0,0 +1,939 @@ +package encoder + +import ( + "context" + "encoding" + "encoding/json" + "reflect" + "sync" + "sync/atomic" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type marshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +var ( + marshalJSONType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + marshalJSONContextType = reflect.TypeOf((*marshalerContext)(nil)).Elem() + marshalTextType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + jsonNumberType = reflect.TypeOf(json.Number("")) + cachedOpcodeSets []*OpcodeSet + cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet + typeAddr *runtime.TypeAddr + initEncoderOnce sync.Once +) + +func initEncoder() { + initEncoderOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) +} + +func loadOpcodeMap() map[uintptr]*OpcodeSet { + p := atomic.LoadPointer(&cachedOpcodeMap) + return *(*map[uintptr]*OpcodeSet)(unsafe.Pointer(&p)) +} + +func storeOpcodeSet(typ uintptr, set *OpcodeSet, m map[uintptr]*OpcodeSet) { + newOpcodeMap := make(map[uintptr]*OpcodeSet, len(m)+1) + newOpcodeMap[typ] = set + + for k, v := range m { + newOpcodeMap[k] = v + } + + atomic.StorePointer(&cachedOpcodeMap, *(*unsafe.Pointer)(unsafe.Pointer(&newOpcodeMap))) +} + +func compileToGetCodeSetSlowPath(typeptr uintptr) (*OpcodeSet, error) { + opcodeMap := loadOpcodeMap() + if codeSet, exists := opcodeMap[typeptr]; exists { + return codeSet, nil + } + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + storeOpcodeSet(typeptr, codeSet, opcodeMap) + return codeSet, nil +} + +func getFilteredCodeSetIfNeeded(ctx *RuntimeContext, codeSet *OpcodeSet) (*OpcodeSet, error) { + if (ctx.Option.Flag & ContextOption) == 0 { + return codeSet, nil + } + query := FieldQueryFromContext(ctx.Option.Context) + if query == nil { + return codeSet, nil + } + ctx.Option.Flag |= 
FieldQueryOption + cacheCodeSet := codeSet.getQueryCache(query.Hash()) + if cacheCodeSet != nil { + return cacheCodeSet, nil + } + queryCodeSet, err := newCompiler().codeToOpcodeSet(codeSet.Type, codeSet.Code.Filter(query)) + if err != nil { + return nil, err + } + codeSet.setQueryCache(query.Hash(), queryCodeSet) + return queryCodeSet, nil +} + +type Compiler struct { + structTypeToCode map[uintptr]*StructCode +} + +func newCompiler() *Compiler { + return &Compiler{ + structTypeToCode: map[uintptr]*StructCode{}, + } +} + +func (c *Compiler) compile(typeptr uintptr) (*OpcodeSet, error) { + // noescape trick for header.typ ( reflect.*rtype ) + typ := *(**runtime.Type)(unsafe.Pointer(&typeptr)) + code, err := c.typeToCode(typ) + if err != nil { + return nil, err + } + return c.codeToOpcodeSet(typ, code) +} + +func (c *Compiler) codeToOpcodeSet(typ *runtime.Type, code Code) (*OpcodeSet, error) { + noescapeKeyCode := c.codeToOpcode(&compileContext{ + structTypeToCodes: map[uintptr]Opcodes{}, + recursiveCodes: &Opcodes{}, + }, typ, code) + if err := noescapeKeyCode.Validate(); err != nil { + return nil, err + } + escapeKeyCode := c.codeToOpcode(&compileContext{ + structTypeToCodes: map[uintptr]Opcodes{}, + recursiveCodes: &Opcodes{}, + escapeKey: true, + }, typ, code) + noescapeKeyCode = copyOpcode(noescapeKeyCode) + escapeKeyCode = copyOpcode(escapeKeyCode) + setTotalLengthToInterfaceOp(noescapeKeyCode) + setTotalLengthToInterfaceOp(escapeKeyCode) + interfaceNoescapeKeyCode := copyToInterfaceOpcode(noescapeKeyCode) + interfaceEscapeKeyCode := copyToInterfaceOpcode(escapeKeyCode) + codeLength := noescapeKeyCode.TotalLength() + return &OpcodeSet{ + Type: typ, + NoescapeKeyCode: noescapeKeyCode, + EscapeKeyCode: escapeKeyCode, + InterfaceNoescapeKeyCode: interfaceNoescapeKeyCode, + InterfaceEscapeKeyCode: interfaceEscapeKeyCode, + CodeLength: codeLength, + EndCode: ToEndCode(interfaceNoescapeKeyCode), + Code: code, + QueryCache: map[string]*OpcodeSet{}, + }, nil +} + +func (c *Compiler) typeToCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + + isPtr := false + orgType := typ + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + isPtr = true + } + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(orgType) + case c.implementsMarshalText(typ): + return c.marshalTextCode(orgType) + } + switch typ.Kind() { + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + p := runtime.PtrTo(elem) + if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { + return c.bytesCode(typ, isPtr) + } + } + return c.sliceCode(typ) + case reflect.Map: + if isPtr { + return c.ptrCode(runtime.PtrTo(typ)) + } + return c.mapCode(typ) + case reflect.Struct: + return c.structCode(typ, isPtr) + case reflect.Int: + return c.intCode(typ, isPtr) + case reflect.Int8: + return c.int8Code(typ, isPtr) + case reflect.Int16: + return c.int16Code(typ, isPtr) + case reflect.Int32: + return c.int32Code(typ, isPtr) + case reflect.Int64: + return c.int64Code(typ, isPtr) + case reflect.Uint, reflect.Uintptr: + return c.uintCode(typ, isPtr) + case reflect.Uint8: + return c.uint8Code(typ, isPtr) + case reflect.Uint16: + return c.uint16Code(typ, isPtr) + case reflect.Uint32: + return c.uint32Code(typ, isPtr) + case reflect.Uint64: + return c.uint64Code(typ, isPtr) + case reflect.Float32: + return c.float32Code(typ, isPtr) + case reflect.Float64: 
+ return c.float64Code(typ, isPtr) + case reflect.String: + return c.stringCode(typ, isPtr) + case reflect.Bool: + return c.boolCode(typ, isPtr) + case reflect.Interface: + return c.interfaceCode(typ, isPtr) + default: + if isPtr && typ.Implements(marshalTextType) { + typ = orgType + } + return c.typeToCodeWithPtr(typ, isPtr) + } +} + +func (c *Compiler) typeToCodeWithPtr(typ *runtime.Type, isPtr bool) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + switch typ.Kind() { + case reflect.Ptr: + return c.ptrCode(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + p := runtime.PtrTo(elem) + if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { + return c.bytesCode(typ, false) + } + } + return c.sliceCode(typ) + case reflect.Array: + return c.arrayCode(typ) + case reflect.Map: + return c.mapCode(typ) + case reflect.Struct: + return c.structCode(typ, isPtr) + case reflect.Interface: + return c.interfaceCode(typ, false) + case reflect.Int: + return c.intCode(typ, false) + case reflect.Int8: + return c.int8Code(typ, false) + case reflect.Int16: + return c.int16Code(typ, false) + case reflect.Int32: + return c.int32Code(typ, false) + case reflect.Int64: + return c.int64Code(typ, false) + case reflect.Uint: + return c.uintCode(typ, false) + case reflect.Uint8: + return c.uint8Code(typ, false) + case reflect.Uint16: + return c.uint16Code(typ, false) + case reflect.Uint32: + return c.uint32Code(typ, false) + case reflect.Uint64: + return c.uint64Code(typ, false) + case reflect.Uintptr: + return c.uintCode(typ, false) + case reflect.Float32: + return c.float32Code(typ, false) + case reflect.Float64: + return c.float64Code(typ, false) + case reflect.String: + return c.stringCode(typ, false) + case reflect.Bool: + return c.boolCode(typ, false) + } + return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} +} + +const intSize = 32 << (^uint(0) >> 63) + +//nolint:unparam +func (c *Compiler) intCode(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int8Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int16Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int32Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int64Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uintCode(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint8Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint16Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint32Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + 
+//nolint:unparam +func (c *Compiler) uint64Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) float32Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { + return &FloatCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) float64Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { + return &FloatCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) stringCode(typ *runtime.Type, isPtr bool) (*StringCode, error) { + return &StringCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) boolCode(typ *runtime.Type, isPtr bool) (*BoolCode, error) { + return &BoolCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) intStringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: intSize, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int8StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 8, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int16StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 16, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int32StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 32, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int64StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 64, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uintStringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: intSize, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint8StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 8, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint16StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 16, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint32StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 32, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint64StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 64, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) bytesCode(typ *runtime.Type, isPtr bool) (*BytesCode, error) { + return &BytesCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) interfaceCode(typ *runtime.Type, isPtr bool) (*InterfaceCode, error) { + return &InterfaceCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) marshalJSONCode(typ *runtime.Type) (*MarshalJSONCode, error) { + return &MarshalJSONCode{ + typ: typ, + isAddrForMarshaler: c.isPtrMarshalJSONType(typ), + isNilableType: c.isNilableType(typ), + isMarshalerContext: typ.Implements(marshalJSONContextType) || runtime.PtrTo(typ).Implements(marshalJSONContextType), + }, nil +} + +//nolint:unparam +func (c *Compiler) marshalTextCode(typ *runtime.Type) (*MarshalTextCode, error) { + return &MarshalTextCode{ + typ: typ, + isAddrForMarshaler: c.isPtrMarshalTextType(typ), + isNilableType: c.isNilableType(typ), + }, nil +} + +func (c *Compiler) ptrCode(typ *runtime.Type) (*PtrCode, error) { + code, err := c.typeToCodeWithPtr(typ.Elem(), true) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + 
if ok { + return &PtrCode{typ: typ, value: ptr.value, ptrNum: ptr.ptrNum + 1}, nil + } + return &PtrCode{typ: typ, value: code, ptrNum: 1}, nil +} + +func (c *Compiler) sliceCode(typ *runtime.Type) (*SliceCode, error) { + elem := typ.Elem() + code, err := c.listElemCode(elem) + if err != nil { + return nil, err + } + if code.Kind() == CodeKindStruct { + structCode := code.(*StructCode) + structCode.enableIndirect() + } + return &SliceCode{typ: typ, value: code}, nil +} + +func (c *Compiler) arrayCode(typ *runtime.Type) (*ArrayCode, error) { + elem := typ.Elem() + code, err := c.listElemCode(elem) + if err != nil { + return nil, err + } + if code.Kind() == CodeKindStruct { + structCode := code.(*StructCode) + structCode.enableIndirect() + } + return &ArrayCode{typ: typ, value: code}, nil +} + +func (c *Compiler) mapCode(typ *runtime.Type) (*MapCode, error) { + keyCode, err := c.mapKeyCode(typ.Key()) + if err != nil { + return nil, err + } + valueCode, err := c.mapValueCode(typ.Elem()) + if err != nil { + return nil, err + } + if valueCode.Kind() == CodeKindStruct { + structCode := valueCode.(*StructCode) + structCode.enableIndirect() + } + return &MapCode{typ: typ, key: keyCode, value: valueCode}, nil +} + +func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalJSONType(typ) || c.implementsMarshalJSONType(runtime.PtrTo(typ)): + return c.marshalJSONCode(typ) + case !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType): + return c.marshalTextCode(typ) + case typ.Kind() == reflect.Map: + return c.ptrCode(runtime.PtrTo(typ)) + default: + // isPtr was originally used to indicate whether the type of top level is pointer. + // However, since the slice/array element is a specification that can get the pointer address, explicitly set isPtr to true. 
+ // See here for related issues: https://github.com/goccy/go-json/issues/370 + code, err := c.typeToCodeWithPtr(typ, true) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + if ptr.value.Kind() == CodeKindMap { + ptr.ptrNum++ + } + } + return code, nil + } +} + +func (c *Compiler) mapKeyCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + switch typ.Kind() { + case reflect.Ptr: + return c.ptrCode(typ) + case reflect.String: + return c.stringCode(typ, false) + case reflect.Int: + return c.intStringCode(typ) + case reflect.Int8: + return c.int8StringCode(typ) + case reflect.Int16: + return c.int16StringCode(typ) + case reflect.Int32: + return c.int32StringCode(typ) + case reflect.Int64: + return c.int64StringCode(typ) + case reflect.Uint: + return c.uintStringCode(typ) + case reflect.Uint8: + return c.uint8StringCode(typ) + case reflect.Uint16: + return c.uint16StringCode(typ) + case reflect.Uint32: + return c.uint32StringCode(typ) + case reflect.Uint64: + return c.uint64StringCode(typ) + case reflect.Uintptr: + return c.uintStringCode(typ) + } + return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} +} + +func (c *Compiler) mapValueCode(typ *runtime.Type) (Code, error) { + switch typ.Kind() { + case reflect.Map: + return c.ptrCode(runtime.PtrTo(typ)) + default: + code, err := c.typeToCodeWithPtr(typ, false) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + if ptr.value.Kind() == CodeKindMap { + ptr.ptrNum++ + } + } + return code, nil + } +} + +func (c *Compiler) structCode(typ *runtime.Type, isPtr bool) (*StructCode, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if code, exists := c.structTypeToCode[typeptr]; exists { + derefCode := *code + derefCode.isRecursive = true + return &derefCode, nil + } + indirect := runtime.IfaceIndir(typ) + code := &StructCode{typ: typ, isPtr: isPtr, isIndirect: indirect} + c.structTypeToCode[typeptr] = code + + fieldNum := typ.NumField() + tags := c.typeToStructTags(typ) + fields := []*StructFieldCode{} + for i, tag := range tags { + isOnlyOneFirstField := i == 0 && fieldNum == 1 + field, err := c.structFieldCode(code, tag, isPtr, isOnlyOneFirstField) + if err != nil { + return nil, err + } + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil { + structCode.removeFieldsByTags(tags) + if c.isAssignableIndirect(field, isPtr) { + if indirect { + structCode.isIndirect = true + } else { + structCode.isIndirect = false + } + } + } + } else { + structCode := field.getStruct() + if structCode != nil { + if indirect { + // if parent is indirect type, set child indirect property to true + structCode.isIndirect = true + } else { + // if parent is not indirect type, set child indirect property to false. + // but if parent's indirect is false and isPtr is true, then indirect must be true. + // Do this only if indirectConversion is enabled at the end of compileStruct. 
+ structCode.isIndirect = false + } + } + } + fields = append(fields, field) + } + fieldMap := c.getFieldMap(fields) + duplicatedFieldMap := c.getDuplicatedFieldMap(fieldMap) + code.fields = c.filteredDuplicatedFields(fields, duplicatedFieldMap) + if !code.disableIndirectConversion && !indirect && isPtr { + code.enableIndirect() + } + delete(c.structTypeToCode, typeptr) + return code, nil +} + +func toElemType(t *runtime.Type) *runtime.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func (c *Compiler) structFieldCode(structCode *StructCode, tag *runtime.StructTag, isPtr, isOnlyOneFirstField bool) (*StructFieldCode, error) { + field := tag.Field + fieldType := runtime.Type2RType(field.Type) + isIndirectSpecialCase := isPtr && isOnlyOneFirstField + fieldCode := &StructFieldCode{ + typ: fieldType, + key: tag.Key, + tag: tag, + offset: field.Offset, + isAnonymous: field.Anonymous && !tag.IsTaggedKey && toElemType(fieldType).Kind() == reflect.Struct, + isTaggedKey: tag.IsTaggedKey, + isNilableType: c.isNilableType(fieldType), + isNilCheck: true, + } + switch { + case c.isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(fieldType, isIndirectSpecialCase): + code, err := c.marshalJSONCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + structCode.isIndirect = false + structCode.disableIndirectConversion = true + case c.isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(fieldType, isIndirectSpecialCase): + code, err := c.marshalTextCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + structCode.isIndirect = false + structCode.disableIndirectConversion = true + case isPtr && c.isPtrMarshalJSONType(fieldType): + // *struct{ field T } + // func (*T) MarshalJSON() ([]byte, error) + code, err := c.marshalJSONCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + case isPtr && c.isPtrMarshalTextType(fieldType): + // *struct{ field T } + // func (*T) MarshalText() ([]byte, error) + code, err := c.marshalTextCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + default: + code, err := c.typeToCodeWithPtr(fieldType, isPtr) + if err != nil { + return nil, err + } + switch code.Kind() { + case CodeKindPtr, CodeKindInterface: + fieldCode.isNextOpPtrType = true + } + fieldCode.value = code + } + return fieldCode, nil +} + +func (c *Compiler) isAssignableIndirect(fieldCode *StructFieldCode, isPtr bool) bool { + if isPtr { + return false + } + codeType := fieldCode.value.Kind() + if codeType == CodeKindMarshalJSON { + return false + } + if codeType == CodeKindMarshalText { + return false + } + return true +} + +func (c *Compiler) getFieldMap(fields []*StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + for _, field := range fields { + if field.isAnonymous { + for k, v := range c.getAnonymousFieldMap(field) { + fieldMap[k] = append(fieldMap[k], v...) 
+ } + continue + } + fieldMap[field.key] = append(fieldMap[field.key], field) + } + return fieldMap +} + +func (c *Compiler) getAnonymousFieldMap(field *StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + structCode := field.getAnonymousStruct() + if structCode == nil || structCode.isRecursive { + fieldMap[field.key] = append(fieldMap[field.key], field) + return fieldMap + } + for k, v := range c.getFieldMapFromAnonymousParent(structCode.fields) { + fieldMap[k] = append(fieldMap[k], v...) + } + return fieldMap +} + +func (c *Compiler) getFieldMapFromAnonymousParent(fields []*StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + for _, field := range fields { + if field.isAnonymous { + for k, v := range c.getAnonymousFieldMap(field) { + // Do not handle tagged key when embedding more than once + for _, vv := range v { + vv.isTaggedKey = false + } + fieldMap[k] = append(fieldMap[k], v...) + } + continue + } + fieldMap[field.key] = append(fieldMap[field.key], field) + } + return fieldMap +} + +func (c *Compiler) getDuplicatedFieldMap(fieldMap map[string][]*StructFieldCode) map[*StructFieldCode]struct{} { + duplicatedFieldMap := map[*StructFieldCode]struct{}{} + for _, fields := range fieldMap { + if len(fields) == 1 { + continue + } + if c.isTaggedKeyOnly(fields) { + for _, field := range fields { + if field.isTaggedKey { + continue + } + duplicatedFieldMap[field] = struct{}{} + } + } else { + for _, field := range fields { + duplicatedFieldMap[field] = struct{}{} + } + } + } + return duplicatedFieldMap +} + +func (c *Compiler) filteredDuplicatedFields(fields []*StructFieldCode, duplicatedFieldMap map[*StructFieldCode]struct{}) []*StructFieldCode { + filteredFields := make([]*StructFieldCode, 0, len(fields)) + for _, field := range fields { + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil && !structCode.isRecursive { + structCode.fields = c.filteredDuplicatedFields(structCode.fields, duplicatedFieldMap) + if len(structCode.fields) > 0 { + filteredFields = append(filteredFields, field) + } + continue + } + } + if _, exists := duplicatedFieldMap[field]; exists { + continue + } + filteredFields = append(filteredFields, field) + } + return filteredFields +} + +func (c *Compiler) isTaggedKeyOnly(fields []*StructFieldCode) bool { + var taggedKeyFieldCount int + for _, field := range fields { + if field.isTaggedKey { + taggedKeyFieldCount++ + } + } + return taggedKeyFieldCount == 1 +} + +func (c *Compiler) typeToStructTags(typ *runtime.Type) runtime.StructTags { + tags := runtime.StructTags{} + fieldNum := typ.NumField() + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + tags = append(tags, runtime.StructTagFromField(field)) + } + return tags +} + +// *struct{ field T } => struct { field *T } +// func (*T) MarshalJSON() ([]byte, error) +func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { + return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalJSONType(typ) +} + +// *struct{ field T } => struct { field *T } +// func (*T) MarshalText() ([]byte, error) +func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { + return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalTextType(typ) +} + +func (c *Compiler) implementsMarshalJSON(typ 
*runtime.Type) bool { + if !c.implementsMarshalJSONType(typ) { + return false + } + if typ.Kind() != reflect.Ptr { + return true + } + // type kind is reflect.Ptr + if !c.implementsMarshalJSONType(typ.Elem()) { + return true + } + // needs to dereference + return false +} + +func (c *Compiler) implementsMarshalText(typ *runtime.Type) bool { + if !typ.Implements(marshalTextType) { + return false + } + if typ.Kind() != reflect.Ptr { + return true + } + // type kind is reflect.Ptr + if !typ.Elem().Implements(marshalTextType) { + return true + } + // needs to dereference + return false +} + +func (c *Compiler) isNilableType(typ *runtime.Type) bool { + if !runtime.IfaceIndir(typ) { + return true + } + switch typ.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Func: + return true + default: + return false + } +} + +func (c *Compiler) implementsMarshalJSONType(typ *runtime.Type) bool { + return typ.Implements(marshalJSONType) || typ.Implements(marshalJSONContextType) +} + +func (c *Compiler) isPtrMarshalJSONType(typ *runtime.Type) bool { + return !c.implementsMarshalJSONType(typ) && c.implementsMarshalJSONType(runtime.PtrTo(typ)) +} + +func (c *Compiler) isPtrMarshalTextType(typ *runtime.Type) bool { + return !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType) +} + +func (c *Compiler) codeToOpcode(ctx *compileContext, typ *runtime.Type, code Code) *Opcode { + codes := code.ToOpcode(ctx) + codes.Last().Next = newEndOp(ctx, typ) + c.linkRecursiveCode(ctx) + return codes.First() +} + +func (c *Compiler) linkRecursiveCode(ctx *compileContext) { + recursiveCodes := map[uintptr]*CompiledCode{} + for _, recursive := range *ctx.recursiveCodes { + typeptr := uintptr(unsafe.Pointer(recursive.Type)) + codes := ctx.structTypeToCodes[typeptr] + if recursiveCode, ok := recursiveCodes[typeptr]; ok { + *recursive.Jmp = *recursiveCode + continue + } + + code := copyOpcode(codes.First()) + code.Op = code.Op.PtrHeadToHead() + lastCode := newEndOp(&compileContext{}, recursive.Type) + lastCode.Op = OpRecursiveEnd + + // OpRecursiveEnd must set before call TotalLength + code.End.Next = lastCode + + totalLength := code.TotalLength() + + // Idx, ElemIdx, Length must set after call TotalLength + lastCode.Idx = uint32((totalLength + 1) * uintptrSize) + lastCode.ElemIdx = lastCode.Idx + uintptrSize + lastCode.Length = lastCode.Idx + 2*uintptrSize + + // extend length to alloc slot for elemIdx + length + curTotalLength := uintptr(recursive.TotalLength()) + 3 + nextTotalLength := uintptr(totalLength) + 3 + + compiled := recursive.Jmp + compiled.Code = code + compiled.CurLen = curTotalLength + compiled.NextLen = nextTotalLength + compiled.Linked = true + + recursiveCodes[typeptr] = compiled + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go new file mode 100644 index 000000000000..b6f45a49b0e7 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go @@ -0,0 +1,33 @@ +//go:build !race +// +build !race + +package encoder + +func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() + if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { + codeSet, err := compileToGetCodeSetSlowPath(typeptr) + if err != nil { + return nil, err + } + return getFilteredCodeSetIfNeeded(ctx, codeSet) + } + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + if codeSet 
:= cachedOpcodeSets[index]; codeSet != nil { + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + return filtered, nil + } + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + cachedOpcodeSets[index] = codeSet + return filtered, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go new file mode 100644 index 000000000000..47b482f7fb66 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go @@ -0,0 +1,46 @@ +//go:build race +// +build race + +package encoder + +import ( + "sync" +) + +var setsMu sync.RWMutex + +func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() + if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { + codeSet, err := compileToGetCodeSetSlowPath(typeptr) + if err != nil { + return nil, err + } + return getFilteredCodeSetIfNeeded(ctx, codeSet) + } + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + setsMu.RLock() + if codeSet := cachedOpcodeSets[index]; codeSet != nil { + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + setsMu.RUnlock() + return nil, err + } + setsMu.RUnlock() + return filtered, nil + } + setsMu.RUnlock() + + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + setsMu.Lock() + cachedOpcodeSets[index] = codeSet + setsMu.Unlock() + return filtered, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/context.go b/vendor/github.com/goccy/go-json/internal/encoder/context.go new file mode 100644 index 000000000000..3833d0c86db5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/context.go @@ -0,0 +1,105 @@ +package encoder + +import ( + "context" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type compileContext struct { + opcodeIndex uint32 + ptrIndex int + indent uint32 + escapeKey bool + structTypeToCodes map[uintptr]Opcodes + recursiveCodes *Opcodes +} + +func (c *compileContext) incIndent() { + c.indent++ +} + +func (c *compileContext) decIndent() { + c.indent-- +} + +func (c *compileContext) incIndex() { + c.incOpcodeIndex() + c.incPtrIndex() +} + +func (c *compileContext) decIndex() { + c.decOpcodeIndex() + c.decPtrIndex() +} + +func (c *compileContext) incOpcodeIndex() { + c.opcodeIndex++ +} + +func (c *compileContext) decOpcodeIndex() { + c.opcodeIndex-- +} + +func (c *compileContext) incPtrIndex() { + c.ptrIndex++ +} + +func (c *compileContext) decPtrIndex() { + c.ptrIndex-- +} + +const ( + bufSize = 1024 +) + +var ( + runtimeContextPool = sync.Pool{ + New: func() interface{} { + return &RuntimeContext{ + Buf: make([]byte, 0, bufSize), + Ptrs: make([]uintptr, 128), + KeepRefs: make([]unsafe.Pointer, 0, 8), + Option: &Option{}, + } + }, + } +) + +type RuntimeContext struct { + Context context.Context + Buf []byte + MarshalBuf []byte + Ptrs []uintptr + KeepRefs []unsafe.Pointer + SeenPtr []uintptr + BaseIndent uint32 + Prefix []byte + IndentStr []byte + Option *Option +} + +func (c *RuntimeContext) Init(p uintptr, codelen int) { + if len(c.Ptrs) < codelen { + c.Ptrs = make([]uintptr, codelen) + } + c.Ptrs[0] = p + c.KeepRefs = c.KeepRefs[:0] 
+ c.SeenPtr = c.SeenPtr[:0] + c.BaseIndent = 0 +} + +func (c *RuntimeContext) Ptr() uintptr { + header := (*runtime.SliceHeader)(unsafe.Pointer(&c.Ptrs)) + return uintptr(header.Data) +} + +func TakeRuntimeContext() *RuntimeContext { + return runtimeContextPool.Get().(*RuntimeContext) +} + +func ReleaseRuntimeContext(ctx *RuntimeContext) { + runtimeContextPool.Put(ctx) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go new file mode 100644 index 000000000000..35c959d48185 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go @@ -0,0 +1,126 @@ +package encoder + +import "unicode/utf8" + +const ( + // The default lowest and highest continuation byte. + locb = 128 //0b10000000 + hicb = 191 //0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) + +// first is information about the first byte in a UTF-8 sequence. +var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + lineSep = byte(168) //'\u2028' + paragraphSep = byte(169) //'\u2029' +) + +type decodeRuneState int + +const ( + validUTF8State decodeRuneState = iota + runeErrorState + lineSepState + paragraphSepState +) + +func decodeRuneInString(s string) (decodeRuneState, int) { + n := len(s) + s0 := s[0] + x := first[s0] + if x >= as { + // The following code simulates an additional check for x == xx and + // handling the ASCII and invalid cases accordingly. This mask-and-or + // approach prevents an additional branch. + mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF. 
+ if rune(s[0])&^mask|utf8.RuneError&mask == utf8.RuneError { + return runeErrorState, 1 + } + return validUTF8State, 1 + } + sz := int(x & 7) + if n < sz { + return runeErrorState, 1 + } + s1 := s[1] + switch x >> 4 { + case 0: + if s1 < locb || hicb < s1 { + return runeErrorState, 1 + } + case 1: + if s1 < 0xA0 || hicb < s1 { + return runeErrorState, 1 + } + case 2: + if s1 < locb || 0x9F < s1 { + return runeErrorState, 1 + } + case 3: + if s1 < 0x90 || hicb < s1 { + return runeErrorState, 1 + } + case 4: + if s1 < locb || 0x8F < s1 { + return runeErrorState, 1 + } + } + if sz <= 2 { + return validUTF8State, 2 + } + s2 := s[2] + if s2 < locb || hicb < s2 { + return runeErrorState, 1 + } + if sz <= 3 { + // separator character prefixes: [2]byte{226, 128} + if s0 == 226 && s1 == 128 { + switch s2 { + case lineSep: + return lineSepState, 3 + case paragraphSep: + return paragraphSepState, 3 + } + } + return validUTF8State, 3 + } + s3 := s[3] + if s3 < locb || hicb < s3 { + return runeErrorState, 1 + } + return validUTF8State, 4 +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go new file mode 100644 index 000000000000..b436f5b21ffe --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go @@ -0,0 +1,601 @@ +package encoder + +import ( + "bytes" + "encoding" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +func (t OpType) IsMultipleOpHead() bool { + switch t { + case OpStructHead: + return true + case OpStructHeadSlice: + return true + case OpStructHeadArray: + return true + case OpStructHeadMap: + return true + case OpStructHeadStruct: + return true + case OpStructHeadOmitEmpty: + return true + case OpStructHeadOmitEmptySlice: + return true + case OpStructHeadOmitEmptyArray: + return true + case OpStructHeadOmitEmptyMap: + return true + case OpStructHeadOmitEmptyStruct: + return true + case OpStructHeadSlicePtr: + return true + case OpStructHeadOmitEmptySlicePtr: + return true + case OpStructHeadArrayPtr: + return true + case OpStructHeadOmitEmptyArrayPtr: + return true + case OpStructHeadMapPtr: + return true + case OpStructHeadOmitEmptyMapPtr: + return true + } + return false +} + +func (t OpType) IsMultipleOpField() bool { + switch t { + case OpStructField: + return true + case OpStructFieldSlice: + return true + case OpStructFieldArray: + return true + case OpStructFieldMap: + return true + case OpStructFieldStruct: + return true + case OpStructFieldOmitEmpty: + return true + case OpStructFieldOmitEmptySlice: + return true + case OpStructFieldOmitEmptyArray: + return true + case OpStructFieldOmitEmptyMap: + return true + case OpStructFieldOmitEmptyStruct: + return true + case OpStructFieldSlicePtr: + return true + case OpStructFieldOmitEmptySlicePtr: + return true + case OpStructFieldArrayPtr: + return true + case OpStructFieldOmitEmptyArrayPtr: + return true + case OpStructFieldMapPtr: + return true + case OpStructFieldOmitEmptyMapPtr: + return true + } + return false +} + +type OpcodeSet struct { + Type *runtime.Type + NoescapeKeyCode *Opcode + EscapeKeyCode *Opcode + InterfaceNoescapeKeyCode *Opcode + InterfaceEscapeKeyCode *Opcode + CodeLength int + EndCode *Opcode + Code Code + QueryCache map[string]*OpcodeSet + cacheMu sync.RWMutex +} + +func (s *OpcodeSet) getQueryCache(hash string) *OpcodeSet { + s.cacheMu.RLock() + 
codeSet := s.QueryCache[hash] + s.cacheMu.RUnlock() + return codeSet +} + +func (s *OpcodeSet) setQueryCache(hash string, codeSet *OpcodeSet) { + s.cacheMu.Lock() + s.QueryCache[hash] = codeSet + s.cacheMu.Unlock() +} + +type CompiledCode struct { + Code *Opcode + Linked bool // whether recursive code already have linked + CurLen uintptr + NextLen uintptr +} + +const StartDetectingCyclesAfter = 1000 + +func Load(base uintptr, idx uintptr) uintptr { + addr := base + idx + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func Store(base uintptr, idx uintptr, p uintptr) { + addr := base + idx + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func LoadNPtr(base uintptr, idx uintptr, ptrNum int) uintptr { + addr := base + idx + p := **(**uintptr)(unsafe.Pointer(&addr)) + if p == 0 { + return 0 + } + return PtrToPtr(p) + /* + for i := 0; i < ptrNum; i++ { + if p == 0 { + return p + } + p = PtrToPtr(p) + } + return p + */ +} + +func PtrToUint64(p uintptr) uint64 { return **(**uint64)(unsafe.Pointer(&p)) } +func PtrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func PtrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func PtrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func PtrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func PtrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func PtrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func PtrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func PtrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func PtrToNPtr(p uintptr, ptrNum int) uintptr { + for i := 0; i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = PtrToPtr(p) + } + return p +} + +func PtrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func PtrToInterface(code *Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func ErrUnsupportedValue(code *Opcode, ptr uintptr) *errors.UnsupportedValueError { + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&ptr)), + })) + return &errors.UnsupportedValueError{ + Value: reflect.ValueOf(v), + Str: fmt.Sprintf("encountered a cycle via %s", code.Type), + } +} + +func ErrUnsupportedFloat(v float64) *errors.UnsupportedValueError { + return &errors.UnsupportedValueError{ + Value: reflect.ValueOf(v), + Str: strconv.FormatFloat(v, 'g', -1, 64), + } +} + +func ErrMarshalerWithCode(code *Opcode, err error) *errors.MarshalerError { + return &errors.MarshalerError{ + Type: runtime.RType2Type(code.Type), + Err: err, + } +} + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type MapItem struct { + Key []byte + Value []byte +} + +type Mapslice struct { + Items []MapItem +} + +func (m *Mapslice) Len() int { + return len(m.Items) +} + +func (m *Mapslice) Less(i, j int) bool { + return bytes.Compare(m.Items[i].Key, m.Items[j].Key) < 0 +} + +func (m *Mapslice) Swap(i, j int) { + m.Items[i], m.Items[j] = m.Items[j], m.Items[i] +} + +//nolint:structcheck,unused +type mapIter struct { + key unsafe.Pointer + elem unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow unsafe.Pointer + oldoverflow unsafe.Pointer + startBucket uintptr + 
offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr +} + +type MapContext struct { + Start int + First int + Idx int + Slice *Mapslice + Buf []byte + Len int + Iter mapIter +} + +var mapContextPool = sync.Pool{ + New: func() interface{} { + return &MapContext{ + Slice: &Mapslice{}, + } + }, +} + +func NewMapContext(mapLen int, unorderedMap bool) *MapContext { + ctx := mapContextPool.Get().(*MapContext) + if !unorderedMap { + if len(ctx.Slice.Items) < mapLen { + ctx.Slice.Items = make([]MapItem, mapLen) + } else { + ctx.Slice.Items = ctx.Slice.Items[:mapLen] + } + } + ctx.Buf = ctx.Buf[:0] + ctx.Iter = mapIter{} + ctx.Idx = 0 + ctx.Len = mapLen + return ctx +} + +func ReleaseMapContext(c *MapContext) { + mapContextPool.Put(c) +} + +//go:linkname MapIterInit runtime.mapiterinit +//go:noescape +func MapIterInit(mapType *runtime.Type, m unsafe.Pointer, it *mapIter) + +//go:linkname MapIterKey reflect.mapiterkey +//go:noescape +func MapIterKey(it *mapIter) unsafe.Pointer + +//go:linkname MapIterNext reflect.mapiternext +//go:noescape +func MapIterNext(it *mapIter) + +//go:linkname MapLen reflect.maplen +//go:noescape +func MapLen(m unsafe.Pointer) int + +func AppendByteSlice(_ *RuntimeContext, b []byte, src []byte) []byte { + if src == nil { + return append(b, `null`...) + } + encodedLen := base64.StdEncoding.EncodedLen(len(src)) + b = append(b, '"') + pos := len(b) + remainLen := cap(b[pos:]) + var buf []byte + if remainLen > encodedLen { + buf = b[pos : pos+encodedLen] + } else { + buf = make([]byte, encodedLen) + } + base64.StdEncoding.Encode(buf, src) + return append(append(b, buf...), '"') +} + +func AppendFloat32(_ *RuntimeContext, b []byte, v float32) []byte { + f64 := float64(v) + abs := math.Abs(f64) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + f32 := float32(abs) + if f32 < 1e-6 || f32 >= 1e21 { + fmt = 'e' + } + } + return strconv.AppendFloat(b, f64, fmt, -1, 32) +} + +func AppendFloat64(_ *RuntimeContext, b []byte, v float64) []byte { + abs := math.Abs(v) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + return strconv.AppendFloat(b, v, fmt, -1, 64) +} + +func AppendBool(_ *RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +var ( + floatTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + '.': true, + 'e': true, + 'E': true, + '+': true, + '-': true, + } +) + +func AppendNumber(_ *RuntimeContext, b []byte, n json.Number) ([]byte, error) { + if len(n) == 0 { + return append(b, '0'), nil + } + for i := 0; i < len(n); i++ { + if !floatTable[n[i]] { + return nil, fmt.Errorf("json: invalid number literal %q", n) + } + } + b = append(b, n...) 
+ return b, nil +} + +func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + + if rv.Kind() == reflect.Ptr && rv.IsNil() { + return AppendNull(ctx, b), nil + } + + v = rv.Interface() + var bb []byte + if (code.Flags & MarshalerContextFlags) != 0 { + marshaler, ok := v.(marshalerContext) + if !ok { + return AppendNull(ctx, b), nil + } + stdctx := ctx.Option.Context + if ctx.Option.Flag&FieldQueryOption != 0 { + stdctx = SetFieldQueryToContext(stdctx, code.FieldQuery) + } + b, err := marshaler.MarshalJSON(stdctx) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } else { + marshaler, ok := v.(json.Marshaler) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } + marshalBuf := ctx.MarshalBuf[:0] + marshalBuf = append(append(marshalBuf, bb...), nul) + compactedBuf, err := compact(b, marshalBuf, (ctx.Option.Flag&HTMLEscapeOption) != 0) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + ctx.MarshalBuf = marshalBuf + return compactedBuf, nil +} + +func AppendMarshalJSONIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + var bb []byte + if (code.Flags & MarshalerContextFlags) != 0 { + marshaler, ok := v.(marshalerContext) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON(ctx.Option.Context) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } else { + marshaler, ok := v.(json.Marshaler) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } + marshalBuf := ctx.MarshalBuf[:0] + marshalBuf = append(append(marshalBuf, bb...), nul) + indentedBuf, err := doIndent( + b, + marshalBuf, + string(ctx.Prefix)+strings.Repeat(string(ctx.IndentStr), int(ctx.BaseIndent+code.Indent)), + string(ctx.IndentStr), + (ctx.Option.Flag&HTMLEscapeOption) != 0, + ) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + ctx.MarshalBuf = marshalBuf + return indentedBuf, nil +} + +func AppendMarshalText(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + marshaler, ok := v.(encoding.TextMarshaler) + if !ok { + return AppendNull(ctx, b), nil + } + bytes, err := marshaler.MarshalText() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil +} + +func AppendMarshalTextIndent(ctx 
*RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + marshaler, ok := v.(encoding.TextMarshaler) + if !ok { + return AppendNull(ctx, b), nil + } + bytes, err := marshaler.MarshalText() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil +} + +func AppendNull(_ *RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func AppendComma(_ *RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func AppendCommaIndent(_ *RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func AppendStructEnd(_ *RuntimeContext, b []byte) []byte { + return append(b, '}', ',') +} + +func AppendStructEndIndent(ctx *RuntimeContext, code *Opcode, b []byte) []byte { + b = append(b, '\n') + b = append(b, ctx.Prefix...) + indentNum := ctx.BaseIndent + code.Indent - 1 + for i := uint32(0); i < indentNum; i++ { + b = append(b, ctx.IndentStr...) + } + return append(b, '}', ',', '\n') +} + +func AppendIndent(ctx *RuntimeContext, b []byte, indent uint32) []byte { + b = append(b, ctx.Prefix...) + indentNum := ctx.BaseIndent + indent + for i := uint32(0); i < indentNum; i++ { + b = append(b, ctx.IndentStr...) + } + return b +} + +func IsNilForMarshaler(v interface{}) bool { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Bool: + return !rv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(rv.Float()) == 0 + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Func: + return rv.IsNil() + case reflect.Slice: + return rv.IsNil() || rv.Len() == 0 + case reflect.String: + return rv.Len() == 0 + } + return false +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/indent.go b/vendor/github.com/goccy/go-json/internal/encoder/indent.go new file mode 100644 index 000000000000..dfe04b5e3c43 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/indent.go @@ -0,0 +1,211 @@ +package encoder + +import ( + "bytes" + "fmt" + + "github.com/goccy/go-json/internal/errors" +) + +func takeIndentSrcRuntimeContext(src []byte) (*RuntimeContext, []byte) { + ctx := TakeRuntimeContext() + buf := ctx.Buf[:0] + buf = append(append(buf, src...), nul) + ctx.Buf = buf + return ctx, buf +} + +func Indent(buf *bytes.Buffer, src []byte, prefix, indentStr string) error { + if len(src) == 0 { + return errors.ErrUnexpectedEndOfJSON("", 0) + } + + srcCtx, srcBuf := takeIndentSrcRuntimeContext(src) + dstCtx := TakeRuntimeContext() + dst := dstCtx.Buf[:0] + + dst, err := indentAndWrite(buf, dst, srcBuf, prefix, indentStr) + if err != nil { + ReleaseRuntimeContext(srcCtx) + ReleaseRuntimeContext(dstCtx) + return err + } + dstCtx.Buf = dst + ReleaseRuntimeContext(srcCtx) + ReleaseRuntimeContext(dstCtx) + return nil +} + +func indentAndWrite(buf *bytes.Buffer, dst []byte, src []byte, prefix, indentStr string) ([]byte, error) { + dst, err := doIndent(dst, src, prefix, indentStr, false) + if err != nil { + return nil, err + } + if _, err := 
buf.Write(dst); err != nil { + return nil, err + } + return dst, nil +} + +func doIndent(dst, src []byte, prefix, indentStr string, escape bool) ([]byte, error) { + buf, cursor, err := indentValue(dst, src, 0, 0, []byte(prefix), []byte(indentStr), escape) + if err != nil { + return nil, err + } + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return buf, nil +} + +func indentValue( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return indentObject(dst, src, indentNum, cursor, prefix, indentBytes, escape) + case '}': + return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor) + case '[': + return indentArray(dst, src, indentNum, cursor, prefix, indentBytes, escape) + case ']': + return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor) + case '"': + return compactString(dst, src, cursor, escape) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return compactNumber(dst, src, cursor) + case 't': + return compactTrue(dst, src, cursor) + case 'f': + return compactFalse(dst, src, cursor) + case 'n': + return compactNull(dst, src, cursor) + default: + return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor) + } + } +} + +func indentObject( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + if src[cursor] == '{' { + dst = append(dst, '{') + } else { + return nil, 0, errors.ErrExpected("expected { character for object value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == '}' { + dst = append(dst, '}') + return dst, cursor + 1, nil + } + indentNum++ + var err error + for { + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum; i++ { + dst = append(dst, indentBytes...) + } + cursor = skipWhiteSpace(src, cursor) + dst, cursor, err = compactString(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + if src[cursor] != ':' { + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after object key", src[cursor]), + cursor+1, + ) + } + dst = append(dst, ':', ' ') + dst, cursor, err = indentValue(dst, src, indentNum, cursor+1, prefix, indentBytes, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case '}': + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum-1; i++ { + dst = append(dst, indentBytes...) + } + dst = append(dst, '}') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after object key:value pair", src[cursor]), + cursor+1, + ) + } + cursor++ + } +} + +func indentArray( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + if src[cursor] == '[' { + dst = append(dst, '[') + } else { + return nil, 0, errors.ErrExpected("expected [ character for array value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == ']' { + dst = append(dst, ']') + return dst, cursor + 1, nil + } + indentNum++ + var err error + for { + dst = append(append(dst, '\n'), prefix...) 
+ for i := 0; i < indentNum; i++ { + dst = append(dst, indentBytes...) + } + dst, cursor, err = indentValue(dst, src, indentNum, cursor, prefix, indentBytes, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case ']': + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum-1; i++ { + dst = append(dst, indentBytes...) + } + dst = append(dst, ']') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after array value", src[cursor]), + cursor+1, + ) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/int.go b/vendor/github.com/goccy/go-json/internal/encoder/int.go new file mode 100644 index 000000000000..8b5febeaa63b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/int.go @@ -0,0 +1,176 @@ +// This files's processing codes are inspired by https://github.com/segmentio/encoding. +// The license notation is as follows. +// +// # MIT License +// +// Copyright (c) 2019 Segment.io, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+package encoder + +import ( + "unsafe" +) + +var endianness int + +func init() { + var b [2]byte + *(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD) + + switch b[0] { + case 0xCD: + endianness = 0 // LE + case 0xAB: + endianness = 1 // BE + default: + panic("could not determine endianness") + } +} + +// "00010203...96979899" cast to []uint16 +var intLELookup = [100]uint16{ + 0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930, + 0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931, + 0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932, + 0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933, + 0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934, + 0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935, + 0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936, + 0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937, + 0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938, + 0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939, +} + +var intBELookup = [100]uint16{ + 0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039, + 0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139, + 0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239, + 0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339, + 0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439, + 0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539, + 0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639, + 0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739, + 0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839, + 0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939, +} + +var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup} + +func numMask(numBitSize uint8) uint64 { + return 1<>(code.NumBitSize-1))&1 == 1 + if !negative { + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + } else { + n = -n & mask + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + if negative { + i-- + b[i] = '-' + } + + return append(out, b[i:]...) 
+} + +func AppendUint(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte { + var u64 uint64 + switch code.NumBitSize { + case 8: + u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + u64 = **(**uint64)(unsafe.Pointer(&p)) + } + mask := numMask(code.NumBitSize) + n := u64 & mask + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + return append(out, b[i:]...) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map112.go b/vendor/github.com/goccy/go-json/internal/encoder/map112.go new file mode 100644 index 000000000000..e96ffadf7abf --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/map112.go @@ -0,0 +1,9 @@ +//go:build !go1.13 +// +build !go1.13 + +package encoder + +import "unsafe" + +//go:linkname MapIterValue reflect.mapitervalue +func MapIterValue(it *mapIter) unsafe.Pointer diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map113.go b/vendor/github.com/goccy/go-json/internal/encoder/map113.go new file mode 100644 index 000000000000..9b69dcc360dc --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/map113.go @@ -0,0 +1,9 @@ +//go:build go1.13 +// +build go1.13 + +package encoder + +import "unsafe" + +//go:linkname MapIterValue reflect.mapiterelem +func MapIterValue(it *mapIter) unsafe.Pointer diff --git a/vendor/github.com/goccy/go-json/internal/encoder/opcode.go b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go new file mode 100644 index 000000000000..df22f55423d0 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go @@ -0,0 +1,752 @@ +package encoder + +import ( + "bytes" + "fmt" + "sort" + "strings" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +type OpFlags uint16 + +const ( + AnonymousHeadFlags OpFlags = 1 << 0 + AnonymousKeyFlags OpFlags = 1 << 1 + IndirectFlags OpFlags = 1 << 2 + IsTaggedKeyFlags OpFlags = 1 << 3 + NilCheckFlags OpFlags = 1 << 4 + AddrForMarshalerFlags OpFlags = 1 << 5 + IsNextOpPtrTypeFlags OpFlags = 1 << 6 + IsNilableTypeFlags OpFlags = 1 << 7 + MarshalerContextFlags OpFlags = 1 << 8 + NonEmptyInterfaceFlags OpFlags = 1 << 9 +) + +type Opcode struct { + Op OpType // operation type + Idx uint32 // offset to access ptr + Next *Opcode // next opcode + End *Opcode // array/slice/struct/map end + NextField *Opcode // next struct field + Key string // struct field key + Offset uint32 // offset size from struct header + PtrNum uint8 // pointer number: e.g. double pointer is 2. 
+ NumBitSize uint8 + Flags OpFlags + + Type *runtime.Type // go type + Jmp *CompiledCode // for recursive call + FieldQuery *FieldQuery // field query for Interface / MarshalJSON / MarshalText + ElemIdx uint32 // offset to access array/slice elem + Length uint32 // offset to access slice length or array length + Indent uint32 // indent number + Size uint32 // array/slice elem size + DisplayIdx uint32 // opcode index + DisplayKey string // key text to display +} + +func (c *Opcode) Validate() error { + var prevIdx uint32 + for code := c; !code.IsEnd(); { + if prevIdx != 0 { + if code.DisplayIdx != prevIdx+1 { + return fmt.Errorf( + "invalid index. previous display index is %d but next is %d. dump = %s", + prevIdx, code.DisplayIdx, c.Dump(), + ) + } + } + prevIdx = code.DisplayIdx + code = code.IterNext() + } + return nil +} + +func (c *Opcode) IterNext() *Opcode { + if c == nil { + return nil + } + switch c.Op.CodeType() { + case CodeArrayElem, CodeSliceElem, CodeMapKey: + return c.End + default: + return c.Next + } +} + +func (c *Opcode) IsEnd() bool { + if c == nil { + return true + } + return c.Op == OpEnd || c.Op == OpInterfaceEnd || c.Op == OpRecursiveEnd +} + +func (c *Opcode) MaxIdx() uint32 { + max := uint32(0) + for _, value := range []uint32{ + c.Idx, + c.ElemIdx, + c.Length, + c.Size, + } { + if max < value { + max = value + } + } + return max +} + +func (c *Opcode) ToHeaderType(isString bool) OpType { + switch c.Op { + case OpInt: + if isString { + return OpStructHeadIntString + } + return OpStructHeadInt + case OpIntPtr: + if isString { + return OpStructHeadIntPtrString + } + return OpStructHeadIntPtr + case OpUint: + if isString { + return OpStructHeadUintString + } + return OpStructHeadUint + case OpUintPtr: + if isString { + return OpStructHeadUintPtrString + } + return OpStructHeadUintPtr + case OpFloat32: + if isString { + return OpStructHeadFloat32String + } + return OpStructHeadFloat32 + case OpFloat32Ptr: + if isString { + return OpStructHeadFloat32PtrString + } + return OpStructHeadFloat32Ptr + case OpFloat64: + if isString { + return OpStructHeadFloat64String + } + return OpStructHeadFloat64 + case OpFloat64Ptr: + if isString { + return OpStructHeadFloat64PtrString + } + return OpStructHeadFloat64Ptr + case OpString: + if isString { + return OpStructHeadStringString + } + return OpStructHeadString + case OpStringPtr: + if isString { + return OpStructHeadStringPtrString + } + return OpStructHeadStringPtr + case OpNumber: + if isString { + return OpStructHeadNumberString + } + return OpStructHeadNumber + case OpNumberPtr: + if isString { + return OpStructHeadNumberPtrString + } + return OpStructHeadNumberPtr + case OpBool: + if isString { + return OpStructHeadBoolString + } + return OpStructHeadBool + case OpBoolPtr: + if isString { + return OpStructHeadBoolPtrString + } + return OpStructHeadBoolPtr + case OpBytes: + return OpStructHeadBytes + case OpBytesPtr: + return OpStructHeadBytesPtr + case OpMap: + return OpStructHeadMap + case OpMapPtr: + c.Op = OpMap + return OpStructHeadMapPtr + case OpArray: + return OpStructHeadArray + case OpArrayPtr: + c.Op = OpArray + return OpStructHeadArrayPtr + case OpSlice: + return OpStructHeadSlice + case OpSlicePtr: + c.Op = OpSlice + return OpStructHeadSlicePtr + case OpMarshalJSON: + return OpStructHeadMarshalJSON + case OpMarshalJSONPtr: + return OpStructHeadMarshalJSONPtr + case OpMarshalText: + return OpStructHeadMarshalText + case OpMarshalTextPtr: + return OpStructHeadMarshalTextPtr + } + return OpStructHead +} + +func (c 
*Opcode) ToFieldType(isString bool) OpType { + switch c.Op { + case OpInt: + if isString { + return OpStructFieldIntString + } + return OpStructFieldInt + case OpIntPtr: + if isString { + return OpStructFieldIntPtrString + } + return OpStructFieldIntPtr + case OpUint: + if isString { + return OpStructFieldUintString + } + return OpStructFieldUint + case OpUintPtr: + if isString { + return OpStructFieldUintPtrString + } + return OpStructFieldUintPtr + case OpFloat32: + if isString { + return OpStructFieldFloat32String + } + return OpStructFieldFloat32 + case OpFloat32Ptr: + if isString { + return OpStructFieldFloat32PtrString + } + return OpStructFieldFloat32Ptr + case OpFloat64: + if isString { + return OpStructFieldFloat64String + } + return OpStructFieldFloat64 + case OpFloat64Ptr: + if isString { + return OpStructFieldFloat64PtrString + } + return OpStructFieldFloat64Ptr + case OpString: + if isString { + return OpStructFieldStringString + } + return OpStructFieldString + case OpStringPtr: + if isString { + return OpStructFieldStringPtrString + } + return OpStructFieldStringPtr + case OpNumber: + if isString { + return OpStructFieldNumberString + } + return OpStructFieldNumber + case OpNumberPtr: + if isString { + return OpStructFieldNumberPtrString + } + return OpStructFieldNumberPtr + case OpBool: + if isString { + return OpStructFieldBoolString + } + return OpStructFieldBool + case OpBoolPtr: + if isString { + return OpStructFieldBoolPtrString + } + return OpStructFieldBoolPtr + case OpBytes: + return OpStructFieldBytes + case OpBytesPtr: + return OpStructFieldBytesPtr + case OpMap: + return OpStructFieldMap + case OpMapPtr: + c.Op = OpMap + return OpStructFieldMapPtr + case OpArray: + return OpStructFieldArray + case OpArrayPtr: + c.Op = OpArray + return OpStructFieldArrayPtr + case OpSlice: + return OpStructFieldSlice + case OpSlicePtr: + c.Op = OpSlice + return OpStructFieldSlicePtr + case OpMarshalJSON: + return OpStructFieldMarshalJSON + case OpMarshalJSONPtr: + return OpStructFieldMarshalJSONPtr + case OpMarshalText: + return OpStructFieldMarshalText + case OpMarshalTextPtr: + return OpStructFieldMarshalTextPtr + } + return OpStructField +} + +func newOpCode(ctx *compileContext, typ *runtime.Type, op OpType) *Opcode { + return newOpCodeWithNext(ctx, typ, op, newEndOp(ctx, typ)) +} + +func opcodeOffset(idx int) uint32 { + return uint32(idx) * uintptrSize +} + +func getCodeAddrByIdx(head *Opcode, idx uint32) *Opcode { + addr := uintptr(unsafe.Pointer(head)) + uintptr(idx)*unsafe.Sizeof(Opcode{}) + return *(**Opcode)(unsafe.Pointer(&addr)) +} + +func copyOpcode(code *Opcode) *Opcode { + codeNum := ToEndCode(code).DisplayIdx + 1 + codeSlice := make([]Opcode, codeNum) + head := (*Opcode)((*runtime.SliceHeader)(unsafe.Pointer(&codeSlice)).Data) + ptr := head + c := code + for { + *ptr = Opcode{ + Op: c.Op, + Key: c.Key, + PtrNum: c.PtrNum, + NumBitSize: c.NumBitSize, + Flags: c.Flags, + Idx: c.Idx, + Offset: c.Offset, + Type: c.Type, + FieldQuery: c.FieldQuery, + DisplayIdx: c.DisplayIdx, + DisplayKey: c.DisplayKey, + ElemIdx: c.ElemIdx, + Length: c.Length, + Size: c.Size, + Indent: c.Indent, + Jmp: c.Jmp, + } + if c.End != nil { + ptr.End = getCodeAddrByIdx(head, c.End.DisplayIdx) + } + if c.NextField != nil { + ptr.NextField = getCodeAddrByIdx(head, c.NextField.DisplayIdx) + } + if c.Next != nil { + ptr.Next = getCodeAddrByIdx(head, c.Next.DisplayIdx) + } + if c.IsEnd() { + break + } + ptr = getCodeAddrByIdx(head, c.DisplayIdx+1) + c = c.IterNext() + } + return head +} + +func 
setTotalLengthToInterfaceOp(code *Opcode) { + for c := code; !c.IsEnd(); { + if c.Op == OpInterface || c.Op == OpInterfacePtr { + c.Length = uint32(code.TotalLength()) + } + c = c.IterNext() + } +} + +func ToEndCode(code *Opcode) *Opcode { + c := code + for !c.IsEnd() { + c = c.IterNext() + } + return c +} + +func copyToInterfaceOpcode(code *Opcode) *Opcode { + copied := copyOpcode(code) + c := copied + c = ToEndCode(c) + c.Idx += uintptrSize + c.ElemIdx = c.Idx + uintptrSize + c.Length = c.Idx + 2*uintptrSize + c.Op = OpInterfaceEnd + return copied +} + +func newOpCodeWithNext(ctx *compileContext, typ *runtime.Type, op OpType, next *Opcode) *Opcode { + return &Opcode{ + Op: op, + Idx: opcodeOffset(ctx.ptrIndex), + Next: next, + Type: typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newEndOp(ctx *compileContext, typ *runtime.Type) *Opcode { + return newOpCodeWithNext(ctx, typ, OpEnd, nil) +} + +func (c *Opcode) TotalLength() int { + var idx int + code := c + for !code.IsEnd() { + maxIdx := int(code.MaxIdx() / uintptrSize) + if idx < maxIdx { + idx = maxIdx + } + if code.Op == OpRecursiveEnd { + break + } + code = code.IterNext() + } + maxIdx := int(code.MaxIdx() / uintptrSize) + if idx < maxIdx { + idx = maxIdx + } + return idx + 1 +} + +func (c *Opcode) dumpHead(code *Opcode) string { + var length uint32 + if code.Op.CodeType() == CodeArrayHead { + length = code.Length + } else { + length = code.Length / uintptrSize + } + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.ElemIdx/uintptrSize, + length, + ) +} + +func (c *Opcode) dumpMapHead(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpMapEnd(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpElem(code *Opcode) string { + var length uint32 + if code.Op.CodeType() == CodeArrayElem { + length = code.Length + } else { + length = code.Length / uintptrSize + } + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d][size:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.ElemIdx/uintptrSize, + length, + code.Size, + ) +} + +func (c *Opcode) dumpField(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][key:%s][offset:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.DisplayKey, + code.Offset, + ) +} + +func (c *Opcode) dumpKey(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpValue(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) Dump() string { + codes := []string{} + for code := c; !code.IsEnd(); { + switch code.Op.CodeType() { + case CodeSliceHead: + codes = append(codes, c.dumpHead(code)) + code = code.Next + case CodeMapHead: + codes = append(codes, c.dumpMapHead(code)) + code = code.Next + case CodeArrayElem, CodeSliceElem: + codes = append(codes, c.dumpElem(code)) + code 
= code.End + case CodeMapKey: + codes = append(codes, c.dumpKey(code)) + code = code.End + case CodeMapValue: + codes = append(codes, c.dumpValue(code)) + code = code.Next + case CodeMapEnd: + codes = append(codes, c.dumpMapEnd(code)) + code = code.Next + case CodeStructField: + codes = append(codes, c.dumpField(code)) + code = code.Next + case CodeStructEnd: + codes = append(codes, c.dumpField(code)) + code = code.Next + default: + codes = append(codes, fmt.Sprintf( + "[%03d]%s%s ([idx:%d])", + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + )) + code = code.Next + } + } + return strings.Join(codes, "\n") +} + +func (c *Opcode) DumpDOT() string { + type edge struct { + from, to *Opcode + label string + weight int + } + var edges []edge + + b := &bytes.Buffer{} + fmt.Fprintf(b, "digraph \"%p\" {\n", c.Type) + fmt.Fprintln(b, "mclimit=1.5;\nrankdir=TD;\nordering=out;\nnode[shape=box];") + for code := c; !code.IsEnd(); { + label := code.Op.String() + fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, label) + if p := code.Next; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "Next", + weight: 10, + }) + } + if p := code.NextField; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "NextField", + weight: 2, + }) + } + if p := code.End; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "End", + weight: 1, + }) + } + if p := code.Jmp; p != nil { + edges = append(edges, edge{ + from: code, + to: p.Code, + label: "Jmp", + weight: 1, + }) + } + + switch code.Op.CodeType() { + case CodeSliceHead: + code = code.Next + case CodeMapHead: + code = code.Next + case CodeArrayElem, CodeSliceElem: + code = code.End + case CodeMapKey: + code = code.End + case CodeMapValue: + code = code.Next + case CodeMapEnd: + code = code.Next + case CodeStructField: + code = code.Next + case CodeStructEnd: + code = code.Next + default: + code = code.Next + } + if code.IsEnd() { + fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, code.Op.String()) + } + } + sort.Slice(edges, func(i, j int) bool { + return edges[i].to.DisplayIdx < edges[j].to.DisplayIdx + }) + for _, e := range edges { + fmt.Fprintf(b, "\"%p\" -> \"%p\" [label=%q][weight=%d];\n", e.from, e.to, e.label, e.weight) + } + fmt.Fprint(b, "}") + return b.String() +} + +func newSliceHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + elemIdx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + length := opcodeOffset(ctx.ptrIndex) + return &Opcode{ + Op: OpSlice, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: elemIdx, + Length: length, + Indent: ctx.indent, + } +} + +func newSliceElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, size uintptr) *Opcode { + return &Opcode{ + Op: OpSliceElem, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: head.ElemIdx, + Length: head.Length, + Indent: ctx.indent, + Size: uint32(size), + } +} + +func newArrayHeaderCode(ctx *compileContext, typ *runtime.Type, alen int) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + elemIdx := opcodeOffset(ctx.ptrIndex) + return &Opcode{ + Op: OpArray, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: elemIdx, + Indent: ctx.indent, + Length: uint32(alen), + } +} + +func newArrayElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, length int, size uintptr) *Opcode { + return &Opcode{ + Op: OpArrayElem, + Type: typ, + Idx: head.Idx, + 
DisplayIdx: ctx.opcodeIndex, + ElemIdx: head.ElemIdx, + Length: uint32(length), + Indent: ctx.indent, + Size: uint32(size), + } +} + +func newMapHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + return &Opcode{ + Op: OpMap, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapKeyCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapKey, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapValueCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapValue, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapEndCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapEnd, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + Next: newEndOp(ctx, typ), + } +} + +func newRecursiveCode(ctx *compileContext, typ *runtime.Type, jmp *CompiledCode) *Opcode { + return &Opcode{ + Op: OpRecursive, + Type: typ, + Idx: opcodeOffset(ctx.ptrIndex), + Next: newEndOp(ctx, typ), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + Jmp: jmp, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/option.go b/vendor/github.com/goccy/go-json/internal/encoder/option.go new file mode 100644 index 000000000000..12c58e46c01a --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/option.go @@ -0,0 +1,48 @@ +package encoder + +import ( + "context" + "io" +) + +type OptionFlag uint8 + +const ( + HTMLEscapeOption OptionFlag = 1 << iota + IndentOption + UnorderedMapOption + DebugOption + ColorizeOption + ContextOption + NormalizeUTF8Option + FieldQueryOption +) + +type Option struct { + Flag OptionFlag + ColorScheme *ColorScheme + Context context.Context + DebugOut io.Writer + DebugDOTOut io.WriteCloser +} + +type EncodeFormat struct { + Header string + Footer string +} + +type EncodeFormatScheme struct { + Int EncodeFormat + Uint EncodeFormat + Float EncodeFormat + Bool EncodeFormat + String EncodeFormat + Binary EncodeFormat + ObjectKey EncodeFormat + Null EncodeFormat +} + +type ( + ColorScheme = EncodeFormatScheme + ColorFormat = EncodeFormat +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/optype.go b/vendor/github.com/goccy/go-json/internal/encoder/optype.go new file mode 100644 index 000000000000..5c1241b47d00 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/optype.go @@ -0,0 +1,932 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package encoder + +import ( + "strings" +) + +type CodeType int + +const ( + CodeOp CodeType = 0 + CodeArrayHead CodeType = 1 + CodeArrayElem CodeType = 2 + CodeSliceHead CodeType = 3 + CodeSliceElem CodeType = 4 + CodeMapHead CodeType = 5 + CodeMapKey CodeType = 6 + CodeMapValue CodeType = 7 + CodeMapEnd CodeType = 8 + CodeRecursive CodeType = 9 + CodeStructField CodeType = 10 + CodeStructEnd CodeType = 11 +) + +var opTypeStrings = [400]string{ + "End", + "Interface", + "Ptr", + "SliceElem", + "SliceEnd", + "ArrayElem", + "ArrayEnd", + "MapKey", + "MapValue", + "MapEnd", + "Recursive", + "RecursivePtr", + "RecursiveEnd", + "InterfaceEnd", + "Int", + "Uint", + "Float32", + "Float64", + "Bool", + "String", + "Bytes", + "Number", + "Array", + "Map", + "Slice", + "Struct", + "MarshalJSON", + "MarshalText", + "IntString", + "UintString", + "Float32String", + "Float64String", + "BoolString", + "StringString", + "NumberString", + "IntPtr", + "UintPtr", + "Float32Ptr", + "Float64Ptr", + "BoolPtr", + "StringPtr", + "BytesPtr", + "NumberPtr", + "ArrayPtr", + "MapPtr", + "SlicePtr", + "MarshalJSONPtr", + "MarshalTextPtr", + "InterfacePtr", + "IntPtrString", + "UintPtrString", + "Float32PtrString", + "Float64PtrString", + "BoolPtrString", + "StringPtrString", + "NumberPtrString", + "StructHeadInt", + "StructHeadOmitEmptyInt", + "StructPtrHeadInt", + "StructPtrHeadOmitEmptyInt", + "StructHeadUint", + "StructHeadOmitEmptyUint", + "StructPtrHeadUint", + "StructPtrHeadOmitEmptyUint", + "StructHeadFloat32", + "StructHeadOmitEmptyFloat32", + "StructPtrHeadFloat32", + "StructPtrHeadOmitEmptyFloat32", + "StructHeadFloat64", + "StructHeadOmitEmptyFloat64", + "StructPtrHeadFloat64", + "StructPtrHeadOmitEmptyFloat64", + "StructHeadBool", + "StructHeadOmitEmptyBool", + "StructPtrHeadBool", + "StructPtrHeadOmitEmptyBool", + "StructHeadString", + "StructHeadOmitEmptyString", + "StructPtrHeadString", + "StructPtrHeadOmitEmptyString", + "StructHeadBytes", + "StructHeadOmitEmptyBytes", + "StructPtrHeadBytes", + "StructPtrHeadOmitEmptyBytes", + "StructHeadNumber", + "StructHeadOmitEmptyNumber", + "StructPtrHeadNumber", + "StructPtrHeadOmitEmptyNumber", + "StructHeadArray", + "StructHeadOmitEmptyArray", + "StructPtrHeadArray", + "StructPtrHeadOmitEmptyArray", + "StructHeadMap", + "StructHeadOmitEmptyMap", + "StructPtrHeadMap", + "StructPtrHeadOmitEmptyMap", + "StructHeadSlice", + "StructHeadOmitEmptySlice", + "StructPtrHeadSlice", + "StructPtrHeadOmitEmptySlice", + "StructHeadStruct", + "StructHeadOmitEmptyStruct", + "StructPtrHeadStruct", + "StructPtrHeadOmitEmptyStruct", + "StructHeadMarshalJSON", + "StructHeadOmitEmptyMarshalJSON", + "StructPtrHeadMarshalJSON", + "StructPtrHeadOmitEmptyMarshalJSON", + "StructHeadMarshalText", + "StructHeadOmitEmptyMarshalText", + "StructPtrHeadMarshalText", + "StructPtrHeadOmitEmptyMarshalText", + "StructHeadIntString", + "StructHeadOmitEmptyIntString", + "StructPtrHeadIntString", + "StructPtrHeadOmitEmptyIntString", + "StructHeadUintString", + "StructHeadOmitEmptyUintString", + "StructPtrHeadUintString", + "StructPtrHeadOmitEmptyUintString", + "StructHeadFloat32String", + "StructHeadOmitEmptyFloat32String", + "StructPtrHeadFloat32String", + "StructPtrHeadOmitEmptyFloat32String", + "StructHeadFloat64String", + "StructHeadOmitEmptyFloat64String", + "StructPtrHeadFloat64String", + "StructPtrHeadOmitEmptyFloat64String", + "StructHeadBoolString", + "StructHeadOmitEmptyBoolString", + "StructPtrHeadBoolString", + "StructPtrHeadOmitEmptyBoolString", + "StructHeadStringString", + 
"StructHeadOmitEmptyStringString", + "StructPtrHeadStringString", + "StructPtrHeadOmitEmptyStringString", + "StructHeadNumberString", + "StructHeadOmitEmptyNumberString", + "StructPtrHeadNumberString", + "StructPtrHeadOmitEmptyNumberString", + "StructHeadIntPtr", + "StructHeadOmitEmptyIntPtr", + "StructPtrHeadIntPtr", + "StructPtrHeadOmitEmptyIntPtr", + "StructHeadUintPtr", + "StructHeadOmitEmptyUintPtr", + "StructPtrHeadUintPtr", + "StructPtrHeadOmitEmptyUintPtr", + "StructHeadFloat32Ptr", + "StructHeadOmitEmptyFloat32Ptr", + "StructPtrHeadFloat32Ptr", + "StructPtrHeadOmitEmptyFloat32Ptr", + "StructHeadFloat64Ptr", + "StructHeadOmitEmptyFloat64Ptr", + "StructPtrHeadFloat64Ptr", + "StructPtrHeadOmitEmptyFloat64Ptr", + "StructHeadBoolPtr", + "StructHeadOmitEmptyBoolPtr", + "StructPtrHeadBoolPtr", + "StructPtrHeadOmitEmptyBoolPtr", + "StructHeadStringPtr", + "StructHeadOmitEmptyStringPtr", + "StructPtrHeadStringPtr", + "StructPtrHeadOmitEmptyStringPtr", + "StructHeadBytesPtr", + "StructHeadOmitEmptyBytesPtr", + "StructPtrHeadBytesPtr", + "StructPtrHeadOmitEmptyBytesPtr", + "StructHeadNumberPtr", + "StructHeadOmitEmptyNumberPtr", + "StructPtrHeadNumberPtr", + "StructPtrHeadOmitEmptyNumberPtr", + "StructHeadArrayPtr", + "StructHeadOmitEmptyArrayPtr", + "StructPtrHeadArrayPtr", + "StructPtrHeadOmitEmptyArrayPtr", + "StructHeadMapPtr", + "StructHeadOmitEmptyMapPtr", + "StructPtrHeadMapPtr", + "StructPtrHeadOmitEmptyMapPtr", + "StructHeadSlicePtr", + "StructHeadOmitEmptySlicePtr", + "StructPtrHeadSlicePtr", + "StructPtrHeadOmitEmptySlicePtr", + "StructHeadMarshalJSONPtr", + "StructHeadOmitEmptyMarshalJSONPtr", + "StructPtrHeadMarshalJSONPtr", + "StructPtrHeadOmitEmptyMarshalJSONPtr", + "StructHeadMarshalTextPtr", + "StructHeadOmitEmptyMarshalTextPtr", + "StructPtrHeadMarshalTextPtr", + "StructPtrHeadOmitEmptyMarshalTextPtr", + "StructHeadInterfacePtr", + "StructHeadOmitEmptyInterfacePtr", + "StructPtrHeadInterfacePtr", + "StructPtrHeadOmitEmptyInterfacePtr", + "StructHeadIntPtrString", + "StructHeadOmitEmptyIntPtrString", + "StructPtrHeadIntPtrString", + "StructPtrHeadOmitEmptyIntPtrString", + "StructHeadUintPtrString", + "StructHeadOmitEmptyUintPtrString", + "StructPtrHeadUintPtrString", + "StructPtrHeadOmitEmptyUintPtrString", + "StructHeadFloat32PtrString", + "StructHeadOmitEmptyFloat32PtrString", + "StructPtrHeadFloat32PtrString", + "StructPtrHeadOmitEmptyFloat32PtrString", + "StructHeadFloat64PtrString", + "StructHeadOmitEmptyFloat64PtrString", + "StructPtrHeadFloat64PtrString", + "StructPtrHeadOmitEmptyFloat64PtrString", + "StructHeadBoolPtrString", + "StructHeadOmitEmptyBoolPtrString", + "StructPtrHeadBoolPtrString", + "StructPtrHeadOmitEmptyBoolPtrString", + "StructHeadStringPtrString", + "StructHeadOmitEmptyStringPtrString", + "StructPtrHeadStringPtrString", + "StructPtrHeadOmitEmptyStringPtrString", + "StructHeadNumberPtrString", + "StructHeadOmitEmptyNumberPtrString", + "StructPtrHeadNumberPtrString", + "StructPtrHeadOmitEmptyNumberPtrString", + "StructHead", + "StructHeadOmitEmpty", + "StructPtrHead", + "StructPtrHeadOmitEmpty", + "StructFieldInt", + "StructFieldOmitEmptyInt", + "StructEndInt", + "StructEndOmitEmptyInt", + "StructFieldUint", + "StructFieldOmitEmptyUint", + "StructEndUint", + "StructEndOmitEmptyUint", + "StructFieldFloat32", + "StructFieldOmitEmptyFloat32", + "StructEndFloat32", + "StructEndOmitEmptyFloat32", + "StructFieldFloat64", + "StructFieldOmitEmptyFloat64", + "StructEndFloat64", + "StructEndOmitEmptyFloat64", + "StructFieldBool", + "StructFieldOmitEmptyBool", + 
"StructEndBool", + "StructEndOmitEmptyBool", + "StructFieldString", + "StructFieldOmitEmptyString", + "StructEndString", + "StructEndOmitEmptyString", + "StructFieldBytes", + "StructFieldOmitEmptyBytes", + "StructEndBytes", + "StructEndOmitEmptyBytes", + "StructFieldNumber", + "StructFieldOmitEmptyNumber", + "StructEndNumber", + "StructEndOmitEmptyNumber", + "StructFieldArray", + "StructFieldOmitEmptyArray", + "StructEndArray", + "StructEndOmitEmptyArray", + "StructFieldMap", + "StructFieldOmitEmptyMap", + "StructEndMap", + "StructEndOmitEmptyMap", + "StructFieldSlice", + "StructFieldOmitEmptySlice", + "StructEndSlice", + "StructEndOmitEmptySlice", + "StructFieldStruct", + "StructFieldOmitEmptyStruct", + "StructEndStruct", + "StructEndOmitEmptyStruct", + "StructFieldMarshalJSON", + "StructFieldOmitEmptyMarshalJSON", + "StructEndMarshalJSON", + "StructEndOmitEmptyMarshalJSON", + "StructFieldMarshalText", + "StructFieldOmitEmptyMarshalText", + "StructEndMarshalText", + "StructEndOmitEmptyMarshalText", + "StructFieldIntString", + "StructFieldOmitEmptyIntString", + "StructEndIntString", + "StructEndOmitEmptyIntString", + "StructFieldUintString", + "StructFieldOmitEmptyUintString", + "StructEndUintString", + "StructEndOmitEmptyUintString", + "StructFieldFloat32String", + "StructFieldOmitEmptyFloat32String", + "StructEndFloat32String", + "StructEndOmitEmptyFloat32String", + "StructFieldFloat64String", + "StructFieldOmitEmptyFloat64String", + "StructEndFloat64String", + "StructEndOmitEmptyFloat64String", + "StructFieldBoolString", + "StructFieldOmitEmptyBoolString", + "StructEndBoolString", + "StructEndOmitEmptyBoolString", + "StructFieldStringString", + "StructFieldOmitEmptyStringString", + "StructEndStringString", + "StructEndOmitEmptyStringString", + "StructFieldNumberString", + "StructFieldOmitEmptyNumberString", + "StructEndNumberString", + "StructEndOmitEmptyNumberString", + "StructFieldIntPtr", + "StructFieldOmitEmptyIntPtr", + "StructEndIntPtr", + "StructEndOmitEmptyIntPtr", + "StructFieldUintPtr", + "StructFieldOmitEmptyUintPtr", + "StructEndUintPtr", + "StructEndOmitEmptyUintPtr", + "StructFieldFloat32Ptr", + "StructFieldOmitEmptyFloat32Ptr", + "StructEndFloat32Ptr", + "StructEndOmitEmptyFloat32Ptr", + "StructFieldFloat64Ptr", + "StructFieldOmitEmptyFloat64Ptr", + "StructEndFloat64Ptr", + "StructEndOmitEmptyFloat64Ptr", + "StructFieldBoolPtr", + "StructFieldOmitEmptyBoolPtr", + "StructEndBoolPtr", + "StructEndOmitEmptyBoolPtr", + "StructFieldStringPtr", + "StructFieldOmitEmptyStringPtr", + "StructEndStringPtr", + "StructEndOmitEmptyStringPtr", + "StructFieldBytesPtr", + "StructFieldOmitEmptyBytesPtr", + "StructEndBytesPtr", + "StructEndOmitEmptyBytesPtr", + "StructFieldNumberPtr", + "StructFieldOmitEmptyNumberPtr", + "StructEndNumberPtr", + "StructEndOmitEmptyNumberPtr", + "StructFieldArrayPtr", + "StructFieldOmitEmptyArrayPtr", + "StructEndArrayPtr", + "StructEndOmitEmptyArrayPtr", + "StructFieldMapPtr", + "StructFieldOmitEmptyMapPtr", + "StructEndMapPtr", + "StructEndOmitEmptyMapPtr", + "StructFieldSlicePtr", + "StructFieldOmitEmptySlicePtr", + "StructEndSlicePtr", + "StructEndOmitEmptySlicePtr", + "StructFieldMarshalJSONPtr", + "StructFieldOmitEmptyMarshalJSONPtr", + "StructEndMarshalJSONPtr", + "StructEndOmitEmptyMarshalJSONPtr", + "StructFieldMarshalTextPtr", + "StructFieldOmitEmptyMarshalTextPtr", + "StructEndMarshalTextPtr", + "StructEndOmitEmptyMarshalTextPtr", + "StructFieldInterfacePtr", + "StructFieldOmitEmptyInterfacePtr", + "StructEndInterfacePtr", + 
"StructEndOmitEmptyInterfacePtr", + "StructFieldIntPtrString", + "StructFieldOmitEmptyIntPtrString", + "StructEndIntPtrString", + "StructEndOmitEmptyIntPtrString", + "StructFieldUintPtrString", + "StructFieldOmitEmptyUintPtrString", + "StructEndUintPtrString", + "StructEndOmitEmptyUintPtrString", + "StructFieldFloat32PtrString", + "StructFieldOmitEmptyFloat32PtrString", + "StructEndFloat32PtrString", + "StructEndOmitEmptyFloat32PtrString", + "StructFieldFloat64PtrString", + "StructFieldOmitEmptyFloat64PtrString", + "StructEndFloat64PtrString", + "StructEndOmitEmptyFloat64PtrString", + "StructFieldBoolPtrString", + "StructFieldOmitEmptyBoolPtrString", + "StructEndBoolPtrString", + "StructEndOmitEmptyBoolPtrString", + "StructFieldStringPtrString", + "StructFieldOmitEmptyStringPtrString", + "StructEndStringPtrString", + "StructEndOmitEmptyStringPtrString", + "StructFieldNumberPtrString", + "StructFieldOmitEmptyNumberPtrString", + "StructEndNumberPtrString", + "StructEndOmitEmptyNumberPtrString", + "StructField", + "StructFieldOmitEmpty", + "StructEnd", + "StructEndOmitEmpty", +} + +type OpType uint16 + +const ( + OpEnd OpType = 0 + OpInterface OpType = 1 + OpPtr OpType = 2 + OpSliceElem OpType = 3 + OpSliceEnd OpType = 4 + OpArrayElem OpType = 5 + OpArrayEnd OpType = 6 + OpMapKey OpType = 7 + OpMapValue OpType = 8 + OpMapEnd OpType = 9 + OpRecursive OpType = 10 + OpRecursivePtr OpType = 11 + OpRecursiveEnd OpType = 12 + OpInterfaceEnd OpType = 13 + OpInt OpType = 14 + OpUint OpType = 15 + OpFloat32 OpType = 16 + OpFloat64 OpType = 17 + OpBool OpType = 18 + OpString OpType = 19 + OpBytes OpType = 20 + OpNumber OpType = 21 + OpArray OpType = 22 + OpMap OpType = 23 + OpSlice OpType = 24 + OpStruct OpType = 25 + OpMarshalJSON OpType = 26 + OpMarshalText OpType = 27 + OpIntString OpType = 28 + OpUintString OpType = 29 + OpFloat32String OpType = 30 + OpFloat64String OpType = 31 + OpBoolString OpType = 32 + OpStringString OpType = 33 + OpNumberString OpType = 34 + OpIntPtr OpType = 35 + OpUintPtr OpType = 36 + OpFloat32Ptr OpType = 37 + OpFloat64Ptr OpType = 38 + OpBoolPtr OpType = 39 + OpStringPtr OpType = 40 + OpBytesPtr OpType = 41 + OpNumberPtr OpType = 42 + OpArrayPtr OpType = 43 + OpMapPtr OpType = 44 + OpSlicePtr OpType = 45 + OpMarshalJSONPtr OpType = 46 + OpMarshalTextPtr OpType = 47 + OpInterfacePtr OpType = 48 + OpIntPtrString OpType = 49 + OpUintPtrString OpType = 50 + OpFloat32PtrString OpType = 51 + OpFloat64PtrString OpType = 52 + OpBoolPtrString OpType = 53 + OpStringPtrString OpType = 54 + OpNumberPtrString OpType = 55 + OpStructHeadInt OpType = 56 + OpStructHeadOmitEmptyInt OpType = 57 + OpStructPtrHeadInt OpType = 58 + OpStructPtrHeadOmitEmptyInt OpType = 59 + OpStructHeadUint OpType = 60 + OpStructHeadOmitEmptyUint OpType = 61 + OpStructPtrHeadUint OpType = 62 + OpStructPtrHeadOmitEmptyUint OpType = 63 + OpStructHeadFloat32 OpType = 64 + OpStructHeadOmitEmptyFloat32 OpType = 65 + OpStructPtrHeadFloat32 OpType = 66 + OpStructPtrHeadOmitEmptyFloat32 OpType = 67 + OpStructHeadFloat64 OpType = 68 + OpStructHeadOmitEmptyFloat64 OpType = 69 + OpStructPtrHeadFloat64 OpType = 70 + OpStructPtrHeadOmitEmptyFloat64 OpType = 71 + OpStructHeadBool OpType = 72 + OpStructHeadOmitEmptyBool OpType = 73 + OpStructPtrHeadBool OpType = 74 + OpStructPtrHeadOmitEmptyBool OpType = 75 + OpStructHeadString OpType = 76 + OpStructHeadOmitEmptyString OpType = 77 + OpStructPtrHeadString OpType = 78 + OpStructPtrHeadOmitEmptyString OpType = 79 + OpStructHeadBytes OpType = 80 + OpStructHeadOmitEmptyBytes 
OpType = 81 + OpStructPtrHeadBytes OpType = 82 + OpStructPtrHeadOmitEmptyBytes OpType = 83 + OpStructHeadNumber OpType = 84 + OpStructHeadOmitEmptyNumber OpType = 85 + OpStructPtrHeadNumber OpType = 86 + OpStructPtrHeadOmitEmptyNumber OpType = 87 + OpStructHeadArray OpType = 88 + OpStructHeadOmitEmptyArray OpType = 89 + OpStructPtrHeadArray OpType = 90 + OpStructPtrHeadOmitEmptyArray OpType = 91 + OpStructHeadMap OpType = 92 + OpStructHeadOmitEmptyMap OpType = 93 + OpStructPtrHeadMap OpType = 94 + OpStructPtrHeadOmitEmptyMap OpType = 95 + OpStructHeadSlice OpType = 96 + OpStructHeadOmitEmptySlice OpType = 97 + OpStructPtrHeadSlice OpType = 98 + OpStructPtrHeadOmitEmptySlice OpType = 99 + OpStructHeadStruct OpType = 100 + OpStructHeadOmitEmptyStruct OpType = 101 + OpStructPtrHeadStruct OpType = 102 + OpStructPtrHeadOmitEmptyStruct OpType = 103 + OpStructHeadMarshalJSON OpType = 104 + OpStructHeadOmitEmptyMarshalJSON OpType = 105 + OpStructPtrHeadMarshalJSON OpType = 106 + OpStructPtrHeadOmitEmptyMarshalJSON OpType = 107 + OpStructHeadMarshalText OpType = 108 + OpStructHeadOmitEmptyMarshalText OpType = 109 + OpStructPtrHeadMarshalText OpType = 110 + OpStructPtrHeadOmitEmptyMarshalText OpType = 111 + OpStructHeadIntString OpType = 112 + OpStructHeadOmitEmptyIntString OpType = 113 + OpStructPtrHeadIntString OpType = 114 + OpStructPtrHeadOmitEmptyIntString OpType = 115 + OpStructHeadUintString OpType = 116 + OpStructHeadOmitEmptyUintString OpType = 117 + OpStructPtrHeadUintString OpType = 118 + OpStructPtrHeadOmitEmptyUintString OpType = 119 + OpStructHeadFloat32String OpType = 120 + OpStructHeadOmitEmptyFloat32String OpType = 121 + OpStructPtrHeadFloat32String OpType = 122 + OpStructPtrHeadOmitEmptyFloat32String OpType = 123 + OpStructHeadFloat64String OpType = 124 + OpStructHeadOmitEmptyFloat64String OpType = 125 + OpStructPtrHeadFloat64String OpType = 126 + OpStructPtrHeadOmitEmptyFloat64String OpType = 127 + OpStructHeadBoolString OpType = 128 + OpStructHeadOmitEmptyBoolString OpType = 129 + OpStructPtrHeadBoolString OpType = 130 + OpStructPtrHeadOmitEmptyBoolString OpType = 131 + OpStructHeadStringString OpType = 132 + OpStructHeadOmitEmptyStringString OpType = 133 + OpStructPtrHeadStringString OpType = 134 + OpStructPtrHeadOmitEmptyStringString OpType = 135 + OpStructHeadNumberString OpType = 136 + OpStructHeadOmitEmptyNumberString OpType = 137 + OpStructPtrHeadNumberString OpType = 138 + OpStructPtrHeadOmitEmptyNumberString OpType = 139 + OpStructHeadIntPtr OpType = 140 + OpStructHeadOmitEmptyIntPtr OpType = 141 + OpStructPtrHeadIntPtr OpType = 142 + OpStructPtrHeadOmitEmptyIntPtr OpType = 143 + OpStructHeadUintPtr OpType = 144 + OpStructHeadOmitEmptyUintPtr OpType = 145 + OpStructPtrHeadUintPtr OpType = 146 + OpStructPtrHeadOmitEmptyUintPtr OpType = 147 + OpStructHeadFloat32Ptr OpType = 148 + OpStructHeadOmitEmptyFloat32Ptr OpType = 149 + OpStructPtrHeadFloat32Ptr OpType = 150 + OpStructPtrHeadOmitEmptyFloat32Ptr OpType = 151 + OpStructHeadFloat64Ptr OpType = 152 + OpStructHeadOmitEmptyFloat64Ptr OpType = 153 + OpStructPtrHeadFloat64Ptr OpType = 154 + OpStructPtrHeadOmitEmptyFloat64Ptr OpType = 155 + OpStructHeadBoolPtr OpType = 156 + OpStructHeadOmitEmptyBoolPtr OpType = 157 + OpStructPtrHeadBoolPtr OpType = 158 + OpStructPtrHeadOmitEmptyBoolPtr OpType = 159 + OpStructHeadStringPtr OpType = 160 + OpStructHeadOmitEmptyStringPtr OpType = 161 + OpStructPtrHeadStringPtr OpType = 162 + OpStructPtrHeadOmitEmptyStringPtr OpType = 163 + OpStructHeadBytesPtr OpType = 164 + 
OpStructHeadOmitEmptyBytesPtr OpType = 165 + OpStructPtrHeadBytesPtr OpType = 166 + OpStructPtrHeadOmitEmptyBytesPtr OpType = 167 + OpStructHeadNumberPtr OpType = 168 + OpStructHeadOmitEmptyNumberPtr OpType = 169 + OpStructPtrHeadNumberPtr OpType = 170 + OpStructPtrHeadOmitEmptyNumberPtr OpType = 171 + OpStructHeadArrayPtr OpType = 172 + OpStructHeadOmitEmptyArrayPtr OpType = 173 + OpStructPtrHeadArrayPtr OpType = 174 + OpStructPtrHeadOmitEmptyArrayPtr OpType = 175 + OpStructHeadMapPtr OpType = 176 + OpStructHeadOmitEmptyMapPtr OpType = 177 + OpStructPtrHeadMapPtr OpType = 178 + OpStructPtrHeadOmitEmptyMapPtr OpType = 179 + OpStructHeadSlicePtr OpType = 180 + OpStructHeadOmitEmptySlicePtr OpType = 181 + OpStructPtrHeadSlicePtr OpType = 182 + OpStructPtrHeadOmitEmptySlicePtr OpType = 183 + OpStructHeadMarshalJSONPtr OpType = 184 + OpStructHeadOmitEmptyMarshalJSONPtr OpType = 185 + OpStructPtrHeadMarshalJSONPtr OpType = 186 + OpStructPtrHeadOmitEmptyMarshalJSONPtr OpType = 187 + OpStructHeadMarshalTextPtr OpType = 188 + OpStructHeadOmitEmptyMarshalTextPtr OpType = 189 + OpStructPtrHeadMarshalTextPtr OpType = 190 + OpStructPtrHeadOmitEmptyMarshalTextPtr OpType = 191 + OpStructHeadInterfacePtr OpType = 192 + OpStructHeadOmitEmptyInterfacePtr OpType = 193 + OpStructPtrHeadInterfacePtr OpType = 194 + OpStructPtrHeadOmitEmptyInterfacePtr OpType = 195 + OpStructHeadIntPtrString OpType = 196 + OpStructHeadOmitEmptyIntPtrString OpType = 197 + OpStructPtrHeadIntPtrString OpType = 198 + OpStructPtrHeadOmitEmptyIntPtrString OpType = 199 + OpStructHeadUintPtrString OpType = 200 + OpStructHeadOmitEmptyUintPtrString OpType = 201 + OpStructPtrHeadUintPtrString OpType = 202 + OpStructPtrHeadOmitEmptyUintPtrString OpType = 203 + OpStructHeadFloat32PtrString OpType = 204 + OpStructHeadOmitEmptyFloat32PtrString OpType = 205 + OpStructPtrHeadFloat32PtrString OpType = 206 + OpStructPtrHeadOmitEmptyFloat32PtrString OpType = 207 + OpStructHeadFloat64PtrString OpType = 208 + OpStructHeadOmitEmptyFloat64PtrString OpType = 209 + OpStructPtrHeadFloat64PtrString OpType = 210 + OpStructPtrHeadOmitEmptyFloat64PtrString OpType = 211 + OpStructHeadBoolPtrString OpType = 212 + OpStructHeadOmitEmptyBoolPtrString OpType = 213 + OpStructPtrHeadBoolPtrString OpType = 214 + OpStructPtrHeadOmitEmptyBoolPtrString OpType = 215 + OpStructHeadStringPtrString OpType = 216 + OpStructHeadOmitEmptyStringPtrString OpType = 217 + OpStructPtrHeadStringPtrString OpType = 218 + OpStructPtrHeadOmitEmptyStringPtrString OpType = 219 + OpStructHeadNumberPtrString OpType = 220 + OpStructHeadOmitEmptyNumberPtrString OpType = 221 + OpStructPtrHeadNumberPtrString OpType = 222 + OpStructPtrHeadOmitEmptyNumberPtrString OpType = 223 + OpStructHead OpType = 224 + OpStructHeadOmitEmpty OpType = 225 + OpStructPtrHead OpType = 226 + OpStructPtrHeadOmitEmpty OpType = 227 + OpStructFieldInt OpType = 228 + OpStructFieldOmitEmptyInt OpType = 229 + OpStructEndInt OpType = 230 + OpStructEndOmitEmptyInt OpType = 231 + OpStructFieldUint OpType = 232 + OpStructFieldOmitEmptyUint OpType = 233 + OpStructEndUint OpType = 234 + OpStructEndOmitEmptyUint OpType = 235 + OpStructFieldFloat32 OpType = 236 + OpStructFieldOmitEmptyFloat32 OpType = 237 + OpStructEndFloat32 OpType = 238 + OpStructEndOmitEmptyFloat32 OpType = 239 + OpStructFieldFloat64 OpType = 240 + OpStructFieldOmitEmptyFloat64 OpType = 241 + OpStructEndFloat64 OpType = 242 + OpStructEndOmitEmptyFloat64 OpType = 243 + OpStructFieldBool OpType = 244 + OpStructFieldOmitEmptyBool OpType = 245 + OpStructEndBool 
OpType = 246 + OpStructEndOmitEmptyBool OpType = 247 + OpStructFieldString OpType = 248 + OpStructFieldOmitEmptyString OpType = 249 + OpStructEndString OpType = 250 + OpStructEndOmitEmptyString OpType = 251 + OpStructFieldBytes OpType = 252 + OpStructFieldOmitEmptyBytes OpType = 253 + OpStructEndBytes OpType = 254 + OpStructEndOmitEmptyBytes OpType = 255 + OpStructFieldNumber OpType = 256 + OpStructFieldOmitEmptyNumber OpType = 257 + OpStructEndNumber OpType = 258 + OpStructEndOmitEmptyNumber OpType = 259 + OpStructFieldArray OpType = 260 + OpStructFieldOmitEmptyArray OpType = 261 + OpStructEndArray OpType = 262 + OpStructEndOmitEmptyArray OpType = 263 + OpStructFieldMap OpType = 264 + OpStructFieldOmitEmptyMap OpType = 265 + OpStructEndMap OpType = 266 + OpStructEndOmitEmptyMap OpType = 267 + OpStructFieldSlice OpType = 268 + OpStructFieldOmitEmptySlice OpType = 269 + OpStructEndSlice OpType = 270 + OpStructEndOmitEmptySlice OpType = 271 + OpStructFieldStruct OpType = 272 + OpStructFieldOmitEmptyStruct OpType = 273 + OpStructEndStruct OpType = 274 + OpStructEndOmitEmptyStruct OpType = 275 + OpStructFieldMarshalJSON OpType = 276 + OpStructFieldOmitEmptyMarshalJSON OpType = 277 + OpStructEndMarshalJSON OpType = 278 + OpStructEndOmitEmptyMarshalJSON OpType = 279 + OpStructFieldMarshalText OpType = 280 + OpStructFieldOmitEmptyMarshalText OpType = 281 + OpStructEndMarshalText OpType = 282 + OpStructEndOmitEmptyMarshalText OpType = 283 + OpStructFieldIntString OpType = 284 + OpStructFieldOmitEmptyIntString OpType = 285 + OpStructEndIntString OpType = 286 + OpStructEndOmitEmptyIntString OpType = 287 + OpStructFieldUintString OpType = 288 + OpStructFieldOmitEmptyUintString OpType = 289 + OpStructEndUintString OpType = 290 + OpStructEndOmitEmptyUintString OpType = 291 + OpStructFieldFloat32String OpType = 292 + OpStructFieldOmitEmptyFloat32String OpType = 293 + OpStructEndFloat32String OpType = 294 + OpStructEndOmitEmptyFloat32String OpType = 295 + OpStructFieldFloat64String OpType = 296 + OpStructFieldOmitEmptyFloat64String OpType = 297 + OpStructEndFloat64String OpType = 298 + OpStructEndOmitEmptyFloat64String OpType = 299 + OpStructFieldBoolString OpType = 300 + OpStructFieldOmitEmptyBoolString OpType = 301 + OpStructEndBoolString OpType = 302 + OpStructEndOmitEmptyBoolString OpType = 303 + OpStructFieldStringString OpType = 304 + OpStructFieldOmitEmptyStringString OpType = 305 + OpStructEndStringString OpType = 306 + OpStructEndOmitEmptyStringString OpType = 307 + OpStructFieldNumberString OpType = 308 + OpStructFieldOmitEmptyNumberString OpType = 309 + OpStructEndNumberString OpType = 310 + OpStructEndOmitEmptyNumberString OpType = 311 + OpStructFieldIntPtr OpType = 312 + OpStructFieldOmitEmptyIntPtr OpType = 313 + OpStructEndIntPtr OpType = 314 + OpStructEndOmitEmptyIntPtr OpType = 315 + OpStructFieldUintPtr OpType = 316 + OpStructFieldOmitEmptyUintPtr OpType = 317 + OpStructEndUintPtr OpType = 318 + OpStructEndOmitEmptyUintPtr OpType = 319 + OpStructFieldFloat32Ptr OpType = 320 + OpStructFieldOmitEmptyFloat32Ptr OpType = 321 + OpStructEndFloat32Ptr OpType = 322 + OpStructEndOmitEmptyFloat32Ptr OpType = 323 + OpStructFieldFloat64Ptr OpType = 324 + OpStructFieldOmitEmptyFloat64Ptr OpType = 325 + OpStructEndFloat64Ptr OpType = 326 + OpStructEndOmitEmptyFloat64Ptr OpType = 327 + OpStructFieldBoolPtr OpType = 328 + OpStructFieldOmitEmptyBoolPtr OpType = 329 + OpStructEndBoolPtr OpType = 330 + OpStructEndOmitEmptyBoolPtr OpType = 331 + OpStructFieldStringPtr OpType = 332 + 
OpStructFieldOmitEmptyStringPtr OpType = 333 + OpStructEndStringPtr OpType = 334 + OpStructEndOmitEmptyStringPtr OpType = 335 + OpStructFieldBytesPtr OpType = 336 + OpStructFieldOmitEmptyBytesPtr OpType = 337 + OpStructEndBytesPtr OpType = 338 + OpStructEndOmitEmptyBytesPtr OpType = 339 + OpStructFieldNumberPtr OpType = 340 + OpStructFieldOmitEmptyNumberPtr OpType = 341 + OpStructEndNumberPtr OpType = 342 + OpStructEndOmitEmptyNumberPtr OpType = 343 + OpStructFieldArrayPtr OpType = 344 + OpStructFieldOmitEmptyArrayPtr OpType = 345 + OpStructEndArrayPtr OpType = 346 + OpStructEndOmitEmptyArrayPtr OpType = 347 + OpStructFieldMapPtr OpType = 348 + OpStructFieldOmitEmptyMapPtr OpType = 349 + OpStructEndMapPtr OpType = 350 + OpStructEndOmitEmptyMapPtr OpType = 351 + OpStructFieldSlicePtr OpType = 352 + OpStructFieldOmitEmptySlicePtr OpType = 353 + OpStructEndSlicePtr OpType = 354 + OpStructEndOmitEmptySlicePtr OpType = 355 + OpStructFieldMarshalJSONPtr OpType = 356 + OpStructFieldOmitEmptyMarshalJSONPtr OpType = 357 + OpStructEndMarshalJSONPtr OpType = 358 + OpStructEndOmitEmptyMarshalJSONPtr OpType = 359 + OpStructFieldMarshalTextPtr OpType = 360 + OpStructFieldOmitEmptyMarshalTextPtr OpType = 361 + OpStructEndMarshalTextPtr OpType = 362 + OpStructEndOmitEmptyMarshalTextPtr OpType = 363 + OpStructFieldInterfacePtr OpType = 364 + OpStructFieldOmitEmptyInterfacePtr OpType = 365 + OpStructEndInterfacePtr OpType = 366 + OpStructEndOmitEmptyInterfacePtr OpType = 367 + OpStructFieldIntPtrString OpType = 368 + OpStructFieldOmitEmptyIntPtrString OpType = 369 + OpStructEndIntPtrString OpType = 370 + OpStructEndOmitEmptyIntPtrString OpType = 371 + OpStructFieldUintPtrString OpType = 372 + OpStructFieldOmitEmptyUintPtrString OpType = 373 + OpStructEndUintPtrString OpType = 374 + OpStructEndOmitEmptyUintPtrString OpType = 375 + OpStructFieldFloat32PtrString OpType = 376 + OpStructFieldOmitEmptyFloat32PtrString OpType = 377 + OpStructEndFloat32PtrString OpType = 378 + OpStructEndOmitEmptyFloat32PtrString OpType = 379 + OpStructFieldFloat64PtrString OpType = 380 + OpStructFieldOmitEmptyFloat64PtrString OpType = 381 + OpStructEndFloat64PtrString OpType = 382 + OpStructEndOmitEmptyFloat64PtrString OpType = 383 + OpStructFieldBoolPtrString OpType = 384 + OpStructFieldOmitEmptyBoolPtrString OpType = 385 + OpStructEndBoolPtrString OpType = 386 + OpStructEndOmitEmptyBoolPtrString OpType = 387 + OpStructFieldStringPtrString OpType = 388 + OpStructFieldOmitEmptyStringPtrString OpType = 389 + OpStructEndStringPtrString OpType = 390 + OpStructEndOmitEmptyStringPtrString OpType = 391 + OpStructFieldNumberPtrString OpType = 392 + OpStructFieldOmitEmptyNumberPtrString OpType = 393 + OpStructEndNumberPtrString OpType = 394 + OpStructEndOmitEmptyNumberPtrString OpType = 395 + OpStructField OpType = 396 + OpStructFieldOmitEmpty OpType = 397 + OpStructEnd OpType = 398 + OpStructEndOmitEmpty OpType = 399 +) + +func (t OpType) String() string { + if int(t) >= 400 { + return "" + } + return opTypeStrings[int(t)] +} + +func (t OpType) CodeType() CodeType { + if strings.Contains(t.String(), "Struct") { + if strings.Contains(t.String(), "End") { + return CodeStructEnd + } + return CodeStructField + } + switch t { + case OpArray, OpArrayPtr: + return CodeArrayHead + case OpArrayElem: + return CodeArrayElem + case OpSlice, OpSlicePtr: + return CodeSliceHead + case OpSliceElem: + return CodeSliceElem + case OpMap, OpMapPtr: + return CodeMapHead + case OpMapKey: + return CodeMapKey + case OpMapValue: + return CodeMapValue + case 
OpMapEnd: + return CodeMapEnd + } + + return CodeOp +} + +func (t OpType) HeadToPtrHead() OpType { + if strings.Index(t.String(), "PtrHead") > 0 { + return t + } + + idx := strings.Index(t.String(), "Head") + if idx == -1 { + return t + } + suffix := "PtrHead" + t.String()[idx+len("Head"):] + + const toPtrOffset = 2 + if strings.Contains(OpType(int(t)+toPtrOffset).String(), suffix) { + return OpType(int(t) + toPtrOffset) + } + return t +} + +func (t OpType) HeadToOmitEmptyHead() OpType { + const toOmitEmptyOffset = 1 + if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") { + return OpType(int(t) + toOmitEmptyOffset) + } + + return t +} + +func (t OpType) PtrHeadToHead() OpType { + idx := strings.Index(t.String(), "PtrHead") + if idx == -1 { + return t + } + suffix := t.String()[idx+len("Ptr"):] + + const toPtrOffset = 2 + if strings.Contains(OpType(int(t)-toPtrOffset).String(), suffix) { + return OpType(int(t) - toPtrOffset) + } + return t +} + +func (t OpType) FieldToEnd() OpType { + idx := strings.Index(t.String(), "Field") + if idx == -1 { + return t + } + suffix := t.String()[idx+len("Field"):] + if suffix == "" || suffix == "OmitEmpty" { + return t + } + const toEndOffset = 2 + if strings.Contains(OpType(int(t)+toEndOffset).String(), "End"+suffix) { + return OpType(int(t) + toEndOffset) + } + return t +} + +func (t OpType) FieldToOmitEmptyField() OpType { + const toOmitEmptyOffset = 1 + if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") { + return OpType(int(t) + toOmitEmptyOffset) + } + return t +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/query.go b/vendor/github.com/goccy/go-json/internal/encoder/query.go new file mode 100644 index 000000000000..1e1850cc153d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/query.go @@ -0,0 +1,135 @@ +package encoder + +import ( + "context" + "fmt" + "reflect" +) + +var ( + Marshal func(interface{}) ([]byte, error) + Unmarshal func([]byte, interface{}) error +) + +type FieldQuery struct { + Name string + Fields []*FieldQuery + hash string +} + +func (q *FieldQuery) Hash() string { + if q.hash != "" { + return q.hash + } + b, _ := Marshal(q) + q.hash = string(b) + return q.hash +} + +func (q *FieldQuery) MarshalJSON() ([]byte, error) { + if q.Name != "" { + if len(q.Fields) > 0 { + return Marshal(map[string][]*FieldQuery{q.Name: q.Fields}) + } + return Marshal(q.Name) + } + return Marshal(q.Fields) +} + +func (q *FieldQuery) QueryString() (FieldQueryString, error) { + b, err := Marshal(q) + if err != nil { + return "", err + } + return FieldQueryString(b), nil +} + +type FieldQueryString string + +func (s FieldQueryString) Build() (*FieldQuery, error) { + var query interface{} + if err := Unmarshal([]byte(s), &query); err != nil { + return nil, err + } + return s.build(reflect.ValueOf(query)) +} + +func (s FieldQueryString) build(v reflect.Value) (*FieldQuery, error) { + switch v.Type().Kind() { + case reflect.String: + return s.buildString(v) + case reflect.Map: + return s.buildMap(v) + case reflect.Slice: + return s.buildSlice(v) + case reflect.Interface: + return s.build(reflect.ValueOf(v.Interface())) + } + return nil, fmt.Errorf("failed to build field query") +} + +func (s FieldQueryString) buildString(v reflect.Value) (*FieldQuery, error) { + b := []byte(v.String()) + switch b[0] { + case '[', '{': + var query interface{} + if err := Unmarshal(b, &query); err != nil { + return nil, err + } + if str, ok := query.(string); ok { + return &FieldQuery{Name: str}, 
nil + } + return s.build(reflect.ValueOf(query)) + } + return &FieldQuery{Name: string(b)}, nil +} + +func (s FieldQueryString) buildSlice(v reflect.Value) (*FieldQuery, error) { + fields := make([]*FieldQuery, 0, v.Len()) + for i := 0; i < v.Len(); i++ { + def, err := s.build(v.Index(i)) + if err != nil { + return nil, err + } + fields = append(fields, def) + } + return &FieldQuery{Fields: fields}, nil +} + +func (s FieldQueryString) buildMap(v reflect.Value) (*FieldQuery, error) { + keys := v.MapKeys() + if len(keys) != 1 { + return nil, fmt.Errorf("failed to build field query object") + } + key := keys[0] + if key.Type().Kind() != reflect.String { + return nil, fmt.Errorf("failed to build field query. invalid object key type") + } + name := key.String() + def, err := s.build(v.MapIndex(key)) + if err != nil { + return nil, err + } + return &FieldQuery{ + Name: name, + Fields: def.Fields, + }, nil +} + +type queryKey struct{} + +func FieldQueryFromContext(ctx context.Context) *FieldQuery { + query := ctx.Value(queryKey{}) + if query == nil { + return nil + } + q, ok := query.(*FieldQuery) + if !ok { + return nil + } + return q +} + +func SetFieldQueryToContext(ctx context.Context, query *FieldQuery) context.Context { + return context.WithValue(ctx, queryKey{}, query) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string.go b/vendor/github.com/goccy/go-json/internal/encoder/string.go new file mode 100644 index 000000000000..4abb84165e9c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/string.go @@ -0,0 +1,483 @@ +// This files's string processing codes are inspired by https://github.com/segmentio/encoding. +// The license notation is as follows. +// +// # MIT License +// +// Copyright (c) 2019 Segment.io, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
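For context on the FieldQueryString.Build helpers added in query.go above: the accepted query is a JSON array whose elements are either plain field names or single-key objects mapping a field name to its sub-fields. The sketch below is a standalone, illustrative re-implementation of that decoding shape using only encoding/json; the fieldQuery type and build function are hypothetical names, not the library's API.

package main

import (
	"encoding/json"
	"fmt"
)

// fieldQuery mirrors the Name/Fields tree produced by query.go above.
type fieldQuery struct {
	Name   string        `json:"name,omitempty"`
	Fields []*fieldQuery `json:"fields,omitempty"`
}

func build(v interface{}) (*fieldQuery, error) {
	switch t := v.(type) {
	case string: // a plain field name
		return &fieldQuery{Name: t}, nil
	case []interface{}: // a list of sub-queries
		q := &fieldQuery{}
		for _, e := range t {
			child, err := build(e)
			if err != nil {
				return nil, err
			}
			q.Fields = append(q.Fields, child)
		}
		return q, nil
	case map[string]interface{}: // {"field": [sub-fields]}
		if len(t) != 1 {
			return nil, fmt.Errorf("query object must have exactly one key")
		}
		for name, sub := range t {
			child, err := build(sub)
			if err != nil {
				return nil, err
			}
			return &fieldQuery{Name: name, Fields: child.Fields}, nil
		}
	}
	return nil, fmt.Errorf("unsupported query element %T", v)
}

func main() {
	var raw interface{}
	if err := json.Unmarshal([]byte(`["id", {"user": ["name", "email"]}]`), &raw); err != nil {
		panic(err)
	}
	q, err := build(raw)
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(q)
	// {"fields":[{"name":"id"},{"name":"user","fields":[{"name":"name"},{"name":"email"}]}]}
	fmt.Println(string(out))
}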
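The appendString fast path in the file below (adapted, per its header, from segmentio/encoding) scans the input eight bytes at a time and combines several subtraction/XOR masks to detect bytes that may need escaping. The sketch below is a minimal standalone illustration of that word-at-a-time check, assuming little-endian byte order; firstSuspectByte is an illustrative name, not part of the library. The check is deliberately conservative: borrow propagation can flag a byte that does not strictly need escaping, which is harmless because the byte-by-byte slow path re-checks everything from the reported index.

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

const (
	lsb = 0x0101010101010101
	msb = 0x8080808080808080
)

// firstSuspectByte returns the index (0-7) of the first byte in the
// little-endian word n that may need escaping, and whether any byte does.
func firstSuspectByte(n uint64) (int, bool) {
	mask := n | // non-ASCII bytes (high bit already set)
		(n - (lsb * 0x20)) | // bytes below 0x20 (control characters)
		((n ^ (lsb * '"')) - lsb) | // bytes equal to '"'
		((n ^ (lsb * '\\')) - lsb) // bytes equal to '\\'
	if mask&msb == 0 {
		return 0, false
	}
	return bits.TrailingZeros64(mask&msb) / 8, true
}

func main() {
	clean := binary.LittleEndian.Uint64([]byte("abcdefgh"))
	dirty := binary.LittleEndian.Uint64([]byte(`abc"efgh`))

	_, ok := firstSuspectByte(clean)
	fmt.Println(ok) // false: the whole chunk can be copied verbatim

	idx, ok := firstSuspectByte(dirty)
	fmt.Println(ok, idx) // true 3: fall back to per-byte escaping at the quote
}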
+package encoder + +import ( + "math/bits" + "reflect" + "unsafe" +) + +const ( + lsb = 0x0101010101010101 + msb = 0x8080808080808080 +) + +var hex = "0123456789abcdef" + +//nolint:govet +func stringToUint64Slice(s string) []uint64 { + return *(*[]uint64)(unsafe.Pointer(&reflect.SliceHeader{ + Data: ((*reflect.StringHeader)(unsafe.Pointer(&s))).Data, + Len: len(s) / 8, + Cap: len(s) / 8, + })) +} + +func AppendString(ctx *RuntimeContext, buf []byte, s string) []byte { + if ctx.Option.Flag&HTMLEscapeOption != 0 { + if ctx.Option.Flag&NormalizeUTF8Option != 0 { + return appendNormalizedHTMLString(buf, s) + } + return appendHTMLString(buf, s) + } + if ctx.Option.Flag&NormalizeUTF8Option != 0 { + return appendNormalizedString(buf, s) + } + return appendString(buf, s) +} + +func appendNormalizedHTMLString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) | + ((n ^ (lsb * '<')) - lsb) | + ((n ^ (lsb * '>')) - lsb) | + ((n ^ (lsb * '&')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeHTMLNormalizeUTF8[s[i]] { + j = i + goto ESCAPE_END + } + } + // no found any escape characters. + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeHTMLNormalizeUTF8[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + state, size := decodeRuneInString(s[j:]) + switch state { + case runeErrorState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\ufffd`...) + i = j + 1 + j = j + 1 + continue + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + case lineSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2028`...) 
+ i = j + 3 + j = j + 3 + continue + case paragraphSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2029`...) + i = j + 3 + j = j + 3 + continue + } + j += size + } + + return append(append(buf, s[i:]...), '"') +} + +func appendHTMLString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) | + ((n ^ (lsb * '<')) - lsb) | + ((n ^ (lsb * '>')) - lsb) | + ((n ^ (lsb * '&')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeHTML[s[i]] { + j = i + goto ESCAPE_END + } + } + // no found any escape characters. + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeHTML[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + j++ + } + + return append(append(buf, s[i:]...), '"') +} + +func appendNormalizedString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + valLen := len(s) + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeNormalizeUTF8[s[i]] { + j = i + goto ESCAPE_END + } + } + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeNormalizeUTF8[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) 
+ buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + state, size := decodeRuneInString(s[j:]) + switch state { + case runeErrorState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\ufffd`...) + i = j + 1 + j = j + 1 + continue + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + case lineSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2028`...) + i = j + 3 + j = j + 3 + continue + case paragraphSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2029`...) + i = j + 3 + j = j + 3 + continue + } + j += size + } + + return append(append(buf, s[i:]...), '"') +} + +func appendString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + valLen := len(s) + for i := len(chunks) * 8; i < valLen; i++ { + if needEscape[s[i]] { + j = i + goto ESCAPE_END + } + } + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscape[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) 
+ buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + j++ + } + + return append(append(buf, s[i:]...), '"') +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string_table.go b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go new file mode 100644 index 000000000000..ebe42c92dfd8 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go @@ -0,0 +1,415 @@ +package encoder + +var needEscapeHTMLNormalizeUTF8 = [256]bool{ + '"': true, + '&': true, + '<': true, + '>': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0x7f */ + 0x80: true, + 0x81: true, + 0x82: true, + 0x83: true, + 0x84: true, + 0x85: true, + 0x86: true, + 0x87: true, + 0x88: true, + 0x89: true, + 0x8a: true, + 0x8b: true, + 0x8c: true, + 0x8d: true, + 0x8e: true, + 0x8f: true, + 0x90: true, + 0x91: true, + 0x92: true, + 0x93: true, + 0x94: true, + 0x95: true, + 0x96: true, + 0x97: true, + 0x98: true, + 0x99: true, + 0x9a: true, + 0x9b: true, + 0x9c: true, + 0x9d: true, + 0x9e: true, + 0x9f: true, + 0xa0: true, + 0xa1: true, + 0xa2: true, + 0xa3: true, + 0xa4: true, + 0xa5: true, + 0xa6: true, + 0xa7: true, + 0xa8: true, + 0xa9: true, + 0xaa: true, + 0xab: true, + 0xac: true, + 0xad: true, + 0xae: true, + 0xaf: true, + 0xb0: true, + 0xb1: true, + 0xb2: true, + 0xb3: true, + 0xb4: true, + 0xb5: true, + 0xb6: true, + 0xb7: true, + 0xb8: true, + 0xb9: true, + 0xba: true, + 0xbb: true, + 0xbc: true, + 0xbd: true, + 0xbe: true, + 0xbf: true, + 0xc0: true, + 0xc1: true, + 0xc2: true, + 0xc3: true, + 0xc4: true, + 0xc5: true, + 0xc6: true, + 0xc7: true, + 0xc8: true, + 0xc9: true, + 0xca: true, + 0xcb: true, + 0xcc: true, + 0xcd: true, + 0xce: true, + 0xcf: true, + 0xd0: true, + 0xd1: true, + 0xd2: true, + 0xd3: true, + 0xd4: true, + 0xd5: true, + 0xd6: true, + 0xd7: true, + 0xd8: true, + 0xd9: true, + 0xda: true, + 0xdb: true, + 0xdc: true, + 0xdd: true, + 0xde: true, + 0xdf: true, + 0xe0: true, + 0xe1: true, + 0xe2: true, + 0xe3: true, + 0xe4: true, + 0xe5: true, + 0xe6: true, + 0xe7: true, + 0xe8: true, + 0xe9: true, + 0xea: true, + 0xeb: true, + 0xec: true, + 0xed: true, + 0xee: true, + 0xef: true, + 0xf0: true, + 0xf1: true, + 0xf2: true, + 0xf3: true, + 0xf4: true, + 0xf5: true, + 0xf6: true, + 0xf7: true, + 0xf8: true, + 0xf9: true, + 0xfa: true, + 0xfb: true, + 0xfc: true, + 0xfd: true, + 0xfe: true, + 0xff: true, +} + +var needEscapeNormalizeUTF8 = [256]bool{ + '"': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0x7f */ + 0x80: true, + 0x81: true, + 0x82: true, + 0x83: true, + 0x84: true, + 0x85: true, + 0x86: true, + 0x87: true, + 0x88: true, + 0x89: true, + 0x8a: true, + 0x8b: true, + 0x8c: 
true, + 0x8d: true, + 0x8e: true, + 0x8f: true, + 0x90: true, + 0x91: true, + 0x92: true, + 0x93: true, + 0x94: true, + 0x95: true, + 0x96: true, + 0x97: true, + 0x98: true, + 0x99: true, + 0x9a: true, + 0x9b: true, + 0x9c: true, + 0x9d: true, + 0x9e: true, + 0x9f: true, + 0xa0: true, + 0xa1: true, + 0xa2: true, + 0xa3: true, + 0xa4: true, + 0xa5: true, + 0xa6: true, + 0xa7: true, + 0xa8: true, + 0xa9: true, + 0xaa: true, + 0xab: true, + 0xac: true, + 0xad: true, + 0xae: true, + 0xaf: true, + 0xb0: true, + 0xb1: true, + 0xb2: true, + 0xb3: true, + 0xb4: true, + 0xb5: true, + 0xb6: true, + 0xb7: true, + 0xb8: true, + 0xb9: true, + 0xba: true, + 0xbb: true, + 0xbc: true, + 0xbd: true, + 0xbe: true, + 0xbf: true, + 0xc0: true, + 0xc1: true, + 0xc2: true, + 0xc3: true, + 0xc4: true, + 0xc5: true, + 0xc6: true, + 0xc7: true, + 0xc8: true, + 0xc9: true, + 0xca: true, + 0xcb: true, + 0xcc: true, + 0xcd: true, + 0xce: true, + 0xcf: true, + 0xd0: true, + 0xd1: true, + 0xd2: true, + 0xd3: true, + 0xd4: true, + 0xd5: true, + 0xd6: true, + 0xd7: true, + 0xd8: true, + 0xd9: true, + 0xda: true, + 0xdb: true, + 0xdc: true, + 0xdd: true, + 0xde: true, + 0xdf: true, + 0xe0: true, + 0xe1: true, + 0xe2: true, + 0xe3: true, + 0xe4: true, + 0xe5: true, + 0xe6: true, + 0xe7: true, + 0xe8: true, + 0xe9: true, + 0xea: true, + 0xeb: true, + 0xec: true, + 0xed: true, + 0xee: true, + 0xef: true, + 0xf0: true, + 0xf1: true, + 0xf2: true, + 0xf3: true, + 0xf4: true, + 0xf5: true, + 0xf6: true, + 0xf7: true, + 0xf8: true, + 0xf9: true, + 0xfa: true, + 0xfb: true, + 0xfc: true, + 0xfd: true, + 0xfe: true, + 0xff: true, +} + +var needEscapeHTML = [256]bool{ + '"': true, + '&': true, + '<': true, + '>': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0xff */ +} + +var needEscape = [256]bool{ + '"': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0xff */ +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go new file mode 100644 index 000000000000..82b6dd47f864 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go @@ -0,0 +1,41 @@ +package vm + +import ( + "fmt" + "io" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + defer func() { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + if wc := ctx.Option.DebugDOTOut; wc != nil { + _, _ = io.WriteString(wc, code.DumpDOT()) + wc.Close() + ctx.Option.DebugDOTOut = nil + } + + if err := recover(); 
err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go new file mode 100644 index 000000000000..65252b4a5cd7 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go @@ -0,0 +1,9 @@ +package vm + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. + // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_indent" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go new file mode 100644 index 000000000000..86291d7bb377 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go @@ -0,0 +1,207 @@ +package vm + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendInt = encoder.AppendInt + appendUint = encoder.AppendUint + appendFloat32 = encoder.AppendFloat32 + appendFloat64 = encoder.AppendFloat64 + appendString = encoder.AppendString + appendByteSlice = encoder.AppendByteSlice + appendNumber = encoder.AppendNumber + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... 
+ } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder: opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +func appendNull(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null,"...) +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + last := len(b) - 1 + b[last] = ':' + return b +} + +func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte { + b = append(b, key...) + b[len(b)-1] = ':' + return append(b, value...) 
+} + +func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + b[len(b)-1] = '}' + b = append(b, ',') + return b +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSON(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalText(ctx, code, b, v) +} + +func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '[') +} + +func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = ']' + return append(b, ',') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',') +} + +func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + return append(b, ',') +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{') +} + +func appendStructKey(_ *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return append(b, code.Key...) +} + +func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '}', ',') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last] == ',' { + b[last] = '}' + return appendComma(ctx, b) + } + return appendStructEnd(ctx, code, b) +} + +func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} +func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} +func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } +func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go new file mode 100644 index 000000000000..645d20f9fbe9 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
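The append helpers in util.go above (appendColon, appendObjectEnd, appendArrayEnd, appendStructEndSkipLast) share one convention: every value is written with a trailing comma, and whatever closes the current scope overwrites that comma in place, so the encoder never tracks "is this the first element" state. A minimal standalone sketch of that convention follows; the helper names here are illustrative stand-ins, not the vendored functions.

package main

import "fmt"

// appendColon turns the comma written after an object key into ':'.
func appendColon(b []byte) []byte {
	b[len(b)-1] = ':'
	return b
}

// appendObjectEnd turns the comma after the last value into '}' and
// appends a fresh trailing comma for the enclosing scope.
func appendObjectEnd(b []byte) []byte {
	b[len(b)-1] = '}'
	return append(b, ',')
}

func main() {
	b := []byte{'{'}
	b = append(b, `"a",`...) // key + trailing comma
	b = appendColon(b)
	b = append(b, `1,`...) // value + trailing comma
	b = append(b, `"b",`...)
	b = appendColon(b)
	b = append(b, `2,`...)
	b = appendObjectEnd(b)
	fmt.Println(string(b[:len(b)-1])) // {"a":1,"b":2}
}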
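The generated vm.go that follows drives encoding as a threaded-opcode interpreter: Run loops over a linked list of Opcode values, switches on code.Op, appends bytes to the output buffer, and advances via code.Next (or code.End for early exits). The sketch below is a heavily simplified standalone illustration of that dispatch pattern under assumed names (Opcode, run); it is not the goccy/go-json implementation and omits the pointer scratch slots, struct handling, and escaping.

package main

import "fmt"

type OpType int

const (
	OpEnd OpType = iota
	OpString
	OpInt
)

// Opcode is a node in a pre-compiled encoding program: the operation to
// perform, its immediate operand, and the next opcode to execute.
type Opcode struct {
	Op   OpType
	Str  string
	Int  int64
	Next *Opcode
}

// run walks the opcode list, appending JSON fragments until OpEnd.
func run(buf []byte, code *Opcode) []byte {
	for {
		switch code.Op {
		case OpString:
			buf = append(buf, '"')
			buf = append(buf, code.Str...)
			buf = append(buf, '"', ',')
			code = code.Next
		case OpInt:
			buf = append(buf, fmt.Sprintf("%d,", code.Int)...)
			code = code.Next
		case OpEnd:
			return buf
		}
	}
}

func main() {
	end := &Opcode{Op: OpEnd}
	two := &Opcode{Op: OpInt, Int: 42, Next: end}
	one := &Opcode{Op: OpString, Str: "hello", Next: two}
	fmt.Println(string(run(nil, one))) // "hello",42,
}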
+package vm + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + 
store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
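The OpRecursive/OpRecursiveEnd opcodes above push each visited pointer onto ctx.SeenPtr and, once recursion passes StartDetectingCyclesAfter, return an unsupported-value error when a pointer repeats, so cyclic data fails instead of looping forever (the same contract as encoding/json). A small illustrative sketch, with made-up types:

    package main

    import (
        "fmt"

        json "github.com/goccy/go-json"
    )

    type Node struct {
        Name string `json:"name"`
        Next *Node  `json:"next,omitempty"`
    }

    func main() {
        n := &Node{Name: "a"}
        n.Next = n // value refers to itself

        _, err := json.Marshal(n)
        fmt.Println(err != nil) // true: the cycle is detected and reported as an error
    }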
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
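The Int / OmitEmptyInt / IntString struct-head cases above implement the usual omitempty and string tag options for integer fields: zero values are skipped, and the string option wraps the number in quotes. A short illustrative sketch (field names are invented):

    package main

    import (
        "fmt"

        json "github.com/goccy/go-json"
    )

    type Counter struct {
        Hits   int `json:"hits,omitempty"` // dropped while zero
        Quoted int `json:"quoted,string"`  // emitted as a quoted number
    }

    func main() {
        b, _ := json.Marshal(Counter{Quoted: 7})
        fmt.Println(string(b)) // {"quoted":"7"}
    }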
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
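As the errUnsupportedFloat checks in the float64 cases above show, IEEE NaN and +/-Inf have no JSON number representation, so encoding them fails rather than producing invalid output (matching encoding/json). A brief illustrative sketch:

    package main

    import (
        "fmt"
        "math"

        json "github.com/goccy/go-json"
    )

    func main() {
        _, err := json.Marshal(map[string]float64{"x": math.NaN()})
        fmt.Println(err != nil) // true: NaN (and +/-Inf) cannot be encoded
    }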
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
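The StringPtr / OmitEmptyStringPtr heads above distinguish a nil *string (omitted under omitempty, otherwise null) from a pointer to an empty string, which is still emitted as an empty JSON string. An illustration with invented fields:

    package main

    import (
        "fmt"

        json "github.com/goccy/go-json"
    )

    type Profile struct {
        Nick *string `json:"nick,omitempty"` // nil: omitted; pointer to "" is kept
        Bio  *string `json:"bio"`            // nil: encoded as null
    }

    func main() {
        empty := ""
        b1, _ := json.Marshal(Profile{})
        b2, _ := json.Marshal(Profile{Nick: &empty})
        fmt.Println(string(b1)) // {"bio":null}
        fmt.Println(string(b2)) // {"nick":"","bio":null}
    }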
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
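The Bytes / OmitEmptyBytes / BytesPtr heads above (via appendByteSlice) encode []byte fields as base64 strings, with nil slices becoming null and empty ones dropped under omitempty, as in encoding/json. A brief illustrative sketch:

    package main

    import (
        "fmt"

        json "github.com/goccy/go-json"
    )

    type Blob struct {
        Data []byte `json:"data"`          // base64-encoded; nil becomes null
        Sig  []byte `json:"sig,omitempty"` // dropped while empty
    }

    func main() {
        b, _ := json.Marshal(Blob{Data: []byte("hi")})
        fmt.Println(string(b)) // {"data":"aGk="}
    }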
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go new file mode 100644 index 000000000000..925f61ed8e69 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go @@ -0,0 +1,35 @@ +package vm_color + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go new file mode 100644 index 000000000000..12ec56c5bbd2 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go @@ -0,0 +1,9 @@ +package vm_color + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. 
+ // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_color_indent" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go new file mode 100644 index 000000000000..33f29aee4481 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go @@ -0,0 +1,274 @@ +package vm_color + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... + } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder: opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) 
+ b = encoder.AppendInt(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Uint + b = append(b, format.Header...) + b = encoder.AppendUint(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat32(ctx, b, v) + return append(b, format.Footer...) +} + +func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat64(ctx, b, v) + return append(b, format.Footer...) +} + +func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + b = encoder.AppendString(ctx, b, v) + return append(b, format.Footer...) +} + +func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte { + format := ctx.Option.ColorScheme.Binary + b = append(b, format.Header...) + b = encoder.AppendByteSlice(ctx, b, src) + return append(b, format.Footer...) +} + +func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) + bb, err := encoder.AppendNumber(ctx, b, n) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte { + format := ctx.Option.ColorScheme.Bool + b = append(b, format.Header...) + if v { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + return append(b, format.Footer...) +} + +func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(b, format.Footer...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(append(b, format.Footer...), ',') +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + last := len(b) - 1 + b[last] = ':' + return b +} + +func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte { + b = append(b, key[:len(key)-1]...) + b = append(b, ':') + return append(b, value...) +} + +func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + b = append(b, ',') + return b +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSON(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) 
+ bb, err := encoder.AppendMarshalText(ctx, code, b, v) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '[') +} + +func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = ']' + return append(b, ',') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',') +} + +func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + return append(b, ',') +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + format := ctx.Option.ColorScheme.ObjectKey + b = append(b, format.Header...) + b = append(b, code.Key[:len(code.Key)-1]...) + b = append(b, format.Footer...) + + return append(b, ':') +} + +func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '}', ',') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last] == ',' { + b[last] = '}' + return appendComma(ctx, b) + } + return appendStructEnd(ctx, code, b) +} + +func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} +func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} +func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } +func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go new file mode 100644 index 000000000000..a63e83e5505a --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
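The util.go helpers above all follow one pattern: they wrap the output of the shared encoder append routines in the ColorScheme's Header and Footer byte sequences, so each scalar token the color VM emits is bracketed by escape codes while the surrounding JSON punctuation (commas, colons, braces) stays plain. Below is a minimal standalone sketch of that wrapping pattern; the colorFormat/colorScheme types and the ANSI values are illustrative stand-ins, not the encoder package's actual definitions.

package main

import (
	"fmt"
	"strconv"
)

// colorFormat and colorScheme are simplified stand-ins for the color
// scheme referenced by the helpers above: Header/Footer are the escape
// sequences written before and after a colored token.
type colorFormat struct {
	Header string
	Footer string
}

type colorScheme struct {
	Int    colorFormat
	String colorFormat
}

// appendInt mirrors the wrapping pattern used by the vm_color helpers:
// emit the color header, then the plain JSON token, then the footer.
func appendInt(scheme colorScheme, b []byte, v int64) []byte {
	b = append(b, scheme.Int.Header...)
	b = strconv.AppendInt(b, v, 10)
	return append(b, scheme.Int.Footer...)
}

func appendString(scheme colorScheme, b []byte, s string) []byte {
	b = append(b, scheme.String.Header...)
	b = strconv.AppendQuote(b, s)
	return append(b, scheme.String.Footer...)
}

func main() {
	// Illustrative ANSI colors: green for numbers, red for strings.
	scheme := colorScheme{
		Int:    colorFormat{Header: "\x1b[32m", Footer: "\x1b[0m"},
		String: colorFormat{Header: "\x1b[31m", Footer: "\x1b[0m"},
	}

	b := []byte(`{"count":`)
	b = appendInt(scheme, b, 42)
	b = append(b, ',')
	b = append(b, `"name":`...)
	b = appendString(scheme, b, "buildx")
	b = append(b, '}')

	// The JSON structure stays valid; only the scalar tokens carry
	// escape sequences around them.
	fmt.Println(string(b))
}

This is also why helpers such as appendComma and appendColon above take the context but ignore the color scheme: structural characters are never wrapped, so stripping the escape sequences yields ordinary JSON.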
+package vm_color + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + 
store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go new file mode 100644 index 000000000000..dd4cd489e06d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go @@ -0,0 +1,35 @@ +package vm_color_indent + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go new file mode 100644 index 000000000000..2395abec975b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go @@ -0,0 +1,297 @@ +package vm_color_indent + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendIndent = encoder.AppendIndent + appendStructEnd = encoder.AppendStructEndIndent + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... 
+ } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} + +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) + b = encoder.AppendInt(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Uint + b = append(b, format.Header...) + b = encoder.AppendUint(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat32(ctx, b, v) + return append(b, format.Footer...) +} + +func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat64(ctx, b, v) + return append(b, format.Footer...) +} + +func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + b = encoder.AppendString(ctx, b, v) + return append(b, format.Footer...) +} + +func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte { + format := ctx.Option.ColorScheme.Binary + b = append(b, format.Header...) 
+ b = encoder.AppendByteSlice(ctx, b, src) + return append(b, format.Footer...) +} + +func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) + bb, err := encoder.AppendNumber(ctx, b, n) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte { + format := ctx.Option.ColorScheme.Bool + b = append(b, format.Header...) + if v { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + return append(b, format.Footer...) +} + +func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(b, format.Footer...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(append(b, format.Footer...), ',', '\n') +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b[:len(b)-2], ':', ' ') +} + +func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte { + b = appendIndent(ctx, b, code.Indent+1) + b = append(b, key...) + b[len(b)-2] = ':' + b[len(b)-1] = ' ' + return append(b, value...) +} + +func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, '}', ',', '\n') +} + +func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = append(b, '[', '\n') + return appendIndent(ctx, b, code.Indent+1) +} + +func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, ']', ',', '\n') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',', '\n') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',', '\n') +} + +func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + // replace comma to newline + b[last-1] = '\n' + b = appendIndent(ctx, b[:last], code.Indent) + return append(b, '}', ',', '\n') +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSONIndent(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + bb, err := encoder.AppendMarshalTextIndent(ctx, code, b, v) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '\n') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = appendIndent(ctx, b, code.Indent) + + format := ctx.Option.ColorScheme.ObjectKey + b = append(b, format.Header...) + b = append(b, code.Key[:len(code.Key)-1]...) + b = append(b, format.Footer...) 
+ + return append(b, ':', ' ') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last-1] == '{' { + b[last] = '}' + } else { + if b[last] == '\n' { + // to remove ',' and '\n' characters + b = b[:len(b)-2] + } + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent-1) + b = append(b, '}') + } + return appendComma(ctx, b) +} + +func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) { + ctx.BaseIndent = uint32(load(ctxptr, code.Length)) +} + +func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) { + store(ctxptr, code.Length, indent) +} + +func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent+1) +} + +func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go new file mode 100644 index 000000000000..3b4e22e5d421 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! +package vm_color_indent + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + 
fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
[vendored dependency hunk: github.com/goccy/go-json/internal/encoder VM code, mechanically vendored and unchanged from upstream — OpStructPtrHead/OpStructHead and OpStructField opcode cases for uint, float32/float64, string, bool, bytes, json.Number, array/slice, map, MarshalJSON and MarshalText variants (plain, OmitEmpty, String, Ptr, PtrString); the original `+` diff lines were collapsed during extraction and are not reproduced here]
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go new file mode 100644 index 000000000000..99395388c1e3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go @@ -0,0 +1,35 @@ +package vm_indent + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go new file mode 100644 index 000000000000..9e245bfe57d3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go @@ -0,0 +1,9 @@ +package vm_indent + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. 
+ // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_color" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go new file mode 100644 index 000000000000..6cb745e3939b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go @@ -0,0 +1,230 @@ +package vm_indent + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendInt = encoder.AppendInt + appendUint = encoder.AppendUint + appendFloat32 = encoder.AppendFloat32 + appendFloat64 = encoder.AppendFloat64 + appendString = encoder.AppendString + appendByteSlice = encoder.AppendByteSlice + appendNumber = encoder.AppendNumber + appendStructEnd = encoder.AppendStructEndIndent + appendIndent = encoder.AppendIndent + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... + } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + 
return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +func appendNull(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null,\n"...) +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b[:len(b)-2], ':', ' ') +} + +func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte { + b = appendIndent(ctx, b, code.Indent+1) + b = append(b, key...) + b[len(b)-2] = ':' + b[len(b)-1] = ' ' + return append(b, value...) +} + +func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, '}', ',', '\n') +} + +func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = append(b, '[', '\n') + return appendIndent(ctx, b, code.Indent+1) +} + +func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, ']', ',', '\n') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',', '\n') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',', '\n') +} + +func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + // replace comma to newline + b[last-1] = '\n' + b = appendIndent(ctx, b[:last], code.Indent) + return append(b, '}', ',', '\n') +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSONIndent(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalTextIndent(ctx, code, b, v) +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '\n') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = appendIndent(ctx, b, code.Indent) + b = append(b, code.Key...) 
+ return append(b, ' ') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last-1] == '{' { + b[last] = '}' + } else { + if b[last] == '\n' { + // to remove ',' and '\n' characters + b = b[:len(b)-2] + } + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent-1) + b = append(b, '}') + } + return appendComma(ctx, b) +} + +func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) { + ctx.BaseIndent = uint32(load(ctxptr, code.Length)) +} + +func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) { + store(ctxptr, code.Length, indent) +} + +func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent+1) +} + +func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go new file mode 100644 index 000000000000..836c5c8a85ac --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! +package vm_indent + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v 
:= ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/errors/error.go b/vendor/github.com/goccy/go-json/internal/errors/error.go new file mode 100644 index 000000000000..9207d0ff25e3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/errors/error.go @@ -0,0 +1,183 @@ +package errors + +import ( + "fmt" + "reflect" + "strconv" +) + +type InvalidUTF8Error struct { + S string // the whole string value that caused the error +} + +func (e *InvalidUTF8Error) Error() string { + return fmt.Sprintf("json: invalid UTF-8 in string: %s", strconv.Quote(e.S)) +} + +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return fmt.Sprintf("json: Unmarshal(non-pointer %s)", e.Type) + } + return fmt.Sprintf("json: Unmarshal(nil %s)", e.Type) +} + +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. +type MarshalerError struct { + Type reflect.Type + Err error + sourceFunc string +} + +func (e *MarshalerError) Error() string { + srcFunc := e.sourceFunc + if srcFunc == "" { + srcFunc = "MarshalJSON" + } + return fmt.Sprintf("json: error calling %s for type %s: %s", srcFunc, e.Type, e.Err.Error()) +} + +// Unwrap returns the underlying error. +func (e *MarshalerError) Unwrap() error { return e.Err } + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError struct { + msg string // description of error + Offset int64 // error occurred after reading Offset bytes +} + +func (e *SyntaxError) Error() string { return e.msg } + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return fmt.Sprintf("json: cannot unmarshal object key %s into unexported field %s of type %s", + strconv.Quote(e.Key), e.Field.Name, e.Type.String(), + ) +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. 
+type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return fmt.Sprintf("json: cannot unmarshal %s into Go struct field %s.%s of type %s", + e.Value, e.Struct, e.Field, e.Type, + ) + } + return fmt.Sprintf("json: cannot unmarshal %s into Go value of type %s", e.Value, e.Type) +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return fmt.Sprintf("json: unsupported type: %s", e.Type) +} + +type UnsupportedValueError struct { + Value reflect.Value + Str string +} + +func (e *UnsupportedValueError) Error() string { + return fmt.Sprintf("json: unsupported value: %s", e.Str) +} + +func ErrSyntax(msg string, offset int64) *SyntaxError { + return &SyntaxError{msg: msg, Offset: offset} +} + +func ErrMarshaler(typ reflect.Type, err error, msg string) *MarshalerError { + return &MarshalerError{ + Type: typ, + Err: err, + sourceFunc: msg, + } +} + +func ErrExceededMaxDepth(c byte, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf(`invalid character "%c" exceeded max depth`, c), + Offset: cursor, + } +} + +func ErrNotAtBeginningOfValue(cursor int64) *SyntaxError { + return &SyntaxError{msg: "not at beginning of value", Offset: cursor} +} + +func ErrUnexpectedEndOfJSON(msg string, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf("json: %s unexpected end of JSON input", msg), + Offset: cursor, + } +} + +func ErrExpected(msg string, cursor int64) *SyntaxError { + return &SyntaxError{msg: fmt.Sprintf("expected %s", msg), Offset: cursor} +} + +func ErrInvalidCharacter(c byte, context string, cursor int64) *SyntaxError { + if c == 0 { + return &SyntaxError{ + msg: fmt.Sprintf("json: invalid character as %s", context), + Offset: cursor, + } + } + return &SyntaxError{ + msg: fmt.Sprintf("json: invalid character %c as %s", c, context), + Offset: cursor, + } +} + +func ErrInvalidBeginningOfValue(c byte, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf("invalid character '%c' looking for beginning of value", c), + Offset: cursor, + } +} + +type PathError struct { + msg string +} + +func (e *PathError) Error() string { + return fmt.Sprintf("json: invalid path format: %s", e.msg) +} + +func ErrInvalidPath(msg string, args ...interface{}) *PathError { + if len(args) != 0 { + return &PathError{msg: fmt.Sprintf(msg, args...)} + } + return &PathError{msg: msg} +} + +func ErrEmptyPath() *PathError { + return &PathError{msg: "path is empty"} +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/rtype.go b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go new file mode 100644 index 000000000000..37cfe35a1fd3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "reflect" + "unsafe" +) + +// Type representing reflect.rtype for noescape trick +type Type struct{} + +//go:linkname rtype_Align reflect.(*rtype).Align +//go:noescape +func rtype_Align(*Type) int + +func (t *Type) Align() int { + return 
rtype_Align(t) +} + +//go:linkname rtype_FieldAlign reflect.(*rtype).FieldAlign +//go:noescape +func rtype_FieldAlign(*Type) int + +func (t *Type) FieldAlign() int { + return rtype_FieldAlign(t) +} + +//go:linkname rtype_Method reflect.(*rtype).Method +//go:noescape +func rtype_Method(*Type, int) reflect.Method + +func (t *Type) Method(a0 int) reflect.Method { + return rtype_Method(t, a0) +} + +//go:linkname rtype_MethodByName reflect.(*rtype).MethodByName +//go:noescape +func rtype_MethodByName(*Type, string) (reflect.Method, bool) + +func (t *Type) MethodByName(a0 string) (reflect.Method, bool) { + return rtype_MethodByName(t, a0) +} + +//go:linkname rtype_NumMethod reflect.(*rtype).NumMethod +//go:noescape +func rtype_NumMethod(*Type) int + +func (t *Type) NumMethod() int { + return rtype_NumMethod(t) +} + +//go:linkname rtype_Name reflect.(*rtype).Name +//go:noescape +func rtype_Name(*Type) string + +func (t *Type) Name() string { + return rtype_Name(t) +} + +//go:linkname rtype_PkgPath reflect.(*rtype).PkgPath +//go:noescape +func rtype_PkgPath(*Type) string + +func (t *Type) PkgPath() string { + return rtype_PkgPath(t) +} + +//go:linkname rtype_Size reflect.(*rtype).Size +//go:noescape +func rtype_Size(*Type) uintptr + +func (t *Type) Size() uintptr { + return rtype_Size(t) +} + +//go:linkname rtype_String reflect.(*rtype).String +//go:noescape +func rtype_String(*Type) string + +func (t *Type) String() string { + return rtype_String(t) +} + +//go:linkname rtype_Kind reflect.(*rtype).Kind +//go:noescape +func rtype_Kind(*Type) reflect.Kind + +func (t *Type) Kind() reflect.Kind { + return rtype_Kind(t) +} + +//go:linkname rtype_Implements reflect.(*rtype).Implements +//go:noescape +func rtype_Implements(*Type, reflect.Type) bool + +func (t *Type) Implements(u reflect.Type) bool { + return rtype_Implements(t, u) +} + +//go:linkname rtype_AssignableTo reflect.(*rtype).AssignableTo +//go:noescape +func rtype_AssignableTo(*Type, reflect.Type) bool + +func (t *Type) AssignableTo(u reflect.Type) bool { + return rtype_AssignableTo(t, u) +} + +//go:linkname rtype_ConvertibleTo reflect.(*rtype).ConvertibleTo +//go:noescape +func rtype_ConvertibleTo(*Type, reflect.Type) bool + +func (t *Type) ConvertibleTo(u reflect.Type) bool { + return rtype_ConvertibleTo(t, u) +} + +//go:linkname rtype_Comparable reflect.(*rtype).Comparable +//go:noescape +func rtype_Comparable(*Type) bool + +func (t *Type) Comparable() bool { + return rtype_Comparable(t) +} + +//go:linkname rtype_Bits reflect.(*rtype).Bits +//go:noescape +func rtype_Bits(*Type) int + +func (t *Type) Bits() int { + return rtype_Bits(t) +} + +//go:linkname rtype_ChanDir reflect.(*rtype).ChanDir +//go:noescape +func rtype_ChanDir(*Type) reflect.ChanDir + +func (t *Type) ChanDir() reflect.ChanDir { + return rtype_ChanDir(t) +} + +//go:linkname rtype_IsVariadic reflect.(*rtype).IsVariadic +//go:noescape +func rtype_IsVariadic(*Type) bool + +func (t *Type) IsVariadic() bool { + return rtype_IsVariadic(t) +} + +//go:linkname rtype_Elem reflect.(*rtype).Elem +//go:noescape +func rtype_Elem(*Type) reflect.Type + +func (t *Type) Elem() *Type { + return Type2RType(rtype_Elem(t)) +} + +//go:linkname rtype_Field reflect.(*rtype).Field +//go:noescape +func rtype_Field(*Type, int) reflect.StructField + +func (t *Type) Field(i int) reflect.StructField { + return rtype_Field(t, i) +} + +//go:linkname rtype_FieldByIndex reflect.(*rtype).FieldByIndex +//go:noescape +func rtype_FieldByIndex(*Type, []int) reflect.StructField + +func (t *Type) FieldByIndex(index 
[]int) reflect.StructField { + return rtype_FieldByIndex(t, index) +} + +//go:linkname rtype_FieldByName reflect.(*rtype).FieldByName +//go:noescape +func rtype_FieldByName(*Type, string) (reflect.StructField, bool) + +func (t *Type) FieldByName(name string) (reflect.StructField, bool) { + return rtype_FieldByName(t, name) +} + +//go:linkname rtype_FieldByNameFunc reflect.(*rtype).FieldByNameFunc +//go:noescape +func rtype_FieldByNameFunc(*Type, func(string) bool) (reflect.StructField, bool) + +func (t *Type) FieldByNameFunc(match func(string) bool) (reflect.StructField, bool) { + return rtype_FieldByNameFunc(t, match) +} + +//go:linkname rtype_In reflect.(*rtype).In +//go:noescape +func rtype_In(*Type, int) reflect.Type + +func (t *Type) In(i int) reflect.Type { + return rtype_In(t, i) +} + +//go:linkname rtype_Key reflect.(*rtype).Key +//go:noescape +func rtype_Key(*Type) reflect.Type + +func (t *Type) Key() *Type { + return Type2RType(rtype_Key(t)) +} + +//go:linkname rtype_Len reflect.(*rtype).Len +//go:noescape +func rtype_Len(*Type) int + +func (t *Type) Len() int { + return rtype_Len(t) +} + +//go:linkname rtype_NumField reflect.(*rtype).NumField +//go:noescape +func rtype_NumField(*Type) int + +func (t *Type) NumField() int { + return rtype_NumField(t) +} + +//go:linkname rtype_NumIn reflect.(*rtype).NumIn +//go:noescape +func rtype_NumIn(*Type) int + +func (t *Type) NumIn() int { + return rtype_NumIn(t) +} + +//go:linkname rtype_NumOut reflect.(*rtype).NumOut +//go:noescape +func rtype_NumOut(*Type) int + +func (t *Type) NumOut() int { + return rtype_NumOut(t) +} + +//go:linkname rtype_Out reflect.(*rtype).Out +//go:noescape +func rtype_Out(*Type, int) reflect.Type + +//go:linkname PtrTo reflect.(*rtype).ptrTo +//go:noescape +func PtrTo(*Type) *Type + +func (t *Type) Out(i int) reflect.Type { + return rtype_Out(t, i) +} + +//go:linkname IfaceIndir reflect.ifaceIndir +//go:noescape +func IfaceIndir(*Type) bool + +//go:linkname RType2Type reflect.toType +//go:noescape +func RType2Type(t *Type) reflect.Type + +type emptyInterface struct { + _ *Type + ptr unsafe.Pointer +} + +func Type2RType(t reflect.Type) *Type { + return (*Type)(((*emptyInterface)(unsafe.Pointer(&t))).ptr) +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go new file mode 100644 index 000000000000..baab0c5978d3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go @@ -0,0 +1,91 @@ +package runtime + +import ( + "reflect" + "strings" + "unicode" +) + +func getTag(field reflect.StructField) string { + return field.Tag.Get("json") +} + +func IsIgnoredStructField(field reflect.StructField) bool { + if field.PkgPath != "" { + if field.Anonymous { + t := field.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return true + } + } else { + // private field + return true + } + } + tag := getTag(field) + return tag == "-" +} + +type StructTag struct { + Key string + IsTaggedKey bool + IsOmitEmpty bool + IsString bool + Field reflect.StructField +} + +type StructTags []*StructTag + +func (t StructTags) ExistsKey(key string) bool { + for _, tt := range t { + if tt.Key == key { + return true + } + } + return false +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation 
chars are allowed + // in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +func StructTagFromField(field reflect.StructField) *StructTag { + keyName := field.Name + tag := getTag(field) + st := &StructTag{Field: field} + opts := strings.Split(tag, ",") + if len(opts) > 0 { + if opts[0] != "" && isValidTag(opts[0]) { + keyName = opts[0] + st.IsTaggedKey = true + } + } + st.Key = keyName + if len(opts) > 1 { + for _, opt := range opts[1:] { + switch opt { + case "omitempty": + st.IsOmitEmpty = true + case "string": + st.IsString = true + } + } + } + return st +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go new file mode 100644 index 000000000000..4b693cb0bbed --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go @@ -0,0 +1,98 @@ +package runtime + +import ( + "reflect" + "sync" + "unsafe" +) + +type SliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +const ( + maxAcceptableTypeAddrRange = 1024 * 1024 * 2 // 2 Mib +) + +type TypeAddr struct { + BaseTypeAddr uintptr + MaxTypeAddr uintptr + AddrRange uintptr + AddrShift uintptr +} + +var ( + typeAddr *TypeAddr + once sync.Once +) + +//go:linkname typelinks reflect.typelinks +func typelinks() ([]unsafe.Pointer, [][]int32) + +//go:linkname rtypeOff reflect.rtypeOff +func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer + +func AnalyzeTypeAddr() *TypeAddr { + once.Do(func() { + sections, offsets := typelinks() + if len(sections) != 1 { + return + } + if len(offsets) != 1 { + return + } + section := sections[0] + offset := offsets[0] + var ( + min uintptr = uintptr(^uint(0)) + max uintptr = 0 + isAligned64 = true + isAligned32 = true + ) + for i := 0; i < len(offset); i++ { + typ := (*Type)(rtypeOff(section, offset[i])) + addr := uintptr(unsafe.Pointer(typ)) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + if typ.Kind() == reflect.Ptr { + addr = uintptr(unsafe.Pointer(typ.Elem())) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + } + isAligned64 = isAligned64 && (addr-min)&63 == 0 + isAligned32 = isAligned32 && (addr-min)&31 == 0 + } + addrRange := max - min + if addrRange == 0 { + return + } + var addrShift uintptr + if isAligned64 { + addrShift = 6 + } else if isAligned32 { + addrShift = 5 + } + cacheSize := addrRange >> addrShift + if cacheSize > maxAcceptableTypeAddrRange { + return + } + typeAddr = &TypeAddr{ + BaseTypeAddr: min, + MaxTypeAddr: max, + AddrRange: addrRange, + AddrShift: addrShift, + } + }) + + return typeAddr +} diff --git a/vendor/github.com/goccy/go-json/json.go b/vendor/github.com/goccy/go-json/json.go new file mode 100644 index 000000000000..fb18065a2312 --- /dev/null +++ b/vendor/github.com/goccy/go-json/json.go @@ -0,0 +1,368 @@ +package json + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/goccy/go-json/internal/encoder" +) + +// Marshaler is the interface implemented by types that +// can marshal themselves into valid JSON. +type Marshaler interface { + MarshalJSON() ([]byte, error) +} + +// MarshalerContext is the interface implemented by types that +// can marshal themselves into valid JSON with context.Context. +type MarshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. 
UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// UnmarshalerContext is the interface implemented by types +// that can unmarshal with context.Context a JSON description of themselves. +type UnmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// This escaping can be disabled using an Encoder that had SetEscapeHTML(false) +// called on it. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON value. +// +// Struct values encode as JSON objects. +// Each exported struct field becomes a member of the object, using the +// field name as the object key, unless the field is omitted for one of the +// reasons given below. +// +// The encoding of each struct field can be customized by the format string +// stored under the "json" key in the struct field's tag. +// The format string gives the name of the field, possibly followed by a +// comma-separated list of options. The name may be empty in order to +// specify options without overriding the default field name. +// +// The "omitempty" option specifies that the field should be omitted +// from the encoding if the field has an empty value, defined as +// false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. +// +// As a special case, if the field tag is "-", the field is always omitted. +// Note that a field with name "-" can still be generated using the tag "-,". +// +// Examples of struct field tags and their meanings: +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "-". 
+// Field int `json:"-,"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, and ASCII punctuation except quotation +// marks, backslash, and comma. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. The map's key type must either be a +// string, an integer type, or implement encoding.TextMarshaler. The map keys +// are sorted and used as JSON object keys by applying the following rules, +// subject to the UTF-8 coercion described for string values above: +// - string keys are used directly +// - encoding.TextMarshalers are marshaled +// - integer keys are converted to strings +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON value. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +func Marshal(v interface{}) ([]byte, error) { + return MarshalWithOption(v) +} + +// MarshalNoEscape returns the JSON encoding of v and doesn't escape v. +func MarshalNoEscape(v interface{}) ([]byte, error) { + return marshalNoEscape(v) +} + +// MarshalContext returns the JSON encoding of v with context.Context and EncodeOption. +func MarshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshalContext(ctx, v, optFuncs...) +} + +// MarshalWithOption returns the JSON encoding of v with EncodeOption. 
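+// A minimal usage sketch (hedged; with no options this behaves like Marshal, and the
+// User type below is only illustrative):
+//
+//	type User struct {
+//		Name  string `json:"name"`
+//		Email string `json:"email,omitempty"`
+//	}
+//
+//	b, err := json.MarshalWithOption(User{Name: "gopher"})
+//	// on success b is []byte(`{"name":"gopher"}`); the empty Email field is omitted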
+func MarshalWithOption(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshal(v, optFuncs...) +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +// Each JSON element in the output will begin on a new line beginning with prefix +// followed by one or more copies of indent according to the indentation nesting. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return MarshalIndentWithOption(v, prefix, indent) +} + +// MarshalIndentWithOption is like Marshal but applies Indent to format the output with EncodeOption. +func MarshalIndentWithOption(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshalIndent(v, prefix, indent, optFuncs...) +} + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. 
The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v interface{}) error { + return unmarshal(data, v) +} + +// UnmarshalContext parses the JSON-encoded data and stores the result +// in the value pointed to by v. If you implement the UnmarshalerContext interface, +// call it with ctx as an argument. +func UnmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshalContext(ctx, data, v) +} + +func UnmarshalWithOption(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshal(data, v, optFuncs...) +} + +func UnmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshalNoEscape(data, v, optFuncs...) +} + +// A Token holds a value of one of these types: +// +// Delim, for the four JSON delimiters [ ] { } +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +type Token = json.Token + +// A Number represents a JSON number literal. +type Number = json.Number + +// RawMessage is a raw encoded JSON value. +// It implements Marshaler and Unmarshaler and can +// be used to delay JSON decoding or precompute a JSON encoding. +type RawMessage = json.RawMessage + +// A Delim is a JSON array or object delimiter, one of [ ] { or }. +type Delim = json.Delim + +// Compact appends to dst the JSON-encoded src with +// insignificant space characters elided. +func Compact(dst *bytes.Buffer, src []byte) error { + return encoder.Compact(dst, src, false) +} + +// Indent appends to dst an indented form of the JSON-encoded src. +// Each element in a JSON object or array begins on a new, +// indented line beginning with prefix followed by one or more +// copies of indent according to the indentation nesting. +// The data appended to dst does not begin with the prefix nor +// any indentation, to make it easier to embed inside other formatted JSON data. +// Although leading space characters (space, tab, carriage return, newline) +// at the beginning of src are dropped, trailing space characters +// at the end of src are preserved and copied to dst. +// For example, if src has no trailing spaces, neither will dst; +// if src ends in a trailing newline, so will dst. 
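+// A small usage sketch (hedged; the exact whitespace follows the rules above):
+//
+//	var dst bytes.Buffer
+//	if err := json.Indent(&dst, []byte(`{"a":1}`), "", "  "); err != nil {
+//		// handle the syntax error
+//	}
+//	// dst now holds the two-space indented form of {"a":1}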
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { + return encoder.Indent(dst, src, prefix, indent) +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML + + + + + +` +) diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go new file mode 100644 index 000000000000..cbaec73c438d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go @@ -0,0 +1,97 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// RedocOpts configures the Redoc middlewares +type RedocOpts struct { + // BasePath for the UI, defaults to: / + BasePath string + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". + Path string + + // SpecURL is the URL of the spec document. + // + // Defaults to: /swagger.json + SpecURL string + + // Title for the documentation site, default to: API documentation + Title string + + // Template specifies a custom template to serve the UI + Template string + + // RedocURL points to the js that generates the redoc site. + // + // Defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js + RedocURL string +} + +// EnsureDefaults in case some options are missing +func (r *RedocOpts) EnsureDefaults() { + common := toCommonUIOptions(r) + common.EnsureDefaults() + fromCommonToAnyOptions(common, r) + + // redoc-specifics + if r.RedocURL == "" { + r.RedocURL = redocLatest + } + if r.Template == "" { + r.Template = redocTemplate + } +} + +// Redoc creates a middleware to serve a documentation site for a swagger spec. +// +// This allows for altering the spec before starting the http listener. 
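+// A hedged usage sketch (apiHandler is a placeholder for the caller's handler chain):
+//
+//	docs := middleware.Redoc(middleware.RedocOpts{SpecURL: "/swagger.json"}, apiHandler)
+//	log.Fatal(http.ListenAndServe(":8080", docs))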
+func Redoc(opts RedocOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("redoc").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } + + return serveUI(pth, assets.Bytes(), next) +} + +const ( + redocLatest = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js" + redocTemplate = ` + + + {{ .Title }} + + + + + + + + + + + + + +` +) diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go new file mode 100644 index 000000000000..52facfefcd22 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/request.go @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "net/http" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// UntypedRequestBinder binds and validates the data from a http request +type UntypedRequestBinder struct { + Spec *spec.Swagger + Parameters map[string]spec.Parameter + Formats strfmt.Registry + paramBinders map[string]*untypedParamBinder + debugLogf func(string, ...any) // a logging function to debug context and all components using it +} + +// NewUntypedRequestBinder creates a new binder for reading a request. +func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *UntypedRequestBinder { + binders := make(map[string]*untypedParamBinder) + for fieldName, param := range parameters { + binders[fieldName] = newUntypedParamBinder(param, spec, formats) + } + return &UntypedRequestBinder{ + Parameters: parameters, + paramBinders: binders, + Spec: spec, + Formats: formats, + debugLogf: debugLogfFunc(nil), + } +} + +// Bind perform the databinding and validation +func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data any) error { + val := reflect.Indirect(reflect.ValueOf(data)) + isMap := val.Kind() == reflect.Map + var result []error + o.debugLogf("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) + for fieldName, param := range o.Parameters { + binder := o.paramBinders[fieldName] + o.debugLogf("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) + var target reflect.Value + if !isMap { + binder.Name = fieldName + target = val.FieldByName(fieldName) + } + + if isMap { + tpe := binder.Type() + if tpe == nil { + if param.Schema.Type.Contains(typeArray) { + tpe = reflect.TypeFor[[]any]() + } else { + tpe = reflect.TypeFor[map[string]any]() + } + } + target = reflect.Indirect(reflect.New(tpe)) + } + + if !target.IsValid() { + result = append(result, errors.New(http.StatusInternalServerError, "parameter name %q is an unknown field", binder.Name)) + continue + } + + if err := binder.Bind(request, routeParams, consumer, target); err != nil { + result = append(result, err) + continue + } + + if binder.validator != nil { + rr := binder.validator.Validate(target.Interface()) + if rr != nil && rr.HasErrors() { + result = append(result, rr.AsError()) + } + } + + if isMap { + val.SetMapIndex(reflect.ValueOf(param.Name), target) + } + } + + 
if len(result) > 0 { + return errors.CompositeValidationError(result...) + } + + return nil +} + +// SetLogger allows for injecting a logger to catch debug entries. +// +// The logger is enabled in DEBUG mode only. +func (o *UntypedRequestBinder) SetLogger(lg logger.Logger) { + o.debugLogf = debugLogfFunc(lg) +} + +func (o *UntypedRequestBinder) setDebugLogf(fn func(string, ...any)) { + o.debugLogf = fn +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go new file mode 100644 index 000000000000..16816580da8b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/router.go @@ -0,0 +1,520 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "fmt" + "net/http" + "net/url" + fpath "path" + "regexp" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware/denco" + "github.com/go-openapi/runtime/security" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/stringutils" +) + +// RouteParam is a object to capture route params in a framework agnostic way. +// implementations of the muxer should use these route params to communicate with the +// swagger framework +type RouteParam struct { + Name string + Value string +} + +// RouteParams the collection of route params +type RouteParams []RouteParam + +// Get gets the value for the route param for the specified key +func (r RouteParams) Get(name string) string { + vv, _, _ := r.GetOK(name) + if len(vv) > 0 { + return vv[len(vv)-1] + } + return "" +} + +// GetOK gets the value but also returns booleans to indicate if a key or value +// is present. 
This aids in validation and satisfies an interface in use there +// +// The returned values are: data, has key, has value +func (r RouteParams) GetOK(name string) ([]string, bool, bool) { + for _, p := range r { + if p.Name == name { + return []string{p.Value}, true, p.Value != "" + } + } + return nil, false, false +} + +// NewRouter creates a new context-aware router middleware +func NewRouter(ctx *Context, next http.Handler) http.Handler { + if ctx.router == nil { + ctx.router = DefaultRouter(ctx.spec, ctx.api, WithDefaultRouterLoggerFunc(ctx.debugLogf)) + } + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if _, rCtx, ok := ctx.RouteInfo(r); ok { + next.ServeHTTP(rw, rCtx) + return + } + + // Not found, check if it exists in the other methods first + if others := ctx.AllowedMethods(r); len(others) > 0 { + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) + return + } + + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) + }) +} + +// RoutableAPI represents an interface for things that can serve +// as a provider of implementations for the swagger router +type RoutableAPI interface { + HandlerFor(string, string) (http.Handler, bool) + ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) + ConsumersFor([]string) map[string]runtime.Consumer + ProducersFor([]string) map[string]runtime.Producer + AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator + Authorizer() runtime.Authorizer + Formats() strfmt.Registry + DefaultProduces() string + DefaultConsumes() string +} + +// Router represents a swagger-aware router +type Router interface { + Lookup(method, path string) (*MatchedRoute, bool) + OtherMethods(method, path string) []string +} + +type defaultRouteBuilder struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + records map[string][]denco.Record + debugLogf func(string, ...any) // a logging function to debug context and all components using it +} + +type defaultRouter struct { + spec *loads.Document + routers map[string]*denco.Router + debugLogf func(string, ...any) // a logging function to debug context and all components using it +} + +func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) *defaultRouteBuilder { + var o defaultRouterOpts + for _, apply := range opts { + apply(&o) + } + if o.debugLogf == nil { + o.debugLogf = debugLogfFunc(nil) // defaults to standard logger + } + + return &defaultRouteBuilder{ + spec: spec, + analyzer: analysis.New(spec.Spec()), + api: api, + records: make(map[string][]denco.Record), + debugLogf: o.debugLogf, + } +} + +// DefaultRouterOpt allows to inject optional behavior to the default router. +type DefaultRouterOpt func(*defaultRouterOpts) + +type defaultRouterOpts struct { + debugLogf func(string, ...any) +} + +// WithDefaultRouterLogger sets the debug logger for the default router. +// +// This is enabled only in DEBUG mode. +func WithDefaultRouterLogger(lg logger.Logger) DefaultRouterOpt { + return func(o *defaultRouterOpts) { + o.debugLogf = debugLogfFunc(lg) + } +} + +// WithDefaultRouterLoggerFunc sets a logging debug method for the default router. 
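+// For example (a sketch; spec and api are the caller's loaded document and API):
+//
+//	router := middleware.DefaultRouter(spec, api, middleware.WithDefaultRouterLoggerFunc(log.Printf))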
+func WithDefaultRouterLoggerFunc(fn func(string, ...any)) DefaultRouterOpt { + return func(o *defaultRouterOpts) { + o.debugLogf = fn + } +} + +// DefaultRouter creates a default implementation of the router +func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) Router { + builder := newDefaultRouteBuilder(spec, api, opts...) + if spec != nil { + for method, paths := range builder.analyzer.Operations() { + for path, operation := range paths { + fp := fpath.Join(spec.BasePath(), path) + builder.debugLogf("adding route %s %s %q", method, fp, operation.ID) + builder.AddRoute(method, fp, operation) + } + } + } + return builder.Build() +} + +// RouteAuthenticator is an authenticator that can compose several authenticators together. +// It also knows when it contains an authenticator that allows for anonymous pass through. +// Contains a group of 1 or more authenticators that have a logical AND relationship +type RouteAuthenticator struct { + Authenticator map[string]runtime.Authenticator + Schemes []string + Scopes map[string][]string + allScopes []string + commonScopes []string + allowAnonymous bool +} + +func (ra *RouteAuthenticator) AllowsAnonymous() bool { + return ra.allowAnonymous +} + +// AllScopes returns a list of unique scopes that is the combination +// of all the scopes in the requirements +func (ra *RouteAuthenticator) AllScopes() []string { + return ra.allScopes +} + +// CommonScopes returns a list of unique scopes that are common in all the +// scopes in the requirements +func (ra *RouteAuthenticator) CommonScopes() []string { + return ra.commonScopes +} + +// Authenticate Authenticator interface implementation +func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, any, error) { + if ra.allowAnonymous { + route.Authenticator = ra + return true, nil, nil + } + // iterate in proper order + var lastResult any + for _, scheme := range ra.Schemes { + if authenticator, ok := ra.Authenticator[scheme]; ok { + applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{ + Request: req, + RequiredScopes: ra.Scopes[scheme], + }) + if !applies { + return false, nil, nil + } + if err != nil { + route.Authenticator = ra + return true, nil, err + } + lastResult = princ + } + } + route.Authenticator = ra + return true, lastResult, nil +} + +func stringSliceUnion(slices ...[]string) []string { + unique := make(map[string]struct{}) + var result []string + for _, slice := range slices { + for _, entry := range slice { + if _, ok := unique[entry]; ok { + continue + } + unique[entry] = struct{}{} + result = append(result, entry) + } + } + return result +} + +func stringSliceIntersection(slices ...[]string) []string { + unique := make(map[string]int) + var intersection []string + + total := len(slices) + var emptyCnt int + for _, slice := range slices { + if len(slice) == 0 { + emptyCnt++ + continue + } + + for _, entry := range slice { + unique[entry]++ + if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices + intersection = append(intersection, entry) + } + } + } + + return intersection +} + +// RouteAuthenticators represents a group of authenticators that represent a logical OR +type RouteAuthenticators []RouteAuthenticator + +// AllowsAnonymous returns true when there is an authenticator that means optional auth +func (ras RouteAuthenticators) AllowsAnonymous() bool { + for _, ra := range ras { + if ra.AllowsAnonymous() { + return true + } + } + return false +} + +// Authenticate 
method implemention so this collection can be used as authenticator +func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, any, error) { + var lastError error + var allowsAnon bool + var anonAuth RouteAuthenticator + + for _, ra := range ras { + if ra.AllowsAnonymous() { + anonAuth = ra + allowsAnon = true + continue + } + applies, usr, err := ra.Authenticate(req, route) + if !applies || err != nil || usr == nil { + if err != nil { + lastError = err + } + continue + } + return applies, usr, nil + } + + if allowsAnon && lastError == nil { + route.Authenticator = &anonAuth + return true, nil, lastError + } + return lastError != nil, nil, lastError +} + +type routeEntry struct { + PathPattern string + BasePath string + Operation *spec.Operation + Consumes []string + Consumers map[string]runtime.Consumer + Produces []string + Producers map[string]runtime.Producer + Parameters map[string]spec.Parameter + Handler http.Handler + Formats strfmt.Registry + Binder *UntypedRequestBinder + Authenticators RouteAuthenticators + Authorizer runtime.Authorizer +} + +// MatchedRoute represents the route that was matched in this request +type MatchedRoute struct { + routeEntry + + Params RouteParams + Consumer runtime.Consumer + Producer runtime.Producer + Authenticator *RouteAuthenticator +} + +// HasAuth returns true when the route has a security requirement defined +func (m *MatchedRoute) HasAuth() bool { + return len(m.Authenticators) > 0 +} + +// NeedsAuth returns true when the request still +// needs to perform authentication +func (m *MatchedRoute) NeedsAuth() bool { + return m.HasAuth() && m.Authenticator == nil +} + +func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { + mth := strings.ToUpper(method) + d.debugLogf("looking up route for %s %s", method, path) + if Debug { + if len(d.routers) == 0 { + d.debugLogf("there are no known routers") + } + for meth := range d.routers { + d.debugLogf("got a router for %s", meth) + } + } + if router, ok := d.routers[mth]; ok { + if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { + if entry, ok := m.(*routeEntry); ok { + d.debugLogf("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) + var params RouteParams + for _, p := range rp { + v, err := url.PathUnescape(p.Value) + if err != nil { + d.debugLogf("failed to escape %q: %v", p.Value, err) + v = p.Value + } + // a workaround to handle fragment/composing parameters until they are supported in denco router + // check if this parameter is a fragment within a path segment + const enclosureSize = 2 + if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + enclosureSize; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { + // extract fragment parameters + ep := strings.Split(entry.PathPattern[xpos:], "/")[0] + pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) + for i, pname := range pnames { + params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) + } + } else { + // use the parameter directly + params = append(params, RouteParam{Name: p.Name, Value: v}) + } + } + return &MatchedRoute{routeEntry: *entry, Params: params}, true + } + } else { + d.debugLogf("couldn't find a route by path for %s %s", method, path) + } + } else { + d.debugLogf("couldn't find a route by method for %s %s", method, path) + } + return nil, false +} + +func (d *defaultRouter) OtherMethods(method, path string) []string { + mn := strings.ToUpper(method) + var 
methods []string + for k, v := range d.routers { + if k != mn { + if _, _, ok := v.Lookup(fpath.Clean(path)); ok { + methods = append(methods, k) + continue + } + } + } + return methods +} + +func (d *defaultRouter) SetLogger(lg logger.Logger) { + d.debugLogf = debugLogfFunc(lg) +} + +// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco +var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) + +func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { + pleft := strings.Index(pattern, "{") + names = append(names, name) + if pleft < 0 { + if strings.HasSuffix(value, pattern) { + values = append(values, value[:len(value)-len(pattern)]) + } else { + values = append(values, "") + } + } else { + toskip := pattern[:pleft] + pright := strings.Index(pattern, "}") + vright := strings.Index(value, toskip) + if vright >= 0 { + values = append(values, value[:vright]) + } else { + values = append(values, "") + value = "" + } + return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) + } + return names, values +} + +func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { + mn := strings.ToUpper(method) + + bp := fpath.Clean(d.spec.BasePath()) + if len(bp) > 0 && bp[len(bp)-1] == '/' { + bp = bp[:len(bp)-1] + } + + d.debugLogf("operation: %#v", *operation) + if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { + consumes := d.analyzer.ConsumesFor(operation) + produces := d.analyzer.ProducesFor(operation) + parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) + + // add API defaults if not part of the spec + if defConsumes := d.api.DefaultConsumes(); defConsumes != "" && !stringutils.ContainsStringsCI(consumes, defConsumes) { + consumes = append(consumes, defConsumes) + } + + if defProduces := d.api.DefaultProduces(); defProduces != "" && !stringutils.ContainsStringsCI(produces, defProduces) { + produces = append(produces, defProduces) + } + + requestBinder := NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()) + requestBinder.setDebugLogf(d.debugLogf) + record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ + BasePath: bp, + PathPattern: path, + Operation: operation, + Handler: handler, + Consumes: consumes, + Produces: produces, + Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), + Producers: d.api.ProducersFor(normalizeOffers(produces)), + Parameters: parameters, + Formats: d.api.Formats(), + Binder: requestBinder, + Authenticators: d.buildAuthenticators(operation), + Authorizer: d.api.Authorizer(), + }) + d.records[mn] = append(d.records[mn], record) + } +} + +func (d *defaultRouteBuilder) Build() *defaultRouter { + routers := make(map[string]*denco.Router) + for method, records := range d.records { + router := denco.New() + _ = router.Build(records) + routers[method] = router + } + return &defaultRouter{ + spec: d.spec, + routers: routers, + debugLogf: d.debugLogf, + } +} + +func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { + requirements := d.analyzer.SecurityRequirementsFor(operation) + auths := make([]RouteAuthenticator, 0, len(requirements)) + for _, reqs := range requirements { + schemes := make([]string, 0, len(reqs)) + scopes := make(map[string][]string, len(reqs)) + scopeSlices := make([][]string, 0, len(reqs)) 
+ for _, req := range reqs { + schemes = append(schemes, req.Name) + scopes[req.Name] = req.Scopes + scopeSlices = append(scopeSlices, req.Scopes) + } + + definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) + authenticators := d.api.AuthenticatorsFor(definitions) + auths = append(auths, RouteAuthenticator{ + Authenticator: authenticators, + Schemes: schemes, + Scopes: scopes, + allScopes: stringSliceUnion(scopeSlices...), + commonScopes: stringSliceIntersection(scopeSlices...), + allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", + }) + } + return auths +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go new file mode 100644 index 000000000000..37ecfa6fd4e7 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/security.go @@ -0,0 +1,28 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import "net/http" + +func newSecureAPI(ctx *Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + if route != nil && !route.NeedsAuth() { + next.ServeHTTP(rw, r) + return + } + + _, rCtx, err := ctx.Authorize(r, route) + if err != nil { + ctx.Respond(rw, r, route.Produces, route, err) + return + } + r = rCtx + + next.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go new file mode 100644 index 000000000000..9cc9940aaa59 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "net/http" + "path" +) + +const ( + contentTypeHeader = "Content-Type" + applicationJSON = "application/json" +) + +// SpecOption can be applied to the Spec serving middleware +type SpecOption func(*specOptions) + +var defaultSpecOptions = specOptions{ + Path: "", + Document: "swagger.json", +} + +type specOptions struct { + Path string + Document string +} + +func specOptionsWithDefaults(opts []SpecOption) specOptions { + o := defaultSpecOptions + for _, apply := range opts { + apply(&o) + } + + return o +} + +// Spec creates a middleware to serve a swagger spec as a JSON document. +// +// This allows for altering the spec before starting the http listener. +// +// The basePath argument indicates the path of the spec document (defaults to "/"). +// Additional SpecOption can be used to change the name of the document (defaults to "swagger.json"). +func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http.Handler { + if basePath == "" { + basePath = "/" + } + o := specOptionsWithDefaults(opts) + pth := path.Join(basePath, o.Path, o.Document) + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Clean(r.URL.Path) == pth { + rw.Header().Set(contentTypeHeader, applicationJSON) + rw.WriteHeader(http.StatusOK) + _, _ = rw.Write(b) + + return + } + + if next != nil { + next.ServeHTTP(rw, r) + + return + } + + rw.Header().Set(contentTypeHeader, applicationJSON) + rw.WriteHeader(http.StatusNotFound) + }) +} + +// WithSpecPath sets the path to be joined to the base path of the Spec middleware. +// +// This is empty by default. 
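+// For instance (a sketch; specBytes holds the raw swagger document):
+//
+//	h := middleware.Spec("/api", specBytes, next, middleware.WithSpecPath("meta"))
+//	// the document is then served at /api/meta/swagger.json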
+func WithSpecPath(pth string) SpecOption { + return func(o *specOptions) { + o.Path = pth + } +} + +// WithSpecDocument sets the name of the JSON document served as a spec. +// +// By default, this is "swagger.json" +func WithSpecDocument(doc string) SpecOption { + return func(o *specOptions) { + if doc == "" { + return + } + + o.Document = doc + } +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go new file mode 100644 index 000000000000..b25a3a2cff7d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go @@ -0,0 +1,178 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// SwaggerUIOpts configures the SwaggerUI middleware +type SwaggerUIOpts struct { + // BasePath for the API, defaults to: / + BasePath string + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". + Path string + + // SpecURL is the URL of the spec document. + // + // Defaults to: /swagger.json + SpecURL string + + // Title for the documentation site, default to: API documentation + Title string + + // Template specifies a custom template to serve the UI + Template string + + // OAuthCallbackURL the url called after OAuth2 login + OAuthCallbackURL string + + // The three components needed to embed swagger-ui + + // SwaggerURL points to the js that generates the SwaggerUI site. + // + // Defaults to: https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js + SwaggerURL string + + SwaggerPresetURL string + SwaggerStylesURL string + + Favicon32 string + Favicon16 string +} + +// EnsureDefaults in case some options are missing +func (r *SwaggerUIOpts) EnsureDefaults() { + r.ensureDefaults() + + if r.Template == "" { + r.Template = swaggeruiTemplate + } +} + +func (r *SwaggerUIOpts) EnsureDefaultsOauth2() { + r.ensureDefaults() + + if r.Template == "" { + r.Template = swaggerOAuthTemplate + } +} + +func (r *SwaggerUIOpts) ensureDefaults() { + common := toCommonUIOptions(r) + common.EnsureDefaults() + fromCommonToAnyOptions(common, r) + + // swaggerui-specifics + if r.OAuthCallbackURL == "" { + r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback") + } + if r.SwaggerURL == "" { + r.SwaggerURL = swaggerLatest + } + if r.SwaggerPresetURL == "" { + r.SwaggerPresetURL = swaggerPresetLatest + } + if r.SwaggerStylesURL == "" { + r.SwaggerStylesURL = swaggerStylesLatest + } + if r.Favicon16 == "" { + r.Favicon16 = swaggerFavicon16Latest + } + if r.Favicon32 == "" { + r.Favicon32 = swaggerFavicon32Latest + } +} + +// SwaggerUI creates a middleware to serve a documentation site for a swagger spec. +// +// This allows for altering the spec before starting the http listener. 
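+// A hedged usage sketch, parallel to the Redoc middleware above:
+//
+//	ui := middleware.SwaggerUI(middleware.SwaggerUIOpts{SpecURL: "/swagger.json"}, apiHandler)
+//	log.Fatal(http.ListenAndServe(":8080", ui))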
+func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("swaggerui").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } + + return serveUI(pth, assets.Bytes(), next) +} + +const ( + swaggerLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js" + swaggerPresetLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-standalone-preset.js" + swaggerStylesLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui.css" + swaggerFavicon32Latest = "https://unpkg.com/swagger-ui-dist/favicon-32x32.png" + swaggerFavicon16Latest = "https://unpkg.com/swagger-ui-dist/favicon-16x16.png" + swaggeruiTemplate = ` + + + + + {{ .Title }} + + + + + + + + +
+ + + + + + +` +) diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go new file mode 100644 index 000000000000..879bdbaadea7 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go @@ -0,0 +1,108 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "fmt" + "net/http" + "text/template" +) + +func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler { + opts.EnsureDefaultsOauth2() + + pth := opts.OAuthCallbackURL + tmpl := template.Must(template.New("swaggeroauth").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } + + return serveUI(pth, assets.Bytes(), next) +} + +const ( + swaggerOAuthTemplate = ` + + + + {{ .Title }} + + + + + +` +) diff --git a/vendor/github.com/go-openapi/runtime/middleware/ui_options.go b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go new file mode 100644 index 000000000000..cf2f673d3cb9 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go @@ -0,0 +1,176 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "encoding/gob" + "fmt" + "net/http" + "path" + "strings" +) + +const ( + // constants that are common to all UI-serving middlewares + defaultDocsPath = "docs" + defaultDocsURL = "/swagger.json" + defaultDocsTitle = "API Documentation" +) + +// uiOptions defines common options for UI serving middlewares. +type uiOptions struct { + // BasePath for the UI, defaults to: / + BasePath string + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". + Path string + + // SpecURL is the URL of the spec document. + // + // Defaults to: /swagger.json + SpecURL string + + // Title for the documentation site, default to: API documentation + Title string + + // Template specifies a custom template to serve the UI + Template string +} + +// toCommonUIOptions converts any UI option type to retain the common options. +// +// This uses gob encoding/decoding to convert common fields from one struct to another. +func toCommonUIOptions(opts any) uiOptions { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + dec := gob.NewDecoder(&buf) + var o uiOptions + err := enc.Encode(opts) + if err != nil { + panic(err) + } + + err = dec.Decode(&o) + if err != nil { + panic(err) + } + + return o +} + +func fromCommonToAnyOptions[T any](source uiOptions, target *T) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + dec := gob.NewDecoder(&buf) + err := enc.Encode(source) + if err != nil { + panic(err) + } + + err = dec.Decode(target) + if err != nil { + panic(err) + } +} + +// UIOption can be applied to UI serving middleware, such as Context.APIHandler or +// Context.APIHandlerSwaggerUI to alter the defaut behavior. +type UIOption func(*uiOptions) + +func uiOptionsWithDefaults(opts []UIOption) uiOptions { + var o uiOptions + for _, apply := range opts { + apply(&o) + } + + return o +} + +// WithUIBasePath sets the base path from where to serve the UI assets. +// +// By default, Context middleware sets this value to the API base path. 
+func WithUIBasePath(base string) UIOption { + return func(o *uiOptions) { + if !strings.HasPrefix(base, "/") { + base = "/" + base + } + o.BasePath = base + } +} + +// WithUIPath sets the path from where to serve the UI assets (i.e. /{basepath}/{path}. +func WithUIPath(pth string) UIOption { + return func(o *uiOptions) { + o.Path = pth + } +} + +// WithUISpecURL sets the path from where to serve swagger spec document. +// +// This may be specified as a full URL or a path. +// +// By default, this is "/swagger.json" +func WithUISpecURL(specURL string) UIOption { + return func(o *uiOptions) { + o.SpecURL = specURL + } +} + +// WithUITitle sets the title of the UI. +// +// By default, Context middleware sets this value to the title found in the API spec. +func WithUITitle(title string) UIOption { + return func(o *uiOptions) { + o.Title = title + } +} + +// WithTemplate allows to set a custom template for the UI. +// +// UI middleware will panic if the template does not parse or execute properly. +func WithTemplate(tpl string) UIOption { + return func(o *uiOptions) { + o.Template = tpl + } +} + +// EnsureDefaults in case some options are missing +func (r *uiOptions) EnsureDefaults() { + if r.BasePath == "" { + r.BasePath = "/" + } + if r.Path == "" { + r.Path = defaultDocsPath + } + if r.SpecURL == "" { + r.SpecURL = defaultDocsURL + } + if r.Title == "" { + r.Title = defaultDocsTitle + } +} + +// serveUI creates a middleware that serves a templated asset as text/html. +func serveUI(pth string, assets []byte, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Clean(r.URL.Path) == pth { + rw.Header().Set(contentTypeHeader, "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + _, _ = rw.Write(assets) + + return + } + + if next != nil { + next.ServeHTTP(rw, r) + + return + } + + rw.Header().Set(contentTypeHeader, "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = fmt.Fprintf(rw, "%q not found", pth) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go new file mode 100644 index 000000000000..774da0ba0c86 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go @@ -0,0 +1,282 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package untyped + +import ( + "fmt" + "net/http" + "sort" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +const ( + smallPreallocatedSlots = 10 + mediumPreallocatedSlots = 30 +) + +// API represents an untyped mux for a swagger spec +type API struct { + spec *loads.Document + analyzer *analysis.Spec + DefaultProduces string + DefaultConsumes string + consumers map[string]runtime.Consumer + producers map[string]runtime.Producer + authenticators map[string]runtime.Authenticator + authorizer runtime.Authorizer + operations map[string]map[string]runtime.OperationHandler + ServeError func(http.ResponseWriter, *http.Request, error) + Models map[string]func() any + formats strfmt.Registry +} + +// NewAPI creates the default untyped API +func NewAPI(spec *loads.Document) *API { + var an *analysis.Spec + if spec != nil && spec.Spec() != nil { + an = analysis.New(spec.Spec()) + } + api := &API{ + spec: spec, + analyzer: an, + consumers: 
make(map[string]runtime.Consumer, smallPreallocatedSlots), + producers: make(map[string]runtime.Producer, smallPreallocatedSlots), + authenticators: make(map[string]runtime.Authenticator), + operations: make(map[string]map[string]runtime.OperationHandler), + ServeError: errors.ServeError, + Models: make(map[string]func() any), + formats: strfmt.NewFormats(), + } + + return api.WithJSONDefaults() +} + +// WithJSONDefaults loads the json defaults for this api +func (d *API) WithJSONDefaults() *API { + d.DefaultConsumes = runtime.JSONMime + d.DefaultProduces = runtime.JSONMime + d.consumers[runtime.JSONMime] = runtime.JSONConsumer() + d.producers[runtime.JSONMime] = runtime.JSONProducer() + return d +} + +// WithoutJSONDefaults clears the json defaults for this api +func (d *API) WithoutJSONDefaults() *API { + d.DefaultConsumes = "" + d.DefaultProduces = "" + delete(d.consumers, runtime.JSONMime) + delete(d.producers, runtime.JSONMime) + return d +} + +// Formats returns the registered string formats +func (d *API) Formats() strfmt.Registry { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + return d.formats +} + +// RegisterFormat registers a custom format validator +func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + d.formats.Add(name, format, validator) +} + +// RegisterAuth registers an auth handler in this api +func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { + if d.authenticators == nil { + d.authenticators = make(map[string]runtime.Authenticator) + } + d.authenticators[scheme] = handler +} + +// RegisterAuthorizer registers an authorizer handler in this api +func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { + d.authorizer = handler +} + +// RegisterConsumer registers a consumer for a media type. 
+func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { + if d.consumers == nil { + d.consumers = make(map[string]runtime.Consumer, smallPreallocatedSlots) + } + d.consumers[strings.ToLower(mediaType)] = handler +} + +// RegisterProducer registers a producer for a media type +func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { + if d.producers == nil { + d.producers = make(map[string]runtime.Producer, smallPreallocatedSlots) + } + d.producers[strings.ToLower(mediaType)] = handler +} + +// RegisterOperation registers an operation handler for an operation name +func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { + if d.operations == nil { + d.operations = make(map[string]map[string]runtime.OperationHandler, mediumPreallocatedSlots) + } + um := strings.ToUpper(method) + if b, ok := d.operations[um]; !ok || b == nil { + d.operations[um] = make(map[string]runtime.OperationHandler) + } + d.operations[um][path] = handler +} + +// OperationHandlerFor returns the operation handler for the specified id if it can be found +func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { + if d.operations == nil { + return nil, false + } + if pi, ok := d.operations[strings.ToUpper(method)]; ok { + h, ok := pi[path] + return h, ok + } + return nil, false +} + +// ConsumersFor gets the consumers for the specified media types +func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + result := make(map[string]runtime.Consumer) + for _, mt := range mediaTypes { + if consumer, ok := d.consumers[mt]; ok { + result[mt] = consumer + } + } + return result +} + +// ProducersFor gets the producers for the specified media types +func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + result := make(map[string]runtime.Producer) + for _, mt := range mediaTypes { + if producer, ok := d.producers[mt]; ok { + result[mt] = producer + } + } + return result +} + +// AuthenticatorsFor gets the authenticators for the specified security schemes +func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + result := make(map[string]runtime.Authenticator) + for k := range schemes { + if a, ok := d.authenticators[k]; ok { + result[k] = a + } + } + return result +} + +// Authorizer returns the registered authorizer +func (d *API) Authorizer() runtime.Authorizer { + return d.authorizer +} + +// Validate validates this API for any missing items +func (d *API) Validate() error { + return d.validate() +} + +// validateWith validates the registrations in this API against the provided spec analyzer +func (d *API) validate() error { + consumes := make([]string, 0, len(d.consumers)) + for k := range d.consumers { + consumes = append(consumes, k) + } + + produces := make([]string, 0, len(d.producers)) + for k := range d.producers { + produces = append(produces, k) + } + + authenticators := make([]string, 0, len(d.authenticators)) + for k := range d.authenticators { + authenticators = append(authenticators, k) + } + + operations := make([]string, 0, len(d.operations)) + for m, v := range d.operations { + for p := range v { + operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) + } + } + + secDefinitions := d.spec.Spec().SecurityDefinitions + definedAuths := make([]string, 0, len(secDefinitions)) + for k := range secDefinitions { + definedAuths = append(definedAuths, k) + } + + if err := d.verify("consumes", 
consumes, d.analyzer.RequiredConsumes()); err != nil { + return err + } + if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { + return err + } + if err := d.verify("operation", operations, d.analyzer.OperationMethodPaths()); err != nil { + return err + } + + requiredAuths := d.analyzer.RequiredSecuritySchemes() + if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { + return err + } + if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { + return err + } + return nil +} + +func (d *API) verify(name string, registrations []string, expectations []string) error { + sort.Strings(registrations) + sort.Strings(expectations) + + expected := map[string]struct{}{} + seen := map[string]struct{}{} + + for _, v := range expectations { + expected[v] = struct{}{} + } + + var unspecified []string + for _, v := range registrations { + seen[v] = struct{}{} + if _, ok := expected[v]; !ok { + unspecified = append(unspecified, v) + } + } + + for k := range seen { + delete(expected, k) + } + + unregistered := make([]string, 0, len(expected)) + for k := range expected { + unregistered = append(unregistered, k) + } + sort.Strings(unspecified) + sort.Strings(unregistered) + + if len(unregistered) > 0 || len(unspecified) > 0 { + return &errors.APIVerificationFailed{ + Section: name, + MissingSpecification: unspecified, + MissingRegistration: unregistered, + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go new file mode 100644 index 000000000000..ed026d626ba4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go @@ -0,0 +1,118 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "mime" + "net/http" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/swag/stringutils" +) + +type validation struct { + context *Context + result []error + request *http.Request + route *MatchedRoute + bound map[string]any +} + +// ContentType validates the content type of a request +func validateContentType(allowed []string, actual string) error { + if len(allowed) == 0 { + return nil + } + mt, _, err := mime.ParseMediaType(actual) + if err != nil { + return errors.InvalidContentType(actual, allowed) + } + if stringutils.ContainsStringsCI(allowed, mt) { + return nil + } + if stringutils.ContainsStringsCI(allowed, "*/*") { + return nil + } + parts := strings.Split(actual, "/") + if len(parts) == 2 && stringutils.ContainsStringsCI(allowed, parts[0]+"/*") { + return nil + } + return errors.InvalidContentType(actual, allowed) +} + +func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { + validate := &validation{ + context: ctx, + request: request, + route: route, + bound: make(map[string]any), + } + validate.debugLogf("validating request %s %s", request.Method, request.URL.EscapedPath()) + + validate.contentType() + if len(validate.result) == 0 { + validate.responseFormat() + } + if len(validate.result) == 0 { + validate.parameters() + } + + return validate +} + +func (v *validation) debugLogf(format string, args ...any) { + v.context.debugLogf(format, args...) 
+} + +func (v *validation) parameters() { + v.debugLogf("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) + if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { + if result.Error() == "validation failure list" { + for _, e := range result.(*errors.Validation).Value.([]any) { + v.result = append(v.result, e.(error)) + } + return + } + v.result = append(v.result, result) + } +} + +func (v *validation) contentType() { + if len(v.result) == 0 && runtime.HasBody(v.request) { + v.debugLogf("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) + ct, _, req, err := v.context.ContentType(v.request) + if err != nil { + v.result = append(v.result, err) + } else { + v.request = req + } + + if len(v.result) == 0 { + v.debugLogf("validating content type for %q against [%s]", ct, strings.Join(v.route.Consumes, ", ")) + if err := validateContentType(v.route.Consumes, ct); err != nil { + v.result = append(v.result, err) + } + } + if ct != "" && v.route.Consumer == nil { + cons, ok := v.route.Consumers[ct] + if !ok { + v.result = append(v.result, errors.New(http.StatusInternalServerError, "no consumer registered for %s", ct)) + } else { + v.route.Consumer = cons + } + } + } +} + +func (v *validation) responseFormat() { + // if the route provides values for Produces and no format could be identify then return an error. + // if the route does not specify values for Produces then treat request as valid since the API designer + // choose not to specify the format for responses. + if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && len(v.route.Produces) > 0 { + v.request = rCtx + v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) + } +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 000000000000..aab7b8c055a3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,138 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "bufio" + "context" + "errors" + "io" + "net/http" + "strings" + + "github.com/go-openapi/swag/stringutils" +) + +// CanHaveBody returns true if this method can have a body +func CanHaveBody(method string) bool { + mn := strings.ToUpper(method) + return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" +} + +// IsSafe returns true if this is a request with a safe method +func IsSafe(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn == "GET" || mn == "HEAD" +} + +// AllowsBody returns true if the request allows for a body +func AllowsBody(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn != "HEAD" +} + +// HasBody returns true if this method needs a content-type +func HasBody(r *http.Request) bool { + // happy case: we have a content length set + if r.ContentLength > 0 { + return true + } + + if r.Header.Get("Content-Length") != "" { + // in this case, no Transfer-Encoding should be present + // we have a header set but it was explicitly set to 0, so we assume no body + return false + } + + rdr := newPeekingReader(r.Body) + r.Body = rdr + return rdr.HasContent() +} + +func newPeekingReader(r io.ReadCloser) *peekingReader { + if r == nil { + return nil + } + return &peekingReader{ + underlying: bufio.NewReader(r), + 
orig: r, + } +} + +type peekingReader struct { + underlying interface { + Buffered() int + Peek(int) ([]byte, error) + Read([]byte) (int, error) + } + orig io.ReadCloser +} + +func (p *peekingReader) HasContent() bool { + if p == nil { + return false + } + if p.underlying.Buffered() > 0 { + return true + } + b, err := p.underlying.Peek(1) + if err != nil { + return false + } + return len(b) > 0 +} + +func (p *peekingReader) Read(d []byte) (int, error) { + if p == nil { + return 0, io.EOF + } + if p.underlying == nil { + return 0, io.ErrUnexpectedEOF + } + return p.underlying.Read(d) +} + +func (p *peekingReader) Close() error { + if p.underlying == nil { + return errors.New("reader already closed") + } + p.underlying = nil + if p.orig != nil { + return p.orig.Close() + } + return nil +} + +// JSONRequest creates a new http request with json headers set. +// +// It uses context.Background. +func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add(HeaderContentType, JSONMime) + req.Header.Add(HeaderAccept, JSONMime) + return req, nil +} + +// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) +type Gettable interface { + GetOK(string) ([]string, bool, bool) +} + +// ReadSingleValue reads a single value from the source +func ReadSingleValue(values Gettable, name string) string { + vv, _, hv := values.GetOK(name) + if hv { + return vv[len(vv)-1] + } + return "" +} + +// ReadCollectionValue reads a collection value from a string data source +func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { + v := ReadSingleValue(values, name) + return stringutils.SplitByFormat(v, collectionFormat) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go new file mode 100644 index 000000000000..b5b7904dc1e6 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package security + +import ( + "context" + "net/http" + "strings" + + "github.com/go-openapi/errors" + + "github.com/go-openapi/runtime" +) + +const ( + query = "query" + header = "header" + accessTokenParam = "access_token" +) + +// HttpAuthenticator is a function that authenticates a HTTP request +func HttpAuthenticator(handler func(*http.Request) (bool, any, error)) runtime.Authenticator { //nolint:revive + return runtime.AuthenticatorFunc(func(params any) (bool, any, error) { + if request, ok := params.(*http.Request); ok { + return handler(request) + } + if scoped, ok := params.(*ScopedAuthRequest); ok { + return handler(scoped.Request) + } + return false, nil, nil + }) +} + +// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes +func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, any, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params any) (bool, any, error) { + if request, ok := params.(*ScopedAuthRequest); ok { + return handler(request) + } + return false, nil, nil + }) +} + +// UserPassAuthentication authentication function +type UserPassAuthentication func(string, string) (any, error) + +// UserPassAuthenticationCtx authentication function with context.Context +type 
UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, any, error) + +// TokenAuthentication authentication function +type TokenAuthentication func(string) (any, error) + +// TokenAuthenticationCtx authentication function with context.Context +type TokenAuthenticationCtx func(context.Context, string) (context.Context, any, error) + +// ScopedTokenAuthentication authentication function +type ScopedTokenAuthentication func(string, []string) (any, error) + +// ScopedTokenAuthenticationCtx authentication function with context.Context +type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, any, error) + +var DefaultRealmName = "API" + +type secCtxKey uint8 + +const ( + failedBasicAuth secCtxKey = iota + oauth2SchemeName +) + +func FailedBasicAuth(r *http.Request) string { + return FailedBasicAuthCtx(r.Context()) +} + +func FailedBasicAuthCtx(ctx context.Context) string { + v, ok := ctx.Value(failedBasicAuth).(string) + if !ok { + return "" + } + return v +} + +func OAuth2SchemeName(r *http.Request) string { + return OAuth2SchemeNameCtx(r.Context()) +} + +func OAuth2SchemeNameCtx(ctx context.Context) string { + v, ok := ctx.Value(oauth2SchemeName).(string) + if !ok { + return "" + } + return v +} + +// BasicAuth creates a basic auth authenticator with the provided authentication function +func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { + return BasicAuthRealm(DefaultRealmName, authenticate) +} + +// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name +func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, any, error) { + if usr, pass, ok := r.BasicAuth(); ok { + p, err := authenticate(usr, pass) + if err != nil { + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + } + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context +func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { + return BasicAuthRealmCtx(DefaultRealmName, authenticate) +} + +// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context +func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, any, error) { + if usr, pass, ok := r.BasicAuth(); ok { + ctx, p, err := authenticate(r.Context(), usr, pass) + if err != nil { + ctx = context.WithValue(ctx, failedBasicAuth, realm) + } + *r = *r.WithContext(ctx) + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// APIKeyAuth creates an authenticator that uses a token for authorization. 
+// This token can be obtained from either a header or a query string +func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(http.StatusInternalServerError, "api key auth: in value needs to be either \"query\" or \"header\"")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, any, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + p, err := authenticate(token) + return true, p, err + }) +} + +// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. +// This token can be obtained from either a header or a query string +func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(http.StatusInternalServerError, "api key auth: in value needs to be either \"query\" or \"header\"")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, any, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + ctx, p, err := authenticate(r.Context(), token) + *r = *r.WithContext(ctx) + return true, p, err + }) +} + +// ScopedAuthRequest contains both a http request and the required scopes for a particular operation +type ScopedAuthRequest struct { + Request *http.Request + RequiredScopes []string +} + +// BearerAuth for use with oauth2 flows +func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, any, error) { + var token string + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) + if after, ok := strings.CutPrefix(hdr, prefix); ok { + token = after + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get(accessTokenParam) + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue(accessTokenParam) + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + *r.Request = *r.Request.WithContext(rctx) + p, err := authenticate(token, r.RequiredScopes) + return true, p, err + }) +} + +// BearerAuthCtx for use with oauth2 flows with support for context.Context. 
+func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, any, error) { + var token string + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) + if after, ok := strings.CutPrefix(hdr, prefix); ok { + token = after + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get(accessTokenParam) + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue(accessTokenParam) + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + ctx, p, err := authenticate(rctx, token, r.RequiredScopes) + *r.Request = *r.Request.WithContext(ctx) + return true, p, err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go new file mode 100644 index 000000000000..69bd497a3c28 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authorizer.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package security + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// Authorized provides a default implementation of the Authorizer interface where all +// requests are authorized (successful) +func Authorized() runtime.Authorizer { + return runtime.AuthorizerFunc(func(_ *http.Request, _ any) error { return nil }) +} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go new file mode 100644 index 000000000000..7e10a5a56c29 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/statuses.go @@ -0,0 +1,79 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +// Statuses lists the most common HTTP status codes to default message +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental 
Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 000000000000..2b8e4ac09d0f --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,105 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag/jsonutils" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data any) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, no need to unmarshal it, which causes a panic. + if len(b) == 0 { + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data any) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := jsonutils.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 000000000000..19894e78451c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +// Values 
typically represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 000000000000..5060b5c8e915 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data any) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data any) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go new file mode 100644 index 000000000000..ca63430e0b7b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go @@ -0,0 +1,28 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package yamlpc + +import ( + "io" + + "github.com/go-openapi/runtime" + yaml "go.yaml.in/yaml/v3" +) + +// YAMLConsumer creates a consumer for yaml data +func YAMLConsumer() runtime.Consumer { + return runtime.ConsumerFunc(func(r io.Reader, v any) error { + dec := yaml.NewDecoder(r) + return dec.Decode(v) + }) +} + +// YAMLProducer creates a producer for yaml data +func YAMLProducer() runtime.Producer { + return runtime.ProducerFunc(func(w io.Writer, v any) error { + enc := yaml.NewEncoder(w) + defer enc.Close() + return enc.Encode(v) + }) +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 000000000000..f47cb2045f13 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1 @@ +*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 000000000000..1ad5adf47e69 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" 
+linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 000000000000..3203bd2556d6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,58 @@ +# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +### FAQ + +* What does this do? + +> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model +> 2. It knows how to resolve $ref and expand them to make a single root document + +* How does it play with the rest of the go-openapi packages ? + +> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) +> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations +> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it +> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. +> +> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 + +* Does the unmarshaling support YAML? + +> Not directly. The exposed types know only how to unmarshal from JSON. +> +> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by +> github.com/go-openapi/loads +> +> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> +> See also https://github.com/go-openapi/spec/issues/164 + +* How can I validate a spec? + +> Validation is provided by [the validate package](http://github.com/go-openapi/validate) + +* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? + +> We found jsonschema compatibility more important: since `id` in jsonschema influences +> how `$ref` are resolved. +> This `id` does not conflict with any property named `id`. 
+> +> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 000000000000..10fba77a839c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,86 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "maps" + "sync" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (any, bool) + Set(string, any) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]any +} + +func (s *simpleCache) ShallowClone() ResolutionCache { + store := make(map[string]any, len(s.store)) + s.lock.RLock() + maps.Copy(store, s.store) + s.lock.RUnlock() + + return &simpleCache{ + store: store, + } +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (any, bool) { + s.lock.RLock() + v, ok := s.store[uri] + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data any) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var ( + // resCache is a package level cache for $ref resolution and expansion. + // It is initialized lazily by methods that have the need for it: no + // memory is allocated unless some expander methods are called. + // + // It is initialized with JSON schema and swagger schema, + // which do not mutate during normal operations. + // + // All subsequent utilizations of this cache are produced from a shallow + // clone of this initial version. + resCache *simpleCache + onceCache sync.Once + + _ ResolutionCache = &simpleCache{} +) + +// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. +func initResolutionCache() { + resCache = defaultResolutionCache() +} + +func defaultResolutionCache() *simpleCache { + return &simpleCache{store: map[string]any{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func cacheOrDefault(cache ResolutionCache) ResolutionCache { + onceCache.Do(initResolutionCache) + + if cache != nil { + return cache + } + + // get a shallow clone of the base cache with swagger and json schema + return resCache.ShallowClone() +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 000000000000..fafe639b45d6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,46 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag/jsonutils" +) + +// ContactInfo contact information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + ContactInfoProps + VendorExtensible +} + +// ContactInfoProps hold the properties of a ContactInfo object +type ContactInfoProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} + +// UnmarshalJSON hydrates ContactInfo from json +func (c *ContactInfo) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { + return err + } + return json.Unmarshal(data, &c.VendorExtensible) +} + +// MarshalJSON produces ContactInfo as json +func (c ContactInfo) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(c.ContactInfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(c.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 000000000000..f4316c26333d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "fmt" + "log" + "os" + "path" + "runtime" +) + +// Debug is true when the SWAGGER_DEBUG env var is not empty. +// +// It enables a more verbose logging of this package. +var Debug = os.Getenv("SWAGGER_DEBUG") != "" + +var ( + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...any) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go new file mode 100644 index 000000000000..0d0b69996cce --- /dev/null +++ b/vendor/github.com/go-openapi/spec/embed.go @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "embed" + "path" +) + +//go:embed schemas/*.json schemas/*/*.json +var assets embed.FS + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) +} + +func v2SchemaJSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) +} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go new file mode 100644 index 000000000000..e39ab8bf71e9 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import "errors" + +// Error codes +var ( + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") + + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") + + // ErrDerefUnsupportedType indicates that a resolved 
reference was found in an unsupported container type. + // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + ErrDerefUnsupportedType = errors.New("deref: unsupported type") + + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") + + // ErrSpec is an error raised by the spec package + ErrSpec = errors.New("spec error") +) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 000000000000..cc4bd1cba1ba --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,598 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "fmt" +) + +const smallPrealloc = 10 + +// ExpandOptions provides options for the spec expander. +// +// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. +// +// If left empty, the root document is assumed to be located in the current working directory: +// all relative $ref's will be resolved from there. +// +// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. +type ExpandOptions struct { + RelativeBase string // the path to the root document to expand. This is a file, not a directory + SkipSchemas bool // do not expand schemas, just paths, parameters and responses + ContinueOnError bool // continue expanding even after and error is found + PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document + AbsoluteCircularRef bool // circular $ref remaining after expansion remain absolute URLs +} + +func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { + if opts != nil { + clone := *opts // shallow clone to avoid internal changes to be propagated to the caller + if clone.RelativeBase != "" { + clone.RelativeBase = normalizeBase(clone.RelativeBase) + } + // if the relative base is empty, let the schema loader choose a pseudo root document + return &clone + } + return &ExpandOptions{} +} + +// ExpandSpec expands the references in a swagger spec +func ExpandSpec(spec *Swagger, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(spec, options, nil, nil) + + specBasePath := options.RelativeBase + + if !options.SkipSchemas { + for key, definition := range spec.Definitions { + parentRefs := make([]string, 0, smallPrealloc) + parentRefs = append(parentRefs, "#/definitions/"+key) + + def, err := expandSchema(definition, parentRefs, resolver, specBasePath) + if resolver.shouldStopOnError(err) { + return err + } + if def != nil { + spec.Definitions[key] = *def + } + } + } + + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Parameters[key] = parameter + } + + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key := range spec.Paths.Paths { + pth := 
spec.Paths.Paths[key] + if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Paths.Paths[key] = pth + } + } + + return nil +} + +const rootBase = ".root" + +// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry +// for further $ref resolution +func baseForRoot(root any, cache ResolutionCache) string { + // cache the root document to resolve $ref's + normalizedBase := normalizeBase(rootBase) + + if root == nil { + // ensure that we never leave a nil root: always cache the root base pseudo-document + cachedRoot, found := cache.Get(normalizedBase) + if found && cachedRoot != nil { + // the cache is already preloaded with a root + return normalizedBase + } + + root = map[string]any{} + } + + cache.Set(normalizedBase, root) + + return normalizedBase +} + +// ExpandSchema expands the refs in the schema object with reference to the root object. +// +// go-openapi/validate uses this function. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandSchemaWithBasePath to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil. +func ExpandSchema(schema *Schema, root any, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + if root == nil { + root = schema + } + + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + } + + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. +// +// Setting the cache is optional and this parameter may safely be left to nil. 
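A minimal usage sketch of ExpandSchema as vendored above (illustrative only, not part of the vendored files; the sample document and names are assumed). It resolves internal $ref's against the root document passed in, and a nil cache falls back to a clone of the package-level default (see cacheOrDefault in cache.go):

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/go-openapi/spec"
    )

    func main() {
        // Assumed sample document: one schema referencing another via #/definitions.
        raw := []byte(`{
          "swagger": "2.0",
          "info": {"title": "demo", "version": "1.0.0"},
          "paths": {},
          "definitions": {
            "pet":   {"type": "object", "properties": {"owner": {"$ref": "#/definitions/owner"}}},
            "owner": {"type": "object", "properties": {"name": {"type": "string"}}}
          }
        }`)

        var doc spec.Swagger
        if err := json.Unmarshal(raw, &doc); err != nil {
            log.Fatal(err)
        }

        // Expand "pet" in place against the root; a nil cache uses the package default.
        pet := doc.Definitions["pet"]
        if err := spec.ExpandSchema(&pet, &doc, nil); err != nil {
            log.Fatal(err)
        }

        // The $ref to #/definitions/owner has been replaced by the owner schema itself.
        fmt.Println(pet.Properties["owner"].Properties["name"].Type)
    }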
+func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + cache = cacheOrDefault(cache) + + opts = optionsOrDefault(opts) + + resolver := defaultSchemaLoader(nil, opts, cache, nil) + + parentRefs := make([]string, 0, smallPrealloc) + s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) + if err != nil { + return err + } + if s != nil { + // guard for when continuing on error + *schema = *s + } + + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items == nil { + return &target, nil + } + + // array + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + + // tuple + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + newRef := normalizeRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + if target.ID != "" { + basePath, _ = resolver.setSchemaID(target, target.ID, basePath) + } + + if target.Ref.String() != "" { + if !resolver.options.SkipSchemas { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + // when "expand" with SkipSchema, we just rebase the existing $ref without replacing + // the full schema. 
+ rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) + if err != nil { + return nil, err + } + target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + + return &target, nil + } + + for k := range target.Definitions { + tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if tt != nil { + target.Definitions[k] = *tt + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t + } + } + + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AnyOf[i] = *t + } + } + + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Not = *t + } + } + + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + return &target, nil +} + +func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + // if a Ref is found, all sibling fields are skipped + // Ref also changes the resolution scope of children expandSchema + + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. 
We leave them as ref. + // - denormalization means that a new local file ref is set relative to the original basePath + debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID) + } else { + target.Ref = *normalizedRef + } + return &target, nil + } + + var t *Schema + err := resolver.Resolve(&target.Ref, &t, basePath) + if resolver.shouldStopOnError(err) { + return nil, err + } + + if t == nil { + // guard for when continuing on error + return &target, nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + transitiveResolver := resolver.transitiveResolver(basePath, target.Ref) + + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) + + return expandSchema(*t, parentRefs, transitiveResolver, basePath) +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { + if pathItem == nil { + return nil + } + + parentRefs := make([]string, 0, smallPrealloc) + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + if pathItem.Ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref) + basePath = transitiveResolver.updateBasePath(resolver, basePath) + resolver = transitiveResolver + } + + pathItem.Ref = Ref{} + for i := range pathItem.Parameters { + if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, + } + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { + if op == nil { + return nil + } + + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + op.Parameters[i] = param + } + + if op.Responses == nil { + return nil + } + + responses := op.Responses + if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + responses.StatusCodeResponses[code] = response + } + + return nil +} + +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandResponse to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil. 
+func ExpandResponseWithRoot(response *Response, root any, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// +// All refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandParameter to resolve external references). +func ExpandParameterWithRoot(parameter *Parameter, root any, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input any) (*Ref, *Schema, error) { + var ( + ref *Ref + sch *Schema + ) + + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) + } + + return ref, sch, nil +} + +func expandParameterOrResponse(input any, resolver *schemaLoader, basePath string) error { + ref, sch, err := getRefAndSchema(input) + if err != nil { + return err + } + + if ref == nil && sch == nil { // nothing to do + return nil + } + + parentRefs := make([]string, 0, smallPrealloc) + if ref != nil { + // dereference this $ref + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ = getRefAndSchema(input) + } + + if ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, *ref) + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch == nil { + // nothing to be expanded + if ref != nil { + *ref = Ref{} + } + + return nil + } + + if sch.Ref.String() != "" { + rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) + if ern != nil { + return ern + } + + if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ + // this is a circular $ref: stop expansion + if !resolver.options.AbsoluteCircularRef { + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } else { + sch.Ref = rebasedRef + } + } + } + + // $ref expansion or rebasing is performed by expandSchema below + if ref != nil { + *ref = Ref{} + } + + // expand schema + // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + + if s != nil { // guard for when continuing on error + *sch = *s + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 000000000000..17b8efbf1008 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 000000000000..ab251ef76595 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,192 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = jsonArray + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue any) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(maximum int64) *Header { + h.MaxLength = &maximum + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(minimum int64) *Header { + h.MinLength = &minimum + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value 
+func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(maximum float64, exclusive bool) *Header { + h.Maximum = &maximum + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(minimum float64, exclusive bool) *Header { + h.Minimum = &minimum + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values ...any) *Header { + h.Enum = append([]any{}, values...) + return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// WithValidations is a fluent method to set header validations +func (h *Header) WithValidations(val CommonValidations) *Header { + h.SetValidations(SchemaValidations{CommonValidations: val}) + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (any, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 000000000000..9401065bbdeb --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,173 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// Extensions vendor specific extensions +type Extensions map[string]any + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value any) { + realKey := strings.ToLower(key) + e[realKey] = value +} 
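A quick illustration of the Extensions helpers above (a sketch, not part of the vendored file): Add lower-cases keys, and the typed getters defined just below apply the same rule, so lookups are effectively case-insensitive; note also that VendorExtensible.MarshalJSON further down only serializes keys carrying the "x-" prefix.

    package main

    import (
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        ext := spec.Extensions{}
        ext.Add("X-Go-Name", "Order") // stored under "x-go-name"

        if name, ok := ext.GetString("x-go-name"); ok {
            fmt.Println(name) // "Order"
        }

        // Keys without the "x-" prefix stay in the map but are dropped by
        // VendorExtensible.MarshalJSON (defined further below).
        ext.Add("internal-note", "not serialized")
    }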
+ +// GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetInt gets a int value from the extensions +func (e Extensions) GetInt(key string) (int, bool) { + realKey := strings.ToLower(key) + + if v, ok := e.GetString(realKey); ok { + if r, err := strconv.Atoi(v); err == nil { + return r, true + } + } + + if v, ok := e[realKey]; ok { + if r, rOk := v.(float64); rOk { + return int(r), true + } + } + return -1, false +} + +// GetBool gets a string value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string value from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, isSlice := v.([]any) + if !isSlice { + return nil, false + } + var strs []string + for _, iface := range arr { + str, isString := iface.(string) + if !isString { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// VendorExtensible composition block. +type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value any) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]any) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]any) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]any + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]any{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. 
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (any, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 000000000000..d30ca3569b11 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,223 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default any `json:"default,omitempty"` + Example any `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. 
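The Items helpers read as a small fluent builder; a sketch of composing one (illustrative only; CollectionOf and the remaining setters follow just below):

    package main

    import (
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        // A csv-formatted array of strings with at least one element.
        tag := spec.NewItems().Typed("string", "")
        tags := spec.NewItems().CollectionOf(tag, "csv").WithMinItems(1)

        fmt.Println(tags.Type, tags.CollectionFormat, *tags.MinItems) // array csv 1
    }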
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue any) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(maximum int64) *Items { + i.MaxLength = &maximum + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(minimum int64) *Items { + i.MinLength = &minimum + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(maximum float64, exclusive bool) *Items { + i.Maximum = &maximum + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(minimum float64, exclusive bool) *Items { + i.Minimum = &minimum + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...any) *Items { + i.Enum = append([]any{}, values...) + return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// WithValidations is a fluent method to set Items validations +func (i *Items) WithValidations(val CommonValidations) *Items { + i.SetValidations(SchemaValidations{CommonValidations: val}) + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (any, error) { + 
if token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 000000000000..286b237e2bf7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,45 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag/jsonutils" +) + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + LicenseProps + VendorExtensible +} + +// LicenseProps holds the properties of a License object +type LicenseProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// UnmarshalJSON hydrates License from json +func (l *License) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &l.LicenseProps); err != nil { + return err + } + return json.Unmarshal(data, &l.VendorExtensible) +} + +// MarshalJSON produces License as json +func (l License) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(l.LicenseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(l.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 000000000000..c3ea810aaf34 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,191 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "net/url" + "path" + "strings" +) + +const fileScheme = "file" + +// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. +// +// NOTE(windows): there is a tolerance over the strict URI format on windows. +// +// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like +// 'C:\Path\file.Yaml'. +// +// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path: +// 'file:///c:/path/file.yaml' +// +// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or +// 'file:///c:\folder\File.json'. +// +// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair" +// is attempted. +// +// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()). +func normalizeURI(refPath, base string) string { + refURL, err := parseURL(refPath) + if err != nil { + specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err) + refURL, refPath = repairURI(refPath) + } + + fixWindowsURI(refURL, refPath) // noop on non-windows OS + + refURL.Path = path.Clean(refURL.Path) + if refURL.Path == "." 
{ + refURL.Path = "" + } + + r := MustCreateRef(refURL.String()) + if r.IsCanonical() { + return refURL.String() + } + + baseURL, _ := parseURL(base) + if path.IsAbs(refURL.Path) { + baseURL.Path = refURL.Path + } else if refURL.Path != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + + return baseURL.String() +} + +// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document. +// +// When calling this, we assume that: +// * $ref is a canonical URI +// * originalRelativeBase is a canonical URI +// +// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected. +// In this case, expansion stops and normally renders the internal canonical $ref. +// +// This internal $ref is eventually rebased to the original RelativeBase used for the expansion. +// +// There is a special case for schemas that are anchored with an "id": +// in that case, the rebasing is performed // against the id only if this is an anchor for the initial root document. +// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing. +func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { + debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + // short circuit: $ref to current doc + return *ref + } + + if id != "" { + idBaseURL, err := parseURL(id) + if err == nil { // if the schema id is not usable as a URI, ignore it + if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchaged (do not want $ref: "") + // $ref relative to the ID of the schema in the root document + return ref + } + } + } + + originalRelativeBaseURL, _ := parseURL(originalRelativeBase) + + r, _ := rebase(ref, originalRelativeBaseURL, false) + + return r +} + +func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) { + var newBase url.URL + + u := ref.GetURL() + + if u.Scheme != v.Scheme || u.Host != v.Host { + return *ref, false + } + + docPath := v.Path + v.Path = path.Dir(v.Path) + + if v.Path == "." { + v.Path = "" + } else if !strings.HasSuffix(v.Path, "/") { + v.Path += "/" + } + + newBase.Fragment = u.Fragment + + if after, ok := strings.CutPrefix(u.Path, docPath); ok { + newBase.Path = after + } else { + newBase.Path = strings.TrimPrefix(u.Path, v.Path) + } + + if notEqual && newBase.Path == "" && newBase.Fragment == "" { + // do not want rebasing to end up in an empty $ref + return *ref, false + } + + if path.IsAbs(newBase.Path) { + // whenever we end up with an absolute path, specify the scheme and host + newBase.Scheme = v.Scheme + newBase.Host = v.Host + } + + return MustCreateRef(newBase.String()), true +} + +// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor +func normalizeRef(ref *Ref, relativeBase string) *Ref { + r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) + return &r +} + +// normalizeBase performs a normalization of the input base path. +// +// This always yields a canonical URI (absolute), usable for the document cache. +// +// It ensures that all further internal work on basePath may safely assume +// a non-empty, cross-platform, canonical URI (i.e. absolute). +// +// This normalization tolerates windows paths (e.g. 
C:\x\y\File.dat) and transform this +// in a file:// URL with lower cased drive letter and path. +// +// See also: https://en.wikipedia.org/wiki/File_URI_scheme +func normalizeBase(in string) string { + u, err := parseURL(in) + if err != nil { + specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) + u, in = repairURI(in) + } + + u.Fragment = "" // any fragment in the base is irrelevant + + fixWindowsURI(u, in) // noop on non-windows OS + + u.Path = path.Clean(u.Path) + if u.Path == "." { // empty after Clean() + u.Path = "" + } + + if u.Scheme != "" { + if path.IsAbs(u.Path) || u.Scheme != fileScheme { + // this is absolute or explicitly not a local file: we're good + return u.String() + } + } + + // no scheme or file scheme with relative path: assume file and make it absolute + // enforce scheme file://... with absolute path. + // + // If the input path is relative, we anchor the path to the current working directory. + // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json + + u.Scheme = fileScheme + u.Path = absPath(u.Path) // platform-dependent + u.RawQuery = "" // any query component is irrelevant for a base + return u.String() +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go new file mode 100644 index 000000000000..0d55632349f8 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -0,0 +1,32 @@ +//go:build !windows + +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "net/url" + "path/filepath" +) + +// absPath makes a file path absolute and compatible with a URI path component. +// +// The parameter must be a path, not an URI. +func absPath(in string) string { + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + return anchored +} + +func repairURI(in string) (*url.URL, string) { + u, _ := parseURL("") + debugLog("repaired URI: original: %q, repaired: %q", in, "") + return u, "" +} + +func fixWindowsURI(_ *url.URL, _ string) { +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go new file mode 100644 index 000000000000..61515c9a163f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go @@ -0,0 +1,143 @@ +// -build windows + +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// absPath makes a file path absolute and compatible with a URI path component +// +// The parameter must be a path, not an URI. +func absPath(in string) string { + // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths. + // See https://github.com/golang/go/issues/24441 + if in == "" { + in = "." + } + + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + + pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`) + if !strings.HasPrefix(pth, "/") { + pth = "/" + pth + } + + return path.Clean(pth) +} + +// repairURI tolerates invalid file URIs with common typos +// such as 'file://E:\folder\file', that break the regular URL parser. 
+// +// Adopting the same defaults as for unixes (e.g. return an empty path) would +// result into a counter-intuitive result for that case (e.g. E:\folder\file is +// eventually resolved as the current directory). The repair will detect the missing "/". +// +// Note that this only works for the file scheme. +func repairURI(in string) (*url.URL, string) { + const prefix = fileScheme + "://" + if !strings.HasPrefix(in, prefix) { + // giving up: resolve to empty path + u, _ := parseURL("") + + return u, "" + } + + // attempt the repair, stripping the scheme should be sufficient + u, _ := parseURL(strings.TrimPrefix(in, prefix)) + debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) + + return u, u.String() +} + +// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml +// and makes it a canonical URI: file:///c:/base/file.yaml +// +// Catch 22 notes for Windows: +// +// * There may be a drive letter on windows (it is lower-cased) +// * There may be a share UNC, e.g. \\server\folder\data.xml +// * Paths are case insensitive +// * Paths may already contain slashes +// * Paths must be slashed +// +// NOTE: there is no escaping. "/" may be valid separators just like "\". +// We don't use ToSlash() (which escapes everything) because windows now also +// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. +func fixWindowsURI(u *url.URL, in string) { + drive := filepath.VolumeName(in) + + if len(drive) > 0 { + if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter + u.Scheme = fileScheme + u.Host = "" + u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) + } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume + // NOTE: the special host@port syntax for UNC is not supported (yet) + u.Scheme = fileScheme + + // this is a modified version of filepath.Dir() to apply on the VolumeName itself + i := len(drive) - 1 + for i >= 0 && !os.IsPathSeparator(drive[i]) { + i-- + } + host := drive[:i] // \\host\share => host + + u.Path = strings.TrimPrefix(u.Path, host) + u.Host = strings.TrimPrefix(host, `\\`) + } + + u.Opaque = "" + u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) + + // ensure we form an absolute path + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + + u.Path = path.Clean(u.Path) + + return + } + + if u.Scheme == fileScheme { + // Handle dodgy cases for file://{...} URIs on windows. + // A canonical URI should always be followed by an absolute path. + // + // Examples: + // * file:///folder/file => valid, unchanged + // * file:///c:\folder\file => slashed + // * file:///./folder/file => valid, cleaned to remove the dot + // * file:///.\folder\file => remapped to cwd + // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) + // * file:///.. 
=> dodgy, remapped to / (consistent with the behavior on unix) + if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { + // ensure we form an absolute path + u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + } + u.Path = strings.ToLower(u.Path) + } + + // NOTE: lower case normalization does not propagate to inner resources, + // generated when rebasing: when joining a relative URI with a file to an absolute base, + // only the base is currently lower-cased. + // + // For now, we assume this is good enough for most use cases + // and try not to generate too many differences + // between the output produced on different platforms. + u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 000000000000..bbf8c7573723 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,392 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "sort" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +func init() { + gob.Register(map[string]any{}) + gob.Register([]any{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marhaller here to handle a special cases related to +// the Security field. We need to preserve zero length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + *Alias + + Security []map[string][]string `json:"security,omitempty"` + }{ + Alias: (*Alias)(&op), + Security: op.Security, + }) + } + + return json.Marshal(&struct { + *Alias + + Security []map[string][]string `json:"security"` + }{ + Alias: (*Alias)(&op), + Security: op.Security, + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// NewOperation creates a new operation instance. +// It expects an ID as parameter but not passing an ID is also valid. 
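The custom OperationProps marshaller above exists to distinguish two cases that a plain omitempty tag would conflate; a sketch of the difference (illustrative only, variable names assumed):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        var inherit, public spec.Operation

        // nil: the field is omitted, so spec-level security applies.
        inherit.Security = nil

        // non-nil but empty: serialized as "security": [], i.e. explicitly no requirements.
        public.Security = []map[string][]string{}

        b1, _ := json.Marshal(inherit)
        b2, _ := json.Marshal(public)
        fmt.Println(string(b1)) // {}
        fmt.Println(string(b2)) // {"security":[]}
    }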
+func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) + } + } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (any, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := jsonutils.ConcatJSON(b1, b2) + return concated, nil +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprected +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) 
+ return o +} + +// AddParam adds a parameter to this operation, when a parameter for that location +// and with that name already exists it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := make([]Parameter, 0, len(o.Parameters)+1) + params = append(params, o.Parameters[:i]...) + params = append(params, *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == in { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation. +// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. +// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + 
raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 000000000000..b94b7682ac8e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,315 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} +} + +// FormDataParam creates a body parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a body parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +// ParamProps describes the specific attributes of an operation 
parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. +// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// - Header - Custom headers that are expected as part of the request. +// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (any, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue any) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(maximum int64) *Parameter { + p.MaxLength = &maximum + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(minimum int64) *Parameter { + p.MinLength = &minimum + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(maximum float64, exclusive bool) *Parameter { + p.Maximum = &maximum + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) 
WithMinimum(minimum float64, exclusive bool) *Parameter { + p.Minimum = &minimum + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets a the enum values (replace) +func (p *Parameter) WithEnum(values ...any) *Parameter { + p.Enum = append([]any{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// WithValidations is a fluent method to set parameter validations +func (p *Parameter) WithValidations(val CommonValidations) *Parameter { + p.SetValidations(SchemaValidations{CommonValidations: val}) + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 000000000000..c692b89e46c6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,76 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. 
+// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (any, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := jsonutils.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 000000000000..b9e42184b19e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,87 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag/jsonutils" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (any, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q: %w", token, ErrSpec) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + var d any + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := jsonutils.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go new file mode 100644 index 000000000000..4142308dd397 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -0,0 +1,95 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "bytes" + "encoding/json" + "reflect" + "sort" +) + +// OrderSchemaItem holds a named schema (e.g. from a property of an object) +type OrderSchemaItem struct { + Schema + + Name string +} + +// OrderSchemaItems is a sortable slice of named schemas. +// The ordering is defined by the x-order schema extension. +type OrderSchemaItems []OrderSchemaItem + +// MarshalJSON produces a json object with keys defined by the name schemas +// of the OrderSchemaItems slice, keeping the original order of the slice. 
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteString("{") + for i := range items { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString("\"") + buf.WriteString(items[i].Name) + buf.WriteString("\":") + bs, err := json.Marshal(&items[i].Schema) + if err != nil { + return nil, err + } + buf.Write(bs) + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +func (items OrderSchemaItems) Len() int { return len(items) } +func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } +func (items OrderSchemaItems) Less(i, j int) (ret bool) { + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") + if oki { + if okj { + defer func() { + if err := recover(); err != nil { + defer func() { + if err = recover(); err != nil { + ret = items[i].Name < items[j].Name + } + }() + ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() + } + }() + return ii < ij + } + return true + } else if okj { + return false + } + return items[i].Name < items[j].Name +} + +// SchemaProperties is a map representing the properties of a Schema object. +// It knows how to transform its keys into an ordered slice. +type SchemaProperties map[string]Schema + +// ToOrderedSchemaItems transforms the map of properties into a sortable slice +func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { + items := make(OrderSchemaItems, 0, len(properties)) + for k, v := range properties { + items = append(items, OrderSchemaItem{ + Name: k, + Schema: v, + }) + } + sort.Sort(items) + return items +} + +// MarshalJSON produces properties as json, keeping their order. +func (properties SchemaProperties) MarshalJSON() ([]byte, error) { + if properties == nil { + return []byte("null"), nil + } + return json.Marshal(properties.ToOrderedSchemaItems()) +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 000000000000..c9279262942b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,184 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshalss the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + + return Ref{Ref: ref}, nil +} + +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error. 
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return "" + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI(basepaths ...string) bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + //nolint:noctx,gosec + rr, err := http.Get(v) + if err != nil { + return false + } + defer rr.Body.Close() + + // true if the response is >= 200 and < 300 + return rr.StatusCode/100 == 2 //nolint:mnd + } + + if !r.HasFileScheme && !r.HasFullFilePath && !r.HasURLPathOnly { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(filepath.ToSlash(pth)) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]any{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]any + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]any) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go new file mode 100644 index 000000000000..b82c18213325 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "fmt" + + "github.com/go-openapi/swag/jsonutils" +) + +func resolveAnyWithBase(root any, ref *Ref, result any, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(root, options, nil, nil) + + if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { + return err + } + + return nil +} + +// 
ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root any, ref *Ref, options *ExpandOptions) (*Schema, error) { + result := new(Schema) + + if err := resolveAnyWithBase(root, ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveRef resolves a reference for a schema against a context root +// ref is guaranteed to be in root (no need to go to external files) +// +// ResolveRef is ONLY called from the code generation module +func ResolveRef(root any, ref *Ref) (*Schema, error) { + res, _, err := ref.GetPointer().Get(root) + if err != nil { + return nil, err + } + + switch sch := res.(type) { + case Schema: + return &sch, nil + case *Schema: + return sch, nil + case map[string]any: + newSch := new(Schema) + if err = jsonutils.FromDynamicJSON(sch, newSch); err != nil { + return nil, err + } + return newSch, nil + default: + return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) + } +} + +// ResolveParameterWithBase resolves a parameter reference against a context root and base path +func ResolveParameterWithBase(root any, ref Ref, options *ExpandOptions) (*Parameter, error) { + result := new(Parameter) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveParameter resolves a parameter reference against a context root +func ResolveParameter(root any, ref Ref) (*Parameter, error) { + return ResolveParameterWithBase(root, ref, nil) +} + +// ResolveResponseWithBase resolves response a reference against a context root and base path +func ResolveResponseWithBase(root any, ref Ref, options *ExpandOptions) (*Response, error) { + result := new(Response) + + err := resolveAnyWithBase(root, &ref, result, options) + if err != nil { + return nil, err + } + + return result, nil +} + +// ResolveResponse resolves response a reference against a context root +func ResolveResponse(root any, ref Ref) (*Response, error) { + return ResolveResponseWithBase(root, ref, nil) +} + +// ResolvePathItemWithBase resolves response a path item against a context root and base path +func ResolvePathItemWithBase(root any, ref Ref, options *ExpandOptions) (*PathItem, error) { + result := new(PathItem) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolvePathItem resolves response a path item against a context root and base path +// +// Deprecated: use ResolvePathItemWithBase instead +func ResolvePathItem(root any, ref Ref, options *ExpandOptions) (*PathItem, error) { + return ResolvePathItemWithBase(root, ref, options) +} + +// ResolveItemsWithBase resolves parameter items reference against a context root and base path. +// +// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. +// Similarly, $ref are forbidden in response headers. +func ResolveItemsWithBase(root any, ref Ref, options *ExpandOptions) (*Items, error) { + result := new(Items) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveItems resolves parameter items reference against a context root and base path. 
+// +// Deprecated: use ResolveItemsWithBase instead +func ResolveItems(root any, ref Ref, options *ExpandOptions) (*Items, error) { + return ResolveItemsWithBase(root, ref, options) +} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go new file mode 100644 index 000000000000..e5a7e5c40d43 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/response.go @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// ResponseProps properties specific to a response +type ResponseProps struct { + Description string `json:"description"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]any `json:"examples,omitempty"` +} + +// Response describes a single response from an API Operation. +// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// JSONLookup look up a value by the json property name +func (r Response) JSONLookup(token string) (any, error) { + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &r.Ref, nil + } + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return json.Unmarshal(data, &r.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if r.Ref.String() == "" { + // when there is no $ref, empty description is rendered as an empty string + b1, err = json.Marshal(r.ResponseProps) + } else { + // when there is $ref inside the schema, description should be omitempty-ied + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]any `json:"examples,omitempty"` + }{ + Description: r.Description, + Schema: r.Schema, + Examples: r.Examples, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2, b3), nil +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. 
+// Passing a nil argument removes the schema from this response +func (r *Response) WithSchema(schema *Schema) *Response { + r.Schema = schema + return r +} + +// AddHeader adds a header to this response +func (r *Response) AddHeader(name string, header *Header) *Response { + if header == nil { + return r.RemoveHeader(name) + } + if r.Headers == nil { + r.Headers = make(map[string]Header) + } + r.Headers[name] = *header + return r +} + +// RemoveHeader removes a header from this response +func (r *Response) RemoveHeader(name string) *Response { + delete(r.Headers, name) + return r +} + +// AddExample adds an example to this response +func (r *Response) AddExample(mediaType string, example any) *Response { + if r.Examples == nil { + r.Examples = make(map[string]any) + } + r.Examples[mediaType] = example + return r +} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go new file mode 100644 index 000000000000..733a1315d02b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag/jsonutils" +) + +// Responses is a container for the expected responses of an operation. +// The container maps a HTTP response code to the expected response. +// It is not expected from the documentation to necessarily cover all possible HTTP response codes, +// since they may not be known in advance. However, it is expected from the documentation to cover +// a successful operation response and any known errors. +// +// The `default` can be used a default response object for all HTTP codes that are not covered +// individually by the specification. +// +// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response +// for a successful operation call. +// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (r Responses) JSONLookup(token string) (any, error) { + if token == "default" { + return r.Default, nil + } + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if i, err := strconv.Atoi(token); err == nil { + if scr, ok := r.StatusCodeResponses[i]; ok { + return scr, nil + } + } + return nil, fmt.Errorf("object has no field %q: %w", token, ErrSpec) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := jsonutils.ConcatJSON(b1, b2) + return concated, nil +} + +// ResponsesProps describes all responses for an operation. +// It tells what is the default response and maps all responses with a +// HTTP status code. 
+type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + if v, ok := res["default"]; ok { + var defaultRes Response + if err := json.Unmarshal(v, &defaultRes); err != nil { + return err + } + r.Default = &defaultRes + delete(res, "default") + } + for k, v := range res { + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp + } + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 000000000000..6623728a41af --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,636 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonname" + "github.com/go-openapi/swag/jsonutils" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property 
+func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]any{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]any + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]any) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := parseURL(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default any `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []any `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties SchemaProperties `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool 
`json:"additionalProperties,omitempty"` + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example any `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. +// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + + ExtraProps map[string]any `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (any, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(maximum int64) *Schema { + s.MaxProperties = &maximum + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(minimum int64) *Schema { + s.MinProperties = &minimum + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format 
+ } + return s +} + +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{jsonArray} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue any) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) + return s +} + +// WithMaxLength sets a max length value +func (s *Schema) WithMaxLength(maximum int64) *Schema { + s.MaxLength = &maximum + return s +} + +// WithMinLength sets a min length value +func (s *Schema) WithMinLength(minimum int64) *Schema { + s.MinLength = &minimum + return s +} + +// WithPattern sets a pattern value +func (s *Schema) WithPattern(pattern string) *Schema { + s.Pattern = pattern + return s +} + +// WithMultipleOf sets a multiple of value +func (s *Schema) WithMultipleOf(number float64) *Schema { + s.MultipleOf = &number + return s +} + +// WithMaximum sets a maximum number value +func (s *Schema) WithMaximum(maximum float64, exclusive bool) *Schema { + s.Maximum = &maximum + s.ExclusiveMaximum = exclusive + return s +} + +// WithMinimum sets a minimum number value +func (s *Schema) WithMinimum(minimum float64, exclusive bool) *Schema { + s.Minimum = &minimum + s.ExclusiveMinimum = exclusive + return s +} + +// WithEnum sets a the enum values (replace) +func (s *Schema) WithEnum(values ...any) *Schema { + s.Enum = append([]any{}, values...) + return s +} + +// WithMaxItems sets the max items +func (s *Schema) WithMaxItems(size int64) *Schema { + s.MaxItems = &size + return s +} + +// WithMinItems sets the min items +func (s *Schema) WithMinItems(size int64) *Schema { + s.MinItems = &size + return s +} + +// UniqueValues dictates that this array can only have unique items +func (s *Schema) UniqueValues() *Schema { + s.UniqueItems = true + return s +} + +// AllowDuplicates this array can have duplicates +func (s *Schema) AllowDuplicates() *Schema { + s.UniqueItems = false + return s +} + +// AddToAllOf adds a schema to the allOf property +func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { + s.AllOf = append(s.AllOf, schemas...) + return s +} + +// WithDiscriminator sets the name of the discriminator field +func (s *Schema) WithDiscriminator(discriminator string) *Schema { + s.Discriminator = discriminator + return s +} + +// AsReadOnly flags this schema as readonly +func (s *Schema) AsReadOnly() *Schema { + s.ReadOnly = true + return s +} + +// AsWritable flags this schema as writeable (not read-only) +func (s *Schema) AsWritable() *Schema { + s.ReadOnly = false + return s +} + +// WithExample sets the example for this schema +func (s *Schema) WithExample(example any) *Schema { + s.Example = example + return s +} + +// WithExternalDocs sets/removes the external docs for/from this schema. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. 
+// So when you pass a non-empty description, you should also pass the url and vice versa. +func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// SetValidations defines all schema validations. +// +// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered. 
+func (s *Schema) SetValidations(val SchemaValidations) { + s.Maximum = val.Maximum + s.ExclusiveMaximum = val.ExclusiveMaximum + s.Minimum = val.Minimum + s.ExclusiveMinimum = val.ExclusiveMinimum + s.MaxLength = val.MaxLength + s.MinLength = val.MinLength + s.Pattern = val.Pattern + s.MaxItems = val.MaxItems + s.MinItems = val.MinItems + s.UniqueItems = val.UniqueItems + s.MultipleOf = val.MultipleOf + s.Enum = val.Enum + s.MinProperties = val.MinProperties + s.MaxProperties = val.MaxProperties + s.PatternProperties = val.PatternProperties +} + +// WithValidations is a fluent method to set schema validations +func (s *Schema) WithValidations(val SchemaValidations) *Schema { + s.SetValidations(val) + return s +} + +// Validations returns a clone of the validations for this schema +func (s Schema) Validations() SchemaValidations { + return SchemaValidations{ + CommonValidations: CommonValidations{ + Maximum: s.Maximum, + ExclusiveMaximum: s.ExclusiveMaximum, + Minimum: s.Minimum, + ExclusiveMinimum: s.ExclusiveMinimum, + MaxLength: s.MaxLength, + MinLength: s.MinLength, + Pattern: s.Pattern, + MaxItems: s.MaxItems, + MinItems: s.MinItems, + UniqueItems: s.UniqueItems, + MultipleOf: s.MultipleOf, + Enum: s.Enum, + }, + MinProperties: s.MinProperties, + MaxProperties: s.MaxProperties, + PatternProperties: s.PatternProperties, + } +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v: %w", err, ErrSpec) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v: %w", err, ErrSpec) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v: %w", err, ErrSpec) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v: %w", err, ErrSpec) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v: %w", err, ErrSpec) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v: %w", err, ErrSpec) + } + b6 = jj + } + return jsonutils.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + props := struct { + SchemaProps + SwaggerSchemaProps + }{} + if err := json.Unmarshal(data, &props); err != nil { + return err + } + + sch := Schema{ + SchemaProps: props.SchemaProps, + SwaggerSchemaProps: props.SwaggerSchemaProps, + } + + var d map[string]any + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range jsonname.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if sch.Extensions == nil { + sch.Extensions = map[string]any{} + } + sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]any{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 000000000000..8d4a98532566 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,322 @@ +// SPDX-FileCopyrightText: Copyright 
2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag/jsonutils" + "github.com/go-openapi/swag/loading" + "github.com/go-openapi/swag/stringutils" +) + +// PathLoader is a function to use when loading remote refs. +// +// This is a package level default. It may be overridden or bypassed by +// specifying the loader in ExpandOptions. +// +// NOTE: if you are using the go-openapi/loads package, it will override +// this value with its own default (a loader to retrieve YAML documents as +// well as JSON ones). +var PathLoader = func(pth string) (json.RawMessage, error) { + data, err := loading.LoadFromFileOrHTTP(pth) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, to shortcircuit $ref resolution. + // + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string + loadDoc func(string) (json.RawMessage, error) + rootID string +} + +func newResolverContext(options *ExpandOptions) *resolverContext { + expandOptions := optionsOrDefault(options) + + // path loader may be overridden by options + var loader func(string) (json.RawMessage, error) + if expandOptions.PathLoader == nil { + loader = PathLoader + } else { + loader = expandOptions.PathLoader + } + + return &resolverContext{ + circulars: make(map[string]bool), + basePath: expandOptions.RelativeBase, // keep the root base path in context + loadDoc: loader, + } +} + +type schemaLoader struct { + root any + options *ExpandOptions + cache ResolutionCache + context *resolverContext +} + +// Resolve resolves a reference against basePath and stores the result in target. +// +// Resolve is not in charge of following references: it only resolves ref by following its URL. +// +// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. 
+// +// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target any, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { + if ref.IsRoot() || ref.HasFragmentOnly { + return r + } + + baseRef := MustCreateRef(basePath) + currentRef := normalizeRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r + } + + // set a new root against which to resolve + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + + return defaultSchemaLoader(root, newOptions, r.cache, r.context) +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + if transitive.options != nil && transitive.options.RelativeBase != "" { + return normalizeBase(transitive.options.RelativeBase) + } + } + + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target any, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return ErrResolveRefNeedsAPointer + } + + if ref.GetURL() == nil { + return nil + } + + var ( + res any + data any + err error + ) + + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. + root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeRef(ref, basePath) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return jsonutils.FromDynamicJSON(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (any, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + var err error + pth := toFetch.String() + normalized := normalizeBase(pth) + debugLog("loading doc from: %s", normalized) + + data, fromCache := r.cache.Get(normalized) + if fromCache { + return data, toFetch, fromCache, nil + } + + b, err := r.context.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + var doc any + if err := json.Unmarshal(b, &doc); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, doc) + + return doc, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// +// It relies on a private context (which needs not be locked). 
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizeURI(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = stringutils.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +func (r *schemaLoader) deref(input any, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) + } + + curRef := ref.String() + if curRef == "" { + return nil + } + + normalizedRef := normalizeRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) { + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + if ref.String() == "" || ref.String() == curRef { + // done with rereferencing + return nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func (r *schemaLoader) setSchemaID(target any, id, basePath string) (string, string) { + debugLog("schema has ID: %s", id) + + // handling the case when id is a folder + // remember that basePath has to point to a file + var refPath string + if strings.HasSuffix(id, "/") { + // ensure this is detected as a file, not a folder + refPath = fmt.Sprintf("%s%s", id, "placeholder.json") + } else { + refPath = id + } + + // updates the current base path + // * important: ID can be a relative path + // * registers target to be fetchable from the new base proposed by this id + newBasePath := normalizeURI(refPath, basePath) + + // store found IDs for possible future reuse in $ref + r.cache.Set(newBasePath, target) + + // the root document has an ID: all $ref relative to that ID may + // be rebased relative to the root document + if basePath == r.context.basePath { + debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) + r.context.rootID = newBasePath + } + + return newBasePath, refPath +} + +func defaultSchemaLoader( + root any, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) *schemaLoader { + + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + + cache = cacheOrDefault(cache) + + if expandOptions.RelativeBase == "" { + // if no relative base is provided, assume the root document + // contains all $ref, or at least, that the relative documents + // may be resolved from the current working directory. 
+ expandOptions.RelativeBase = baseForRoot(root, cache) + } + debugLog("effective expander options: %#v", expandOptions) + + if context == nil { + context = newResolverContext(expandOptions) + } + + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + } +} diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json new file mode 100644 index 000000000000..bcbb84743e38 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" 
}, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json new file mode 100644 index 000000000000..ebe10ed32d60 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json @@ -0,0 +1,1607 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 000000000000..46a4a7e2f9f8 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,159 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// 
SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (any, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { + // when oauth2 for implicit or accessCode flows, empty AuthorizationURL is added as empty string + b1, err = json.Marshal(s.SecuritySchemeProps) + } else { + // when not oauth2, empty AuthorizationURL should be omitted + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 + }{ + Description: s.Description, + Type: s.Type, + Name: s.Name, + In: s.In, + Flow: s.Flow, + AuthorizationURL: s.AuthorizationURL, + TokenURL: s.TokenURL, + Scopes: s.Scopes, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 000000000000..05c3fc775cf2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" +) + +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
+//go:generate perl -pi -e s,Json,JSON,g bindata.go + +const ( + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" + // JSONSchemaURL the url for the json schema + JSONSchemaURL = "http://json-schema.org/draft-04/schema#" +) + +// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error +func MustLoadJSONSchemaDraft04() *Schema { + d, e := JSONSchemaDraft04() + if e != nil { + panic(e) + } + return d +} + +// JSONSchemaDraft04 loads the json schema document for json shema draft04 +func JSONSchemaDraft04() (*Schema, error) { + b, err := jsonschemaDraft04JSONBytes() + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} + +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +func MustLoadSwagger20Schema() *Schema { + d, e := Swagger20Schema() + if e != nil { + panic(e) + } + return d +} + +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +func Swagger20Schema() (*Schema, error) { + + b, err := v2SchemaJSONBytes() + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go new file mode 100644 index 000000000000..f7cd0f608c24 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -0,0 +1,433 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "slices" + "strconv" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// Swagger this is the root document object for the API specification. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document. 
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (any, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + 
return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (any, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) > 0 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !bytes.Equal(data, []byte("false")) + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (any, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if 
err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + return slices.Contains(s, value) +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (any, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single any + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T: %w", single, ErrSpec) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
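The two helper types above use asymmetric JSON encodings: StringOrArray collapses a single element to a bare string, and SchemaOrBool only serializes to `false` when no schema is set and Allows is false. A minimal sketch of that behaviour, assuming the vendored go-openapi/spec API exactly as shown (the example program is illustrative and not part of the patch):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// StringOrArray marshals a single element as a bare string,
	// and multiple elements as a JSON array.
	one, _ := json.Marshal(spec.StringOrArray{"string"})
	many, _ := json.Marshal(spec.StringOrArray{"string", "null"})
	fmt.Println(string(one), string(many)) // "string" ["string","null"]

	// SchemaOrBool is biased towards true: unmarshalling anything other
	// than the literal false (or an object) yields Allows == true.
	var sb spec.SchemaOrBool
	_ = json.Unmarshal([]byte("true"), &sb)
	fmt.Println(sb.Allows) // true

	// Only a nil schema with Allows == false serializes back to false.
	deny, _ := json.Marshal(spec.SchemaOrBool{Allows: false})
	fmt.Println(string(deny)) // false
}
```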
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 000000000000..ae98fd985fb2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,64 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (any, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go new file mode 100644 index 000000000000..8d0c81acd688 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -0,0 +1,14 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import "net/url" + +func parseURL(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err == nil { + u.OmitHost = false + } + return u, err +} diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go new file mode 100644 index 000000000000..4f70e301732b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/validations.go @@ -0,0 +1,222 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +// CommonValidations describe common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []any `json:"enum,omitempty"` +} + +// SetValidations defines all validations for a simple schema. +// +// NOTE: the input is the larger set of validations available for schemas. +// For simple schemas, MinProperties and MaxProperties are ignored. 
+func (v *CommonValidations) SetValidations(val SchemaValidations) { + v.Maximum = val.Maximum + v.ExclusiveMaximum = val.ExclusiveMaximum + v.Minimum = val.Minimum + v.ExclusiveMinimum = val.ExclusiveMinimum + v.MaxLength = val.MaxLength + v.MinLength = val.MinLength + v.Pattern = val.Pattern + v.MaxItems = val.MaxItems + v.MinItems = val.MinItems + v.UniqueItems = val.UniqueItems + v.MultipleOf = val.MultipleOf + v.Enum = val.Enum +} + +type clearedValidation struct { + Validation string + Value any +} + +type clearedValidations []clearedValidation + +func (c clearedValidations) apply(cbs []func(string, any)) { + for _, cb := range cbs { + for _, cleared := range c { + cb(cleared.Validation, cleared.Value) + } + } +} + +// ClearNumberValidations clears all number validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, any)) { + const maxNumberValidations = 5 + done := make(clearedValidations, 0, maxNumberValidations) + defer func() { + done.apply(cbs) + }() + + if v.Minimum != nil { + done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) + v.Minimum = nil + } + if v.Maximum != nil { + done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) + v.Maximum = nil + } + if v.ExclusiveMaximum { + done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) + v.ExclusiveMaximum = false + } + if v.ExclusiveMinimum { + done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) + v.ExclusiveMinimum = false + } + if v.MultipleOf != nil { + done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) + v.MultipleOf = nil + } +} + +// ClearStringValidations clears all string validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearStringValidations(cbs ...func(string, any)) { + const maxStringValidations = 3 + done := make(clearedValidations, 0, maxStringValidations) + defer func() { + done.apply(cbs) + }() + + if v.Pattern != "" { + done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) + v.Pattern = "" + } + if v.MinLength != nil { + done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) + v.MinLength = nil + } + if v.MaxLength != nil { + done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) + v.MaxLength = nil + } +} + +// ClearArrayValidations clears all array validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, any)) { + const maxArrayValidations = 3 + done := make(clearedValidations, 0, maxArrayValidations) + defer func() { + done.apply(cbs) + }() + + if v.MaxItems != nil { + done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) + v.MaxItems = nil + } + if v.MinItems != nil { + done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) + v.MinItems = nil + } + if v.UniqueItems { + done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) + v.UniqueItems = false + } +} + +// Validations returns a clone of the validations for a simple schema. +// +// NOTE: in the context of simple schema objects, MinProperties, MaxProperties +// and PatternProperties remain unset. 
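The Clear*Validations helpers above report each cleared validation to optional callbacks, passing the validation name and its previous value. A small sketch of that callback pattern, using only the API shown (illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	maximum := 10.0
	v := spec.CommonValidations{
		Maximum:          &maximum,
		ExclusiveMaximum: true,
	}

	// Each cleared validation is reported to the callback with its name and
	// previous value (numeric fields are passed as pointers).
	v.ClearNumberValidations(func(name string, value any) {
		fmt.Printf("cleared %s (was %v)\n", name, value)
	})

	fmt.Println(v.HasNumberValidations()) // false
}
```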
+func (v CommonValidations) Validations() SchemaValidations { + return SchemaValidations{ + CommonValidations: v, + } +} + +// HasNumberValidations indicates if the validations are for numbers or integers +func (v CommonValidations) HasNumberValidations() bool { + return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil +} + +// HasStringValidations indicates if the validations are for strings +func (v CommonValidations) HasStringValidations() bool { + return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" +} + +// HasArrayValidations indicates if the validations are for arrays +func (v CommonValidations) HasArrayValidations() bool { + return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems +} + +// HasEnum indicates if the validation includes some enum constraint +func (v CommonValidations) HasEnum() bool { + return len(v.Enum) > 0 +} + +// SchemaValidations describes the validation properties of a schema +// +// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change +// in the exported members: all initializers using litterals would fail. +type SchemaValidations struct { + CommonValidations + + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` +} + +// HasObjectValidations indicates if the validations are for objects +func (v SchemaValidations) HasObjectValidations() bool { + return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil +} + +// SetValidations for schema validations +func (v *SchemaValidations) SetValidations(val SchemaValidations) { + v.CommonValidations.SetValidations(val) + v.PatternProperties = val.PatternProperties + v.MaxProperties = val.MaxProperties + v.MinProperties = val.MinProperties +} + +// Validations for a schema +func (v SchemaValidations) Validations() SchemaValidations { + val := v.CommonValidations.Validations() + val.PatternProperties = v.PatternProperties + val.MinProperties = v.MinProperties + val.MaxProperties = v.MaxProperties + return val +} + +// ClearObjectValidations returns a clone of the validations with all object validations cleared. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, any)) { + const maxObjectValidations = 3 + done := make(clearedValidations, 0, maxObjectValidations) + defer func() { + done.apply(cbs) + }() + + if v.MaxProperties != nil { + done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties}) + v.MaxProperties = nil + } + if v.MinProperties != nil { + done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties}) + v.MinProperties = nil + } + if v.PatternProperties != nil { + done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties}) + v.PatternProperties = nil + } +} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go new file mode 100644 index 000000000000..bf2f8f18b24c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -0,0 +1,57 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions. 
+// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped, this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as an xml node +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes new file mode 100644 index 000000000000..d020be8ea4e7 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore new file mode 100644 index 000000000000..dd91ed6a04e6 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml new file mode 100644 index 000000000000..1ad5adf47e69 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + 
min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. 
The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md new file mode 100644 index 000000000000..de5afe137606 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/README.md @@ -0,0 +1,92 @@ +# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) + +This package exposes a registry of data types to support string formats in the go-openapi toolkit. + +strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. + +## Supported data formats +go-openapi/strfmt follows the swagger 2.0 specification with the following formats +defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). + +It also provides convenient extensions to go-openapi users. + +- [x] JSON-schema draft 4 formats + - date-time + - email + - hostname + - ipv4 + - ipv6 + - uri +- [x] swagger 2.0 format extensions + - binary + - byte (e.g. base64 encoded string) + - date (e.g. "1970-01-01") + - password +- [x] go-openapi custom format extensions + - bsonobjectid (BSON objectID) + - creditcard + - duration (e.g. "3 weeks", "1ms") + - hexcolor (e.g. "#FFFFFF") + - isbn, isbn10, isbn13 + - mac (e.g "01:02:03:04:05:06") + - rgbcolor (e.g. "rgb(100,100,100)") + - ssn + - uuid, uuid3, uuid4, uuid5, uuid7 + - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32") + - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec)) + +> NOTE: as the name stands for, this package is intended to support string formatting only. 
+> It does not provide validation for numerical values with swagger format extension for JSON types "number" or +> "integer" (e.g. float, double, int32...). + +## Type conversion + +All types defined here are stringers and may be converted to strings with `.String()`. +Note that most types defined by this package may be converted directly to string like `string(Email{})`. + +`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(Time{})`. +Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})` + +## Using pointers + +The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does +with primitive types. + +## Format types +Types defined in strfmt expose marshaling and validation capabilities. + +List of defined types: +- Base64 +- CreditCard +- Date +- DateTime +- Duration +- Email +- HexColor +- Hostname +- IPv4 +- IPv6 +- CIDR +- ISBN +- ISBN10 +- ISBN13 +- MAC +- ObjectId +- Password +- RGBColor +- SSN +- URI +- UUID +- [UUID3](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-3) +- [UUID4](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-4) +- [UUID5](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-5) +- [UUID7](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-7) +- [ULID](https://github.com/ulid/spec) + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go new file mode 100644 index 000000000000..0eec8f6432ce --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -0,0 +1,115 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "database/sql/driver" + "fmt" + + bsonprim "go.mongodb.org/mongo-driver/bson/primitive" +) + +func init() { + var id ObjectId + // register this format in the default registry + Default.Add("bsonobjectid", &id, IsBSONObjectID) +} + +// IsBSONObjectID returns true when the string is a valid BSON.ObjectId +func IsBSONObjectID(str string) bool { + _, err := bsonprim.ObjectIDFromHex(str) + return err == nil +} + +// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) +// +// swagger:strfmt bsonobjectid +type ObjectId bsonprim.ObjectID //nolint:revive + +// NewObjectId creates a ObjectId from a Hex String +func NewObjectId(hex string) ObjectId { //nolint:revive + oid, err := bsonprim.ObjectIDFromHex(hex) + if err != nil { + panic(err) + } + return ObjectId(oid) +} + +// MarshalText turns this instance into text +func (id ObjectId) MarshalText() ([]byte, error) { + oid := bsonprim.ObjectID(id) + if oid == bsonprim.NilObjectID { + return nil, nil + } + return []byte(oid.Hex()), nil +} + +// UnmarshalText hydrates this instance from text +func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on + if len(data) == 0 { + *id = ObjectId(bsonprim.NilObjectID) + return nil + } + oidstr := string(data) + oid, err := bsonprim.ObjectIDFromHex(oidstr) + if err != nil { + return err + } + *id = ObjectId(oid) + return nil +} + +// Scan read a value from a database driver +func (id *ObjectId) Scan(raw any) error { + var data []byte + switch v := raw.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v: %w", v, 
ErrFormat) + } + + return id.UnmarshalText(data) +} + +// Value converts a value to a database driver value +func (id ObjectId) Value() (driver.Value, error) { + return driver.Value(bsonprim.ObjectID(id).Hex()), nil +} + +func (id ObjectId) String() string { + return bsonprim.ObjectID(id).Hex() +} + +// MarshalJSON returns the ObjectId as JSON +func (id ObjectId) MarshalJSON() ([]byte, error) { + return bsonprim.ObjectID(id).MarshalJSON() +} + +// UnmarshalJSON sets the ObjectId from JSON +func (id *ObjectId) UnmarshalJSON(data []byte) error { + var obj bsonprim.ObjectID + if err := obj.UnmarshalJSON(data); err != nil { + return err + } + *id = ObjectId(obj) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (id *ObjectId) DeepCopyInto(out *ObjectId) { + *out = *id +} + +// DeepCopy copies the receiver into a new ObjectId. +func (id *ObjectId) DeepCopy() *ObjectId { + if id == nil { + return nil + } + out := new(ObjectId) + id.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go new file mode 100644 index 000000000000..8aa17b8ea551 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/date.go @@ -0,0 +1,151 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "time" +) + +func init() { + d := Date{} + // register this format in the default registry + Default.Add("date", &d, IsDate) +} + +// IsDate returns true when the string is a valid date +func IsDate(str string) bool { + _, err := time.Parse(RFC3339FullDate, str) + return err == nil +} + +const ( + // RFC3339FullDate represents a full-date as specified by RFC3339 + // See: http://goo.gl/xXOvVd + RFC3339FullDate = "2006-01-02" +) + +// Date represents a date from the API +// +// swagger:strfmt date +type Date time.Time + +// String converts this date into a string +func (d Date) String() string { + return time.Time(d).Format(RFC3339FullDate) +} + +// UnmarshalText parses a text representation into a date type +func (d *Date) UnmarshalText(text []byte) error { + if len(text) == 0 { + return nil + } + dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(dd) + return nil +} + +// MarshalText serializes this date type to string +func (d Date) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// Scan scans a Date value from database driver type. +func (d *Date) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + return d.UnmarshalText(v) + case string: + return d.UnmarshalText([]byte(v)) + case time.Time: + *d = Date(v) + return nil + case nil: + *d = Date{} + return nil + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v: %w", v, ErrFormat) + } +} + +// Value converts Date to a primitive value ready to written to a database. 
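Date wraps a time.Time and serializes it with the RFC3339 full-date layout ("2006-01-02") defined above. A short sketch of the round trip through text and JSON, using only the API shown (illustrative, not part of the patch):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	fmt.Println(strfmt.IsDate("1970-01-01")) // true

	var d strfmt.Date
	// UnmarshalText parses the RFC3339 full-date layout ("2006-01-02").
	if err := d.UnmarshalText([]byte("2025-11-15")); err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // 2025-11-15

	out, _ := json.Marshal(d)
	fmt.Println(string(out)) // "2025-11-15"
}
```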
+func (d Date) Value() (driver.Value, error) { + return driver.Value(d.String()), nil +} + +// MarshalJSON returns the Date as JSON +func (d Date) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(d).Format(RFC3339FullDate)) +} + +// UnmarshalJSON sets the Date from JSON +func (d *Date) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var strdate string + if err := json.Unmarshal(data, &strdate); err != nil { + return err + } + tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(tt) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (d *Date) DeepCopyInto(out *Date) { + *out = *d +} + +// DeepCopy copies the receiver into a new Date. +func (d *Date) DeepCopy() *Date { + if d == nil { + return nil + } + out := new(Date) + d.DeepCopyInto(out) + return out +} + +// GobEncode implements the gob.GobEncoder interface. +func (d Date) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface. +func (d *Date) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d Date) MarshalBinary() ([]byte, error) { + return time.Time(d).MarshalBinary() +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Date) UnmarshalBinary(data []byte) error { + var original time.Time + + err := original.UnmarshalBinary(data) + if err != nil { + return err + } + + *d = Date(original) + + return nil +} + +// Equal checks if two Date instances are equal +func (d Date) Equal(d2 Date) bool { + return time.Time(d).Equal(time.Time(d2)) +} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go new file mode 100644 index 000000000000..8a80cfbdb8ae --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -0,0 +1,2110 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "database/sql/driver" + "encoding/base64" + "encoding/json" + "fmt" + "net" + "net/mail" + "net/netip" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/google/uuid" + "golang.org/x/net/idna" +) + +const ( + // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114. + // + // Deprecated: this package no longer uses regular expressions to validate hostnames. + HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z0-9-\p{L}]){2,63})$` + + // json null type + jsonNull = "null" +) + +const ( + // UUIDPattern Regex for UUID that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)` + + // UUID3Pattern Regex for UUID3 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)` + + // UUID4Pattern Regex for UUID4 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. 
+ UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)` + + // UUID5Pattern Regex for UUID5 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)` + + isbn10Pattern string = "^(?:[0-9]{9}X|[0-9]{10})$" + isbn13Pattern string = "^(?:[0-9]{13})$" + usCardPattern string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + ssnPattern string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + hexColorPattern string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + rgbColorPattern string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" +) + +const ( + isbnVersion10 = 10 + isbnVersion13 = 13 + decimalBase = 10 +) + +var ( + idnaHostChecker = idna.New( + idna.ValidateForRegistration(), // shorthand for [idna.StrictDomainName], [idna.ValidateLabels], [idna.VerifyDNSLength], [idna.BidiRule] + ) + + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) + rxISBN10 = regexp.MustCompile(isbn10Pattern) + rxISBN13 = regexp.MustCompile(isbn13Pattern) + rxCreditCard = regexp.MustCompile(usCardPattern) + rxSSN = regexp.MustCompile(ssnPattern) + rxHexcolor = regexp.MustCompile(hexColorPattern) + rxRGBcolor = regexp.MustCompile(rgbColorPattern) +) + +// IsHostname returns true when the string is a valid hostname. +// +// It follows the rules detailed at https://url.spec.whatwg.org/#concept-host-parser +// and implemented by most modern web browsers. +// +// It supports IDNA rules regarding internationalized names with unicode. +// +// Besides: +// * the empty string is not a valid host name +// * a trailing dot is allowed in names and IPv4's (not IPv6) +// * a host name can be a valid IPv4 (with decimal, octal or hexadecimal numbers) or IPv6 address +// * IPv6 zones are disallowed +// * top-level domains can be unicode (cf. https://www.iana.org/domains/root/db). +// +// NOTE: this validator doesn't check top-level domains against the IANA root database. +// It merely ensures that a top-level domain in a FQDN is at least 2 code points long. +func IsHostname(str string) bool { + if len(str) == 0 { + return false + } + + // IP v6 check + if ipv6Cleaned, found := strings.CutPrefix(str, "["); found { + ipv6Cleaned, found = strings.CutSuffix(ipv6Cleaned, "]") + if !found { + return false + } + + return isValidIPv6(ipv6Cleaned) + } + + // IDNA check + res, err := idnaHostChecker.ToASCII(strings.ToLower(str)) + if err != nil || res == "" { + return false + } + + parts := strings.Split(res, ".") + + // IP v4 check + lastPart, lastIndex, shouldBeIPv4 := domainEndsAsNumber(parts) + if shouldBeIPv4 { + // domain ends in a number: must be an IPv4 + return isValidIPv4(parts[:lastIndex+1]) // if the last part is a trailing dot, remove it + } + + // check TLD length (excluding trailing dot) + const minTLDLength = 2 + if lastIndex > 0 && len(lastPart) < minTLDLength { + return false + } + + return true +} + +// domainEndsAsNumber determines if a domain name ends with a decimal, octal or hex digit, +// accounting for a possible trailing dot (the last part being empty in that case). 
+// +// It returns the last non-trailing dot part and if that part consists only of (dec/hex/oct) digits. +func domainEndsAsNumber(parts []string) (lastPart string, lastIndex int, ok bool) { + // NOTE: using ParseUint(x, 0, 32) is not an option, as the IPv4 format supported why WHATWG + // doesn't support notations such as "0b1001" (binary digits) or "0o666" (alternate notation for octal digits). + lastIndex = len(parts) - 1 + lastPart = parts[lastIndex] + if len(lastPart) == 0 { + // trailing dot + if len(parts) == 1 { // dot-only string: normally already ruled out by the IDNA check above + return lastPart, lastIndex, false + } + + lastIndex-- + lastPart = parts[lastIndex] + } + + if startOfHexDigit(lastPart) { + for _, b := range []byte(lastPart[2:]) { + if !isHexDigit(b) { + return lastPart, lastIndex, false + } + } + + return lastPart, lastIndex, true + } + + // check for decimal and octal + for _, b := range []byte(lastPart) { + if !isASCIIDigit(b) { + return lastPart, lastIndex, false + } + } + + return lastPart, lastIndex, true +} + +func startOfHexDigit(str string) bool { + return strings.HasPrefix(str, "0x") // the input has already been lower-cased +} + +func startOfOctalDigit(str string) bool { + if str == "0" { + // a single "0" is considered decimal + return false + } + + return strings.HasPrefix(str, "0") +} + +func isValidIPv6(str string) bool { + // disallow empty ipv6 address + if len(str) == 0 { + return false + } + + addr, err := netip.ParseAddr(str) + if err != nil { + return false + } + + if !addr.Is6() { + return false + } + + // explicit desupport of IPv6 zones + if addr.Zone() != "" { + return false + } + + return true +} + +// isValidIPv4 parses an IPv4 with deciaml, hex or octal digit parts. +// +// We can't rely on [netip.ParseAddr] because we may get a mix of decimal, octal and hex digits. +// +// Examples of valid addresses not supported by [netip.ParseAddr] or [net.ParseIP]: +// +// "192.0x00A80001" +// "0300.0250.0340.001" +// "1.0x.1.1" +// +// But not: +// +// "0b1010.2.3.4" +// "0o07.2.3.4" +func isValidIPv4(parts []string) bool { + // NOTE: using ParseUint(x, 0, 32) is not an option, even though it would simplify this code a lot. + // The IPv4 format supported why WHATWG doesn't support notations such as "0b1001" (binary digits) + // or "0o666" (alternate notation for octal digits). 
+ const ( + maxPartsInIPv4 = 4 + maxDigitsInPart = 11 // max size of a 4-bytes hex or octal digit + ) + + if len(parts) == 0 || len(parts) > maxPartsInIPv4 { + return false + } + + // we call this when we know that the last part is a digit part, so len(lastPart)>0 + + digits := make([]uint64, 0, maxPartsInIPv4) + for _, part := range parts { + if len(part) == 0 { // empty part: this case has normally been already ruled out by the IDNA check above + return false + } + + if len(part) > maxDigitsInPart { // whether decimal, octal or hex, an address can't exceed that length + return false + } + + if !isASCIIDigit(part[0]) { // start of an IPv4 part is always a digit + return false + } + + switch { + case startOfHexDigit(part): + const hexDigitOffset = 2 + hexString := part[hexDigitOffset:] + if len(hexString) == 0 { // 0x part: assume 0 + digits = append(digits, 0) + + continue + } + + hexDigit, err := strconv.ParseUint(hexString, 16, 32) + if err != nil { + return false + } + + digits = append(digits, hexDigit) + + continue + + case startOfOctalDigit(part): + const octDigitOffset = 1 + octString := part[octDigitOffset:] // we know that this is not empty + octDigit, err := strconv.ParseUint(octString, 8, 32) + if err != nil { + return false + } + + digits = append(digits, octDigit) + + default: // assume decimal digits (0-255) + // we know that we don't have a leading 0 (would have been caught by octal digit) + decDigit, err := strconv.ParseUint(part, 10, 8) + if err != nil { + return false + } + + digits = append(digits, decDigit) + } + } + + // now check the digits: the last digit may encompass several parts of the address + lastDigit := digits[len(digits)-1] + if lastDigit > uint64(1)< 1 { + const maxUint8 = uint64(^uint8(0)) + + for i := range len(digits) - 2 { + if digits[i] > maxUint8 { + return false + } + } + } + + return true +} + +func isHexDigit(c byte) bool { + switch { + case '0' <= c && c <= '9': + return true + case 'a' <= c && c <= 'f': // assume the input string to be lower case + return true + } + return false +} + +func isASCIIDigit(c byte) bool { + return c >= '0' && c <= '9' +} + +// IsUUID returns true is the string matches a UUID (in any version, including v6 and v7), upper case is allowed +func IsUUID(str string) bool { + _, err := uuid.Parse(str) + return err == nil +} + +const ( + uuidV3 = 3 + uuidV4 = 4 + uuidV5 = 5 + uuidV7 = 7 +) + +// IsUUID3 returns true is the string matches a UUID v3, upper case is allowed +func IsUUID3(str string) bool { + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(uuidV3) +} + +// IsUUID4 returns true is the string matches a UUID v4, upper case is allowed +func IsUUID4(str string) bool { + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(uuidV4) +} + +// IsUUID5 returns true is the string matches a UUID v5, upper case is allowed +func IsUUID5(str string) bool { + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(uuidV5) +} + +// IsUUID7 returns true is the string matches a UUID v7, upper case is allowed +func IsUUID7(str string) bool { + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(uuidV7) +} + +// IsEmail validates an email address. 
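The host-name checker above follows WHATWG-style parsing rather than the legacy regular expression: trailing dots are accepted, labels go through IDNA, a purely numeric final label forces IPv4 parsing (decimal, octal or hex parts), and IPv6 zones are rejected. A sketch exercising cases taken from the doc comments above (expected results per those comments; illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	for _, host := range []string{
		"example.com.",     // trailing dot is allowed
		"münchen.de",       // IDNA/unicode labels are supported
		"192.0x00A80001",   // IPv4 with mixed decimal/hex parts
		"[2001:db8::1]",    // bracketed IPv6 literal
		"[fe80::1%eth0]",   // IPv6 zones are rejected
		"example.c",        // one-character TLD is rejected
	} {
		fmt.Printf("%-18s %v\n", host, strfmt.IsHostname(host))
	}
}
```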
+func IsEmail(str string) bool { + addr, e := mail.ParseAddress(str) + return e == nil && addr.Address != "" +} + +func init() { + // register formats in the default registry: + // - byte + // - creditcard + // - email + // - hexcolor + // - hostname + // - ipv4 + // - ipv6 + // - cidr + // - isbn + // - isbn10 + // - isbn13 + // - mac + // - password + // - rgbcolor + // - ssn + // - uri + // - uuid + // - uuid3 + // - uuid4 + // - uuid5 + // - uuid7 + u := URI("") + Default.Add("uri", &u, isRequestURI) + + eml := Email("") + Default.Add("email", &eml, IsEmail) + + hn := Hostname("") + Default.Add("hostname", &hn, IsHostname) + + ip4 := IPv4("") + Default.Add("ipv4", &ip4, isIPv4) + + ip6 := IPv6("") + Default.Add("ipv6", &ip6, isIPv6) + + cidr := CIDR("") + Default.Add("cidr", &cidr, isCIDR) + + mac := MAC("") + Default.Add("mac", &mac, isMAC) + + uid := UUID("") + Default.Add("uuid", &uid, IsUUID) + + uid3 := UUID3("") + Default.Add("uuid3", &uid3, IsUUID3) + + uid4 := UUID4("") + Default.Add("uuid4", &uid4, IsUUID4) + + uid5 := UUID5("") + Default.Add("uuid5", &uid5, IsUUID5) + + uid7 := UUID7("") + Default.Add("uuid7", &uid7, IsUUID7) + + isbn := ISBN("") + Default.Add("isbn", &isbn, func(str string) bool { return isISBN10(str) || isISBN13(str) }) + + isbn10 := ISBN10("") + Default.Add("isbn10", &isbn10, isISBN10) + + isbn13 := ISBN13("") + Default.Add("isbn13", &isbn13, isISBN13) + + cc := CreditCard("") + Default.Add("creditcard", &cc, isCreditCard) + + ssn := SSN("") + Default.Add("ssn", &ssn, isSSN) + + hc := HexColor("") + Default.Add("hexcolor", &hc, isHexcolor) + + rc := RGBColor("") + Default.Add("rgbcolor", &rc, isRGBcolor) + + b64 := Base64([]byte(nil)) + Default.Add("byte", &b64, isBase64) + + pw := Password("") + Default.Add("password", &pw, func(_ string) bool { return true }) +} + +// Base64 represents a base64 encoded string, using URLEncoding alphabet +// +// swagger:strfmt byte +type Base64 []byte + +// MarshalText turns this instance into text +func (b Base64) MarshalText() ([]byte, error) { + enc := base64.URLEncoding + src := []byte(b) + buf := make([]byte, enc.EncodedLen(len(src))) + enc.Encode(buf, src) + return buf, nil +} + +// UnmarshalText hydrates this instance from text +func (b *Base64) UnmarshalText(data []byte) error { // validation is performed later on + enc := base64.URLEncoding + dbuf := make([]byte, enc.DecodedLen(len(data))) + + n, err := enc.Decode(dbuf, data) + if err != nil { + return err + } + + *b = dbuf[:n] + return nil +} + +// Scan read a value from a database driver +func (b *Base64) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + dbuf := make([]byte, base64.StdEncoding.DecodedLen(len(v))) + n, err := base64.StdEncoding.Decode(dbuf, v) + if err != nil { + return err + } + *b = dbuf[:n] + case string: + vv, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err + } + *b = Base64(vv) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Base64 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (b Base64) Value() (driver.Value, error) { + return driver.Value(b.String()), nil +} + +func (b Base64) String() string { + return base64.StdEncoding.EncodeToString([]byte(b)) +} + +// MarshalJSON returns the Base64 as JSON +func (b Base64) MarshalJSON() ([]byte, error) { + return json.Marshal(b.String()) +} + +// UnmarshalJSON sets the Base64 from JSON +func (b *Base64) UnmarshalJSON(data []byte) error { + var b64str string + if err := 
json.Unmarshal(data, &b64str); err != nil { + return err + } + vb, err := base64.StdEncoding.DecodeString(b64str) + if err != nil { + return err + } + *b = Base64(vb) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (b *Base64) DeepCopyInto(out *Base64) { + *out = *b +} + +// DeepCopy copies the receiver into a new Base64. +func (b *Base64) DeepCopy() *Base64 { + if b == nil { + return nil + } + out := new(Base64) + b.DeepCopyInto(out) + return out +} + +// URI represents the uri string format as specified by the json schema spec +// +// swagger:strfmt uri +type URI string + +// MarshalText turns this instance into text +func (u URI) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *URI) UnmarshalText(data []byte) error { // validation is performed later on + *u = URI(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *URI) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = URI(string(v)) + case string: + *u = URI(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u URI) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u URI) String() string { + return string(u) +} + +// MarshalJSON returns the URI as JSON +func (u URI) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the URI from JSON +func (u *URI) UnmarshalJSON(data []byte) error { + var uristr string + if err := json.Unmarshal(data, &uristr); err != nil { + return err + } + *u = URI(uristr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *URI) DeepCopyInto(out *URI) { + *out = *u +} + +// DeepCopy copies the receiver into a new URI. +func (u *URI) DeepCopy() *URI { + if u == nil { + return nil + } + out := new(URI) + u.DeepCopyInto(out) + return out +} + +// Email represents the email string format as specified by the json schema spec +// +// swagger:strfmt email +type Email string + +// MarshalText turns this instance into text +func (e Email) MarshalText() ([]byte, error) { + return []byte(string(e)), nil +} + +// UnmarshalText hydrates this instance from text +func (e *Email) UnmarshalText(data []byte) error { // validation is performed later on + *e = Email(string(data)) + return nil +} + +// Scan read a value from a database driver +func (e *Email) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *e = Email(string(v)) + case string: + *e = Email(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Email from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (e Email) Value() (driver.Value, error) { + return driver.Value(string(e)), nil +} + +func (e Email) String() string { + return string(e) +} + +// MarshalJSON returns the Email as JSON +func (e Email) MarshalJSON() ([]byte, error) { + return json.Marshal(string(e)) +} + +// UnmarshalJSON sets the Email from JSON +func (e *Email) UnmarshalJSON(data []byte) error { + var estr string + if err := json.Unmarshal(data, &estr); err != nil { + return err + } + *e = Email(estr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (e *Email) DeepCopyInto(out *Email) { + *out = *e +} + +// DeepCopy copies the receiver into a new Email. 
+func (e *Email) DeepCopy() *Email { + if e == nil { + return nil + } + out := new(Email) + e.DeepCopyInto(out) + return out +} + +// Hostname represents the hostname string format as specified by the json schema spec +// +// swagger:strfmt hostname +type Hostname string + +// MarshalText turns this instance into text +func (h Hostname) MarshalText() ([]byte, error) { + return []byte(string(h)), nil +} + +// UnmarshalText hydrates this instance from text +func (h *Hostname) UnmarshalText(data []byte) error { // validation is performed later on + *h = Hostname(string(data)) + return nil +} + +// Scan read a value from a database driver +func (h *Hostname) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *h = Hostname(string(v)) + case string: + *h = Hostname(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Hostname from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (h Hostname) Value() (driver.Value, error) { + return driver.Value(string(h)), nil +} + +func (h Hostname) String() string { + return string(h) +} + +// MarshalJSON returns the Hostname as JSON +func (h Hostname) MarshalJSON() ([]byte, error) { + return json.Marshal(string(h)) +} + +// UnmarshalJSON sets the Hostname from JSON +func (h *Hostname) UnmarshalJSON(data []byte) error { + var hstr string + if err := json.Unmarshal(data, &hstr); err != nil { + return err + } + *h = Hostname(hstr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (h *Hostname) DeepCopyInto(out *Hostname) { + *out = *h +} + +// DeepCopy copies the receiver into a new Hostname. +func (h *Hostname) DeepCopy() *Hostname { + if h == nil { + return nil + } + out := new(Hostname) + h.DeepCopyInto(out) + return out +} + +// IPv4 represents an IP v4 address +// +// swagger:strfmt ipv4 +type IPv4 string + +// MarshalText turns this instance into text +func (u IPv4) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *IPv4) UnmarshalText(data []byte) error { // validation is performed later on + *u = IPv4(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *IPv4) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = IPv4(string(v)) + case string: + *u = IPv4(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.IPv4 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u IPv4) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u IPv4) String() string { + return string(u) +} + +// MarshalJSON returns the IPv4 as JSON +func (u IPv4) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the IPv4 from JSON +func (u *IPv4) UnmarshalJSON(data []byte) error { + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = IPv4(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *IPv4) DeepCopyInto(out *IPv4) { + *out = *u +} + +// DeepCopy copies the receiver into a new IPv4. 
+func (u *IPv4) DeepCopy() *IPv4 { + if u == nil { + return nil + } + out := new(IPv4) + u.DeepCopyInto(out) + return out +} + +// IPv6 represents an IP v6 address +// +// swagger:strfmt ipv6 +type IPv6 string + +// MarshalText turns this instance into text +func (u IPv6) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *IPv6) UnmarshalText(data []byte) error { // validation is performed later on + *u = IPv6(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *IPv6) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = IPv6(string(v)) + case string: + *u = IPv6(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.IPv6 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u IPv6) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u IPv6) String() string { + return string(u) +} + +// MarshalJSON returns the IPv6 as JSON +func (u IPv6) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the IPv6 from JSON +func (u *IPv6) UnmarshalJSON(data []byte) error { + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = IPv6(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *IPv6) DeepCopyInto(out *IPv6) { + *out = *u +} + +// DeepCopy copies the receiver into a new IPv6. +func (u *IPv6) DeepCopy() *IPv6 { + if u == nil { + return nil + } + out := new(IPv6) + u.DeepCopyInto(out) + return out +} + +// CIDR represents a Classless Inter-Domain Routing notation +// +// swagger:strfmt cidr +type CIDR string + +// MarshalText turns this instance into text +func (u CIDR) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *CIDR) UnmarshalText(data []byte) error { // validation is performed later on + *u = CIDR(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *CIDR) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = CIDR(string(v)) + case string: + *u = CIDR(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.CIDR from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u CIDR) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u CIDR) String() string { + return string(u) +} + +// MarshalJSON returns the CIDR as JSON +func (u CIDR) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the CIDR from JSON +func (u *CIDR) UnmarshalJSON(data []byte) error { + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = CIDR(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *CIDR) DeepCopyInto(out *CIDR) { + *out = *u +} + +// DeepCopy copies the receiver into a new CIDR. 
+func (u *CIDR) DeepCopy() *CIDR { + if u == nil { + return nil + } + out := new(CIDR) + u.DeepCopyInto(out) + return out +} + +// MAC represents a 48 bit MAC address +// +// swagger:strfmt mac +type MAC string + +// MarshalText turns this instance into text +func (u MAC) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *MAC) UnmarshalText(data []byte) error { // validation is performed later on + *u = MAC(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *MAC) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = MAC(string(v)) + case string: + *u = MAC(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.MAC from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u MAC) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u MAC) String() string { + return string(u) +} + +// MarshalJSON returns the MAC as JSON +func (u MAC) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the MAC from JSON +func (u *MAC) UnmarshalJSON(data []byte) error { + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = MAC(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *MAC) DeepCopyInto(out *MAC) { + *out = *u +} + +// DeepCopy copies the receiver into a new MAC. +func (u *MAC) DeepCopy() *MAC { + if u == nil { + return nil + } + out := new(MAC) + u.DeepCopyInto(out) + return out +} + +// UUID represents a uuid string format +// +// swagger:strfmt uuid +type UUID string + +// MarshalText turns this instance into text +func (u UUID) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID(string(v)) + case string: + *u = UUID(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID) DeepCopyInto(out *UUID) { + *out = *u +} +
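A brief usage sketch, separate from the vendored file, showing how these string-backed formats round-trip through JSON; the UUID literal is arbitrary, and UnmarshalJSON treats a JSON null as a no-op since validation happens elsewhere.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	var u strfmt.UUID
	_ = json.Unmarshal([]byte(`"9a7b330a-a736-35ea-8f68-30d9e8c94b5d"`), &u)
	fmt.Println(u.String()) // the raw string, stored without validation

	var n strfmt.UUID
	_ = json.Unmarshal([]byte(`null`), &n)
	fmt.Println(n == "") // true: null leaves the value untouched
}

+// DeepCopy copies the receiver into a new UUID.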
+func (u *UUID) DeepCopy() *UUID { + if u == nil { + return nil + } + out := new(UUID) + u.DeepCopyInto(out) + return out +} + +// UUID3 represents a uuid3 string format +// +// swagger:strfmt uuid3 +type UUID3 string + +// MarshalText turns this instance into text +func (u UUID3) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID3) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID3(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID3) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID3(string(v)) + case string: + *u = UUID3(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID3 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID3) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID3) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID3) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID3) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID3(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID3) DeepCopyInto(out *UUID3) { + *out = *u +} + +// DeepCopy copies the receiver into a new UUID3. +func (u *UUID3) DeepCopy() *UUID3 { + if u == nil { + return nil + } + out := new(UUID3) + u.DeepCopyInto(out) + return out +} + +// UUID4 represents a uuid4 string format +// +// swagger:strfmt uuid4 +type UUID4 string + +// MarshalText turns this instance into text +func (u UUID4) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID4) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID4(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID4) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID4(string(v)) + case string: + *u = UUID4(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID4 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID4) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID4) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID4) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID4) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID4(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID4) DeepCopyInto(out *UUID4) { + *out = *u +} + +// DeepCopy copies the receiver into a new UUID4. 
+func (u *UUID4) DeepCopy() *UUID4 { + if u == nil { + return nil + } + out := new(UUID4) + u.DeepCopyInto(out) + return out +} + +// UUID5 represents a uuid5 string format +// +// swagger:strfmt uuid5 +type UUID5 string + +// MarshalText turns this instance into text +func (u UUID5) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID5) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID5(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID5) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID5(string(v)) + case string: + *u = UUID5(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID5 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID5) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID5) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID5) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID5) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID5(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID5) DeepCopyInto(out *UUID5) { + *out = *u +} + +// DeepCopy copies the receiver into a new UUID5. +func (u *UUID5) DeepCopy() *UUID5 { + if u == nil { + return nil + } + out := new(UUID5) + u.DeepCopyInto(out) + return out +} + +// UUID7 represents a uuid7 string format +// +// swagger:strfmt uuid7 +type UUID7 string + +// MarshalText turns this instance into text +func (u UUID7) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID7) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID7(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID7) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID7(string(v)) + case string: + *u = UUID7(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID7 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID7) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID7) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID7) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID7) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID7(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID7) DeepCopyInto(out *UUID7) { + *out = *u +} + +// DeepCopy copies the receiver into a new UUID7. 
+func (u *UUID7) DeepCopy() *UUID7 { + if u == nil { + return nil + } + out := new(UUID7) + u.DeepCopyInto(out) + return out +} + +// ISBN represents an isbn string format +// +// swagger:strfmt isbn +type ISBN string + +// MarshalText turns this instance into text +func (u ISBN) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *ISBN) UnmarshalText(data []byte) error { // validation is performed later on + *u = ISBN(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *ISBN) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = ISBN(string(v)) + case string: + *u = ISBN(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.ISBN from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u ISBN) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u ISBN) String() string { + return string(u) +} + +// MarshalJSON returns the ISBN as JSON +func (u ISBN) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the ISBN from JSON +func (u *ISBN) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = ISBN(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *ISBN) DeepCopyInto(out *ISBN) { + *out = *u +} + +// DeepCopy copies the receiver into a new ISBN. +func (u *ISBN) DeepCopy() *ISBN { + if u == nil { + return nil + } + out := new(ISBN) + u.DeepCopyInto(out) + return out +} + +// ISBN10 represents an isbn 10 string format +// +// swagger:strfmt isbn10 +type ISBN10 string + +// MarshalText turns this instance into text +func (u ISBN10) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *ISBN10) UnmarshalText(data []byte) error { // validation is performed later on + *u = ISBN10(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *ISBN10) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = ISBN10(string(v)) + case string: + *u = ISBN10(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.ISBN10 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u ISBN10) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u ISBN10) String() string { + return string(u) +} + +// MarshalJSON returns the ISBN10 as JSON +func (u ISBN10) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the ISBN10 from JSON +func (u *ISBN10) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = ISBN10(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *ISBN10) DeepCopyInto(out *ISBN10) { + *out = *u +} + +// DeepCopy copies the receiver into a new ISBN10. 
+func (u *ISBN10) DeepCopy() *ISBN10 { + if u == nil { + return nil + } + out := new(ISBN10) + u.DeepCopyInto(out) + return out +} + +// ISBN13 represents an isbn 13 string format +// +// swagger:strfmt isbn13 +type ISBN13 string + +// MarshalText turns this instance into text +func (u ISBN13) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *ISBN13) UnmarshalText(data []byte) error { // validation is performed later on + *u = ISBN13(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *ISBN13) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = ISBN13(string(v)) + case string: + *u = ISBN13(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.ISBN13 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u ISBN13) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u ISBN13) String() string { + return string(u) +} + +// MarshalJSON returns the ISBN13 as JSON +func (u ISBN13) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the ISBN13 from JSON +func (u *ISBN13) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = ISBN13(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *ISBN13) DeepCopyInto(out *ISBN13) { + *out = *u +} + +// DeepCopy copies the receiver into a new ISBN13. +func (u *ISBN13) DeepCopy() *ISBN13 { + if u == nil { + return nil + } + out := new(ISBN13) + u.DeepCopyInto(out) + return out +} + +// CreditCard represents a credit card string format +// +// swagger:strfmt creditcard +type CreditCard string + +// MarshalText turns this instance into text +func (u CreditCard) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *CreditCard) UnmarshalText(data []byte) error { // validation is performed later on + *u = CreditCard(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *CreditCard) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = CreditCard(string(v)) + case string: + *u = CreditCard(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.CreditCard from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u CreditCard) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u CreditCard) String() string { + return string(u) +} + +// MarshalJSON returns the CreditCard as JSON +func (u CreditCard) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the CreditCard from JSON +func (u *CreditCard) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = CreditCard(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *CreditCard) DeepCopyInto(out *CreditCard) { + *out = *u +} + +// DeepCopy copies the receiver into a new CreditCard. 
+func (u *CreditCard) DeepCopy() *CreditCard { + if u == nil { + return nil + } + out := new(CreditCard) + u.DeepCopyInto(out) + return out +} + +// SSN represents a social security string format +// +// swagger:strfmt ssn +type SSN string + +// MarshalText turns this instance into text +func (u SSN) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *SSN) UnmarshalText(data []byte) error { // validation is performed later on + *u = SSN(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *SSN) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = SSN(string(v)) + case string: + *u = SSN(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.SSN from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u SSN) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u SSN) String() string { + return string(u) +} + +// MarshalJSON returns the SSN as JSON +func (u SSN) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the SSN from JSON +func (u *SSN) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = SSN(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *SSN) DeepCopyInto(out *SSN) { + *out = *u +} + +// DeepCopy copies the receiver into a new SSN. +func (u *SSN) DeepCopy() *SSN { + if u == nil { + return nil + } + out := new(SSN) + u.DeepCopyInto(out) + return out +} + +// HexColor represents a hex color string format +// +// swagger:strfmt hexcolor +type HexColor string + +// MarshalText turns this instance into text +func (h HexColor) MarshalText() ([]byte, error) { + return []byte(string(h)), nil +} + +// UnmarshalText hydrates this instance from text +func (h *HexColor) UnmarshalText(data []byte) error { // validation is performed later on + *h = HexColor(string(data)) + return nil +} + +// Scan read a value from a database driver +func (h *HexColor) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *h = HexColor(string(v)) + case string: + *h = HexColor(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.HexColor from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (h HexColor) Value() (driver.Value, error) { + return driver.Value(string(h)), nil +} + +func (h HexColor) String() string { + return string(h) +} + +// MarshalJSON returns the HexColor as JSON +func (h HexColor) MarshalJSON() ([]byte, error) { + return json.Marshal(string(h)) +} + +// UnmarshalJSON sets the HexColor from JSON +func (h *HexColor) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *h = HexColor(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (h *HexColor) DeepCopyInto(out *HexColor) { + *out = *h +} + +// DeepCopy copies the receiver into a new HexColor. 
+func (h *HexColor) DeepCopy() *HexColor { + if h == nil { + return nil + } + out := new(HexColor) + h.DeepCopyInto(out) + return out +} + +// RGBColor represents a RGB color string format +// +// swagger:strfmt rgbcolor +type RGBColor string + +// MarshalText turns this instance into text +func (r RGBColor) MarshalText() ([]byte, error) { + return []byte(string(r)), nil +} + +// UnmarshalText hydrates this instance from text +func (r *RGBColor) UnmarshalText(data []byte) error { // validation is performed later on + *r = RGBColor(string(data)) + return nil +} + +// Scan read a value from a database driver +func (r *RGBColor) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *r = RGBColor(string(v)) + case string: + *r = RGBColor(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.RGBColor from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (r RGBColor) Value() (driver.Value, error) { + return driver.Value(string(r)), nil +} + +func (r RGBColor) String() string { + return string(r) +} + +// MarshalJSON returns the RGBColor as JSON +func (r RGBColor) MarshalJSON() ([]byte, error) { + return json.Marshal(string(r)) +} + +// UnmarshalJSON sets the RGBColor from JSON +func (r *RGBColor) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *r = RGBColor(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (r *RGBColor) DeepCopyInto(out *RGBColor) { + *out = *r +} + +// DeepCopy copies the receiver into a new RGBColor. +func (r *RGBColor) DeepCopy() *RGBColor { + if r == nil { + return nil + } + out := new(RGBColor) + r.DeepCopyInto(out) + return out +} + +// Password represents a password. +// This has no validations and is mainly used as a marker for UI components. +// +// swagger:strfmt password +type Password string + +// MarshalText turns this instance into text +func (r Password) MarshalText() ([]byte, error) { + return []byte(string(r)), nil +} + +// UnmarshalText hydrates this instance from text +func (r *Password) UnmarshalText(data []byte) error { // validation is performed later on + *r = Password(string(data)) + return nil +} + +// Scan read a value from a database driver +func (r *Password) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *r = Password(string(v)) + case string: + *r = Password(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Password from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (r Password) Value() (driver.Value, error) { + return driver.Value(string(r)), nil +} + +func (r Password) String() string { + return string(r) +} + +// MarshalJSON returns the Password as JSON +func (r Password) MarshalJSON() ([]byte, error) { + return json.Marshal(string(r)) +} + +// UnmarshalJSON sets the Password from JSON +func (r *Password) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *r = Password(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (r *Password) DeepCopyInto(out *Password) { + *out = *r +} + +// DeepCopy copies the receiver into a new Password. 
+func (r *Password) DeepCopy() *Password { + if r == nil { + return nil + } + out := new(Password) + r.DeepCopyInto(out) + return out +} + +func isRequestURI(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + return err == nil +} + +// isIPv4 checks if the string is an IP version 4. +func isIPv4(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ".") +} + +// isIPv6 checks if the string is an IP version 6. +func isIPv6(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ":") +} + +// isCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6) +func isCIDR(str string) bool { + _, _, err := net.ParseCIDR(str) + return err == nil +} + +// isMAC checks if a string is a valid MAC address. +// Possible MAC formats: +// 01:23:45:67:89:ab +// 01:23:45:67:89:ab:cd:ef +// 01-23-45-67-89-ab +// 01-23-45-67-89-ab-cd-ef +// 0123.4567.89ab +// 0123.4567.89ab.cdef +func isMAC(str string) bool { + _, err := net.ParseMAC(str) + return err == nil +} + +// isISBN checks if the string is an ISBN (version 10 or 13). +// If the version value is not equal to 10 or 13, both variants are checked. +func isISBN(str string, version int) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + var checksum int32 + var i int32 + + switch version { + case isbnVersion10: + if !rxISBN10.MatchString(sanitized) { + return false + } + for i = range isbnVersion10 - 1 { + checksum += (i + 1) * int32(sanitized[i]-'0') + } + if sanitized[isbnVersion10-1] == 'X' { + checksum += isbnVersion10 * isbnVersion10 + } else { + checksum += isbnVersion10 * int32(sanitized[isbnVersion10-1]-'0') + } + if checksum%(isbnVersion10+1) == 0 { + return true + } + return false + case isbnVersion13: + if !rxISBN13.MatchString(sanitized) { + return false + } + factor := []int32{1, 3} + for i = range isbnVersion13 - 1 { + checksum += factor[i%2] * int32(sanitized[i]-'0') + } + return (int32(sanitized[isbnVersion13-1]-'0'))-((decimalBase-(checksum%decimalBase))%decimalBase) == 0 + default: + return isISBN(str, isbnVersion10) || isISBN(str, isbnVersion13) + } +} + +// isISBN10 checks if the string is an ISBN version 10. +func isISBN10(str string) bool { + return isISBN(str, isbnVersion10) +} + +// isISBN13 checks if the string is an ISBN version 13. +func isISBN13(str string) bool { + return isISBN(str, isbnVersion13) +} + +// isCreditCard checks if the string is a credit card. +func isCreditCard(str string) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + if !rxCreditCard.MatchString(sanitized) { + return false + } + + number, err := strconv.ParseInt(sanitized, 0, 64) + if err != nil { + return false + } + number, lastDigit := number/decimalBase, number%decimalBase + + var sum int64 + for i := 0; number > 0; i++ { + digit := number % decimalBase + + if i%2 == 0 { + digit *= 2 + if digit > decimalBase-1 { + digit -= decimalBase - 1 + } + } + + sum += digit + number /= decimalBase + } + + return (sum+lastDigit)%decimalBase == 0 +} + +// isSSN will validate the given string as a U.S. Social Security Number +func isSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// isHexcolor checks if the string is a hexadecimal color. +func isHexcolor(str string) bool { + return rxHexcolor.MatchString(str) +} +
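For reference, a small standalone sketch (not part of the vendored file) of the Luhn checksum that isCreditCard above computes digit by digit, using the classic test value 79927398713.

package main

import "fmt"

// luhnValid reports whether the digit string passes the Luhn check: starting
// from the digit left of the check digit, every second digit is doubled
// (subtracting 9 when the result exceeds 9) before summing.
func luhnValid(s string) bool {
	sum := 0
	double := true
	for i := len(s) - 2; i >= 0; i-- {
		d := int(s[i] - '0')
		if double {
			d *= 2
			if d > 9 {
				d -= 9
			}
		}
		sum += d
		double = !double
	}
	return (sum+int(s[len(s)-1]-'0'))%10 == 0
}

func main() {
	fmt.Println(luhnValid("79927398713")) // true
	fmt.Println(luhnValid("79927398710")) // false
}

+// isRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB).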
+func isRGBcolor(str string) bool { + return rxRGBcolor.MatchString(str) +} + +// isBase64 checks if a string is base64 encoded. +func isBase64(str string) bool { + _, err := base64.StdEncoding.DecodeString(str) + + return err == nil +} diff --git a/vendor/github.com/go-openapi/strfmt/doc.go b/vendor/github.com/go-openapi/strfmt/doc.go new file mode 100644 index 000000000000..5825b72108ed --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/doc.go @@ -0,0 +1,7 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package strfmt contains custom string formats +// +// TODO: add info on how to define and register a custom format +package strfmt diff --git a/vendor/github.com/go-openapi/strfmt/duration.go b/vendor/github.com/go-openapi/strfmt/duration.go new file mode 100644 index 000000000000..908c1b02f3c8 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/duration.go @@ -0,0 +1,206 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +func init() { + d := Duration(0) + // register this format in the default registry + Default.Add("duration", &d, IsDuration) +} + +const ( + hoursInDay = 24 + daysInWeek = 7 +) + +var ( + timeUnits = [][]string{ + {"ns", "nano"}, + {"us", "µs", "micro"}, + {"ms", "milli"}, + {"s", "sec"}, + {"m", "min"}, + {"h", "hr", "hour"}, + {"d", "day"}, + {"w", "wk", "week"}, + } + + timeMultiplier = map[string]time.Duration{ + "ns": time.Nanosecond, + "us": time.Microsecond, + "ms": time.Millisecond, + "s": time.Second, + "m": time.Minute, + "h": time.Hour, + "d": hoursInDay * time.Hour, + "w": hoursInDay * daysInWeek * time.Hour, + } + + durationMatcher = regexp.MustCompile(`^(((?:-\s?)?\d+)(\.\d+)?\s*([A-Za-zµ]+))`) +) + +// IsDuration returns true if the provided string is a valid duration +func IsDuration(str string) bool { + _, err := ParseDuration(str) + return err == nil +} + +// Duration represents a duration +// +// Duration stores a period of time as a nanosecond count, with the largest +// repesentable duration being approximately 290 years. 
+// +// swagger:strfmt duration +type Duration time.Duration + +// MarshalText turns this instance into text +func (d Duration) MarshalText() ([]byte, error) { + return []byte(time.Duration(d).String()), nil +} + +// UnmarshalText hydrates this instance from text +func (d *Duration) UnmarshalText(data []byte) error { // validation is performed later on + dd, err := ParseDuration(string(data)) + if err != nil { + return err + } + *d = Duration(dd) + return nil +} + +// ParseDuration parses a duration from a string, compatible with scala duration syntax +func ParseDuration(cand string) (time.Duration, error) { + if dur, err := time.ParseDuration(cand); err == nil { + return dur, nil + } + + var dur time.Duration + ok := false + const expectGroups = 4 + for _, match := range durationMatcher.FindAllStringSubmatch(cand, -1) { + if len(match) < expectGroups { + continue + } + + // remove possible leading - and spaces + value, negative := strings.CutPrefix(match[2], "-") + + // if the duration contains a decimal separator determine a divising factor + const neutral = 1.0 + divisor := neutral + decimal, hasDecimal := strings.CutPrefix(match[3], ".") + if hasDecimal { + divisor = math.Pow10(len(decimal)) + value += decimal // consider the value as an integer: will change units later on + } + + // if the string is a valid duration, parse it + factor, err := strconv.Atoi(strings.TrimSpace(value)) // converts string to int + if err != nil { + return 0, err + } + + if negative { + factor = -factor + } + + unit := strings.ToLower(strings.TrimSpace(match[4])) + + for _, variants := range timeUnits { + last := len(variants) - 1 + multiplier := timeMultiplier[variants[0]] + + for i, variant := range variants { + if (last == i && strings.HasPrefix(unit, variant)) || strings.EqualFold(variant, unit) { + ok = true + if divisor != neutral { + multiplier = time.Duration(float64(multiplier) / divisor) // convert to duration only after having reduced the scale + } + dur += (time.Duration(factor) * multiplier) + } + } + } + } + + if ok { + return dur, nil + } + return 0, fmt.Errorf("unable to parse %s as duration: %w", cand, ErrFormat) +} + +// Scan reads a Duration value from database driver type. +func (d *Duration) Scan(raw any) error { + switch v := raw.(type) { + // TODO: case []byte: // ? + case int64: + *d = Duration(v) + case float64: + *d = Duration(int64(v)) + case nil: + *d = Duration(0) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Duration from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts Duration to a primitive value ready to be written to a database. +func (d Duration) Value() (driver.Value, error) { + return driver.Value(int64(d)), nil +} + +// String converts this duration to a string +func (d Duration) String() string { + return time.Duration(d).String() +} + +// MarshalJSON returns the Duration as JSON +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Duration(d).String()) +} + +// UnmarshalJSON sets the Duration from JSON +func (d *Duration) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + + var dstr string + if err := json.Unmarshal(data, &dstr); err != nil { + return err + } + tt, err := ParseDuration(dstr) + if err != nil { + return err + } + *d = Duration(tt) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (d *Duration) DeepCopyInto(out *Duration) { + *out = *d +} + +// DeepCopy copies the receiver into a new Duration. 
+func (d *Duration) DeepCopy() *Duration { + if d == nil { + return nil + } + out := new(Duration) + d.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/go-openapi/strfmt/errors.go b/vendor/github.com/go-openapi/strfmt/errors.go new file mode 100644 index 000000000000..9faa37cf2e51 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/errors.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +type strfmtError string + +// ErrFormat is an error raised by the strfmt package +const ErrFormat strfmtError = "format error" + +func (e strfmtError) Error() string { + return string(e) +} diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go new file mode 100644 index 000000000000..d9d9e04c2080 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -0,0 +1,297 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "encoding" + "fmt" + "reflect" + "slices" + "strings" + "sync" + "time" + + "github.com/go-openapi/errors" + "github.com/go-viper/mapstructure/v2" +) + +// Default is the default formats registry +var Default = NewSeededFormats(nil, nil) + +// Validator represents a validator for a string format. +type Validator func(string) bool + +// NewFormats creates a new formats registry seeded with the values from the default +func NewFormats() Registry { + //nolint:forcetypeassert + return NewSeededFormats(Default.(*defaultFormats).data, nil) +} + +// NewSeededFormats creates a new formats registry +func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { + if normalizer == nil { + normalizer = DefaultNameNormalizer + } + // copy here, don't modify the original + return &defaultFormats{ + data: slices.Clone(seeds), + normalizeName: normalizer, + } +} + +type knownFormat struct { + Name string + OrigName string + Type reflect.Type + Validator Validator +} + +// NameNormalizer is a function that normalizes a format name. 
+type NameNormalizer func(string) string + +// DefaultNameNormalizer removes all dashes +func DefaultNameNormalizer(name string) string { + return strings.ReplaceAll(name, "-", "") +} + +type defaultFormats struct { + sync.Mutex + + data []knownFormat + normalizeName NameNormalizer +} + +// MapStructureHookFunc is a decode hook function for mapstructure +func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { + return func(from reflect.Type, to reflect.Type, obj any) (any, error) { + if from.Kind() != reflect.String { + return obj, nil + } + data, ok := obj.(string) + if !ok { + return nil, fmt.Errorf("failed to cast %+v to string: %w", obj, ErrFormat) + } + + for _, v := range f.data { + tpe, _ := f.GetType(v.Name) + if to == tpe { + switch v.Name { + case "date": + d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) + if err != nil { + return nil, err + } + return Date(d), nil + case "datetime": + input := data + if len(input) == 0 { + return nil, fmt.Errorf("empty string is an invalid datetime format: %w", ErrFormat) + } + return ParseDateTime(input) + case "duration": + dur, err := ParseDuration(data) + if err != nil { + return nil, err + } + return Duration(dur), nil + case "uri": + return URI(data), nil + case "email": + return Email(data), nil + case "uuid": + return UUID(data), nil + case "uuid3": + return UUID3(data), nil + case "uuid4": + return UUID4(data), nil + case "uuid5": + return UUID5(data), nil + case "uuid7": + return UUID7(data), nil + case "hostname": + return Hostname(data), nil + case "ipv4": + return IPv4(data), nil + case "ipv6": + return IPv6(data), nil + case "cidr": + return CIDR(data), nil + case "mac": + return MAC(data), nil + case "isbn": + return ISBN(data), nil + case "isbn10": + return ISBN10(data), nil + case "isbn13": + return ISBN13(data), nil + case "creditcard": + return CreditCard(data), nil + case "ssn": + return SSN(data), nil + case "hexcolor": + return HexColor(data), nil + case "rgbcolor": + return RGBColor(data), nil + case "byte": + return Base64(data), nil + case "password": + return Password(data), nil + case "ulid": + ulid, err := ParseULID(data) + if err != nil { + return nil, err + } + return ulid, nil + default: + return nil, errors.InvalidTypeName(v.Name) + } + } + } + return data, nil + } +} + +// Add adds a new format, return true if this was a new item instead of a replacement +func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bool { + f.Lock() + defer f.Unlock() + + nme := f.normalizeName(name) + + tpe := reflect.TypeOf(strfmt) + if tpe.Kind() == reflect.Ptr { + tpe = tpe.Elem() + } + + for i := range f.data { + v := &f.data[i] + if v.Name == nme { + v.Type = tpe + v.Validator = validator + return false + } + } + + // turns out it's new after all + f.data = append(f.data, knownFormat{Name: nme, OrigName: name, Type: tpe, Validator: validator}) + return true +} + +// GetType gets the type for the specified name +func (f *defaultFormats) GetType(name string) (reflect.Type, bool) { + f.Lock() + defer f.Unlock() + nme := f.normalizeName(name) + for _, v := range f.data { + if v.Name == nme { + return v.Type, true + } + } + return nil, false +} + +// DelByName removes the format by the specified name, returns true when an item was actually removed +func (f *defaultFormats) DelByName(name string) bool { + f.Lock() + defer f.Unlock() + + nme := f.normalizeName(name) + + for i, v := range f.data { + if v.Name == nme { + f.data[i] = knownFormat{} // release + f.data = 
append(f.data[:i], f.data[i+1:]...) + return true + } + } + return false +} + +// DelByFormat removes the specified format, returns true when an item was actually removed +func (f *defaultFormats) DelByFormat(strfmt Format) bool { + f.Lock() + defer f.Unlock() + + tpe := reflect.TypeOf(strfmt) + if tpe.Kind() == reflect.Ptr { + tpe = tpe.Elem() + } + + for i, v := range f.data { + if v.Type == tpe { + f.data[i] = knownFormat{} // release + f.data = append(f.data[:i], f.data[i+1:]...) + return true + } + } + return false +} + +// ContainsName returns true if this registry contains the specified name +func (f *defaultFormats) ContainsName(name string) bool { + f.Lock() + defer f.Unlock() + nme := f.normalizeName(name) + for _, v := range f.data { + if v.Name == nme { + return true + } + } + return false +} + +// ContainsFormat returns true if this registry contains the specified format +func (f *defaultFormats) ContainsFormat(strfmt Format) bool { + f.Lock() + defer f.Unlock() + tpe := reflect.TypeOf(strfmt) + if tpe.Kind() == reflect.Ptr { + tpe = tpe.Elem() + } + + for _, v := range f.data { + if v.Type == tpe { + return true + } + } + return false +} + +// Validates passed data against format. +// +// Note that the format name is automatically normalized, e.g. one may +// use "date-time" to use the "datetime" format validator. +func (f *defaultFormats) Validates(name, data string) bool { + f.Lock() + defer f.Unlock() + nme := f.normalizeName(name) + for _, v := range f.data { + if v.Name == nme { + return v.Validator(data) + } + } + return false +} + +// Parse a string into the appropriate format representation type. +// +// E.g. parsing a string a "date" will return a Date type. +func (f *defaultFormats) Parse(name, data string) (any, error) { + f.Lock() + defer f.Unlock() + nme := f.normalizeName(name) + for _, v := range f.data { + if v.Name == nme { + nw := reflect.New(v.Type).Interface() + if dec, ok := nw.(encoding.TextUnmarshaler); ok { + if err := dec.UnmarshalText([]byte(data)); err != nil { + return nil, err + } + return nw, nil + } + return nil, errors.InvalidTypeName(name) + } + } + return nil, errors.InvalidTypeName(name) +} diff --git a/vendor/github.com/go-openapi/strfmt/ifaces.go b/vendor/github.com/go-openapi/strfmt/ifaces.go new file mode 100644 index 000000000000..1b9e72c64ebc --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/ifaces.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "encoding" + "reflect" + + "github.com/go-viper/mapstructure/v2" +) + +// Format represents a string format. +// +// All implementations of Format provide a string representation and text +// marshaling/unmarshaling interface to be used by encoders (e.g. encoding/json). +type Format interface { + String() string + encoding.TextMarshaler + encoding.TextUnmarshaler +} + +// Registry is a registry of string formats, with a validation method. 
+type Registry interface { + Add(string, Format, Validator) bool + DelByName(string) bool + GetType(string) (reflect.Type, bool) + ContainsName(string) bool + Validates(string, string) bool + Parse(string, string) (any, error) + MapStructureHookFunc() mapstructure.DecodeHookFunc +} diff --git a/vendor/github.com/go-openapi/strfmt/mongo.go b/vendor/github.com/go-openapi/strfmt/mongo.go new file mode 100644 index 000000000000..641fed9b1a67 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/mongo.go @@ -0,0 +1,646 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "time" + + "github.com/oklog/ulid" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/bsontype" + bsonprim "go.mongodb.org/mongo-driver/bson/primitive" +) + +var ( + _ bson.Marshaler = Date{} + _ bson.Unmarshaler = &Date{} + _ bson.Marshaler = Base64{} + _ bson.Unmarshaler = &Base64{} + _ bson.Marshaler = Duration(0) + _ bson.Unmarshaler = (*Duration)(nil) + _ bson.Marshaler = DateTime{} + _ bson.Unmarshaler = &DateTime{} + _ bson.Marshaler = ULID{} + _ bson.Unmarshaler = &ULID{} + _ bson.Marshaler = URI("") + _ bson.Unmarshaler = (*URI)(nil) + _ bson.Marshaler = Email("") + _ bson.Unmarshaler = (*Email)(nil) + _ bson.Marshaler = Hostname("") + _ bson.Unmarshaler = (*Hostname)(nil) + _ bson.Marshaler = IPv4("") + _ bson.Unmarshaler = (*IPv4)(nil) + _ bson.Marshaler = IPv6("") + _ bson.Unmarshaler = (*IPv6)(nil) + _ bson.Marshaler = CIDR("") + _ bson.Unmarshaler = (*CIDR)(nil) + _ bson.Marshaler = MAC("") + _ bson.Unmarshaler = (*MAC)(nil) + _ bson.Marshaler = Password("") + _ bson.Unmarshaler = (*Password)(nil) + _ bson.Marshaler = UUID("") + _ bson.Unmarshaler = (*UUID)(nil) + _ bson.Marshaler = UUID3("") + _ bson.Unmarshaler = (*UUID3)(nil) + _ bson.Marshaler = UUID4("") + _ bson.Unmarshaler = (*UUID4)(nil) + _ bson.Marshaler = UUID5("") + _ bson.Unmarshaler = (*UUID5)(nil) + _ bson.Marshaler = UUID7("") + _ bson.Unmarshaler = (*UUID7)(nil) + _ bson.Marshaler = ISBN("") + _ bson.Unmarshaler = (*ISBN)(nil) + _ bson.Marshaler = ISBN10("") + _ bson.Unmarshaler = (*ISBN10)(nil) + _ bson.Marshaler = ISBN13("") + _ bson.Unmarshaler = (*ISBN13)(nil) + _ bson.Marshaler = CreditCard("") + _ bson.Unmarshaler = (*CreditCard)(nil) + _ bson.Marshaler = SSN("") + _ bson.Unmarshaler = (*SSN)(nil) + _ bson.Marshaler = HexColor("") + _ bson.Unmarshaler = (*HexColor)(nil) + _ bson.Marshaler = RGBColor("") + _ bson.Unmarshaler = (*RGBColor)(nil) + _ bson.Marshaler = ObjectId{} + _ bson.Unmarshaler = &ObjectId{} + + _ bson.ValueMarshaler = DateTime{} + _ bson.ValueUnmarshaler = &DateTime{} + _ bson.ValueMarshaler = ObjectId{} + _ bson.ValueUnmarshaler = &ObjectId{} +) + +const ( + millisec = 1000 + microsec = 1_000_000 + bsonDateTimeSize = 8 +) + +func (d Date) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": d.String()}) +} + +func (d *Date) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if data, ok := m["data"].(string); ok { + rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(rd) + return nil + } + + return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (b Base64) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": 
b.String()}) +} + +// UnmarshalBSON document into this value +func (b *Base64) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if bd, ok := m["data"].(string); ok { + vb, err := base64.StdEncoding.DecodeString(bd) + if err != nil { + return err + } + *b = Base64(vb) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as base64: %w", ErrFormat) +} + +func (d Duration) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": d.String()}) +} + +func (d *Duration) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if data, ok := m["data"].(string); ok { + rd, err := ParseDuration(data) + if err != nil { + return err + } + *d = Duration(rd) + return nil + } + + return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) +} + +// MarshalBSON renders the DateTime as a BSON document +func (t DateTime) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": t}) +} + +// UnmarshalBSON reads the DateTime from a BSON document +func (t *DateTime) UnmarshalBSON(data []byte) error { + var obj struct { + Data DateTime + } + + if err := bson.Unmarshal(data, &obj); err != nil { + return err + } + + *t = obj.Data + + return nil +} + +// MarshalBSONValue is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +// +// Marshals a DateTime as a bson.TypeDateTime, an int64 representing +// milliseconds since epoch. +func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { + // UnixNano cannot be used directly, the result of calling UnixNano on the zero + // Time is undefined. Thats why we use time.Nanosecond() instead. + + tNorm := NormalizeTimeForMarshal(time.Time(t)) + i64 := tNorm.Unix()*millisec + int64(tNorm.Nanosecond())/microsec + buf := make([]byte, bsonDateTimeSize) + binary.LittleEndian.PutUint64(buf, uint64(i64)) //nolint:gosec // it's okay to handle negative int64 this way + + return bson.TypeDateTime, buf, nil +} + +// UnmarshalBSONValue is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it +// wishes to retain the data after returning. 
+func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { + if tpe == bson.TypeNull { + *t = DateTime{} + return nil + } + + if len(data) != bsonDateTimeSize { + return fmt.Errorf("bson date field length not exactly %d bytes: %w", bsonDateTimeSize, ErrFormat) + } + + i64 := int64(binary.LittleEndian.Uint64(data)) //nolint:gosec // it's okay if we overflow and get a negative datetime + *t = DateTime(time.Unix(i64/millisec, i64%millisec*microsec)) + + return nil +} + +// MarshalBSON document from this value +func (u ULID) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *ULID) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + id, err := ulid.ParseStrict(ud) + if err != nil { + return fmt.Errorf("couldn't parse bson bytes as ULID: %w: %w", err, ErrFormat) + } + u.ULID = id + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ULID: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u URI) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *URI) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = URI(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as uri: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (e Email) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": e.String()}) +} + +// UnmarshalBSON document into this value +func (e *Email) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *e = Email(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as email: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (h Hostname) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": h.String()}) +} + +// UnmarshalBSON document into this value +func (h *Hostname) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *h = Hostname(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as hostname: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u IPv4) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *IPv4) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = IPv4(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ipv4: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u IPv6) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *IPv6) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = IPv6(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ipv6: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u CIDR) 
MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *CIDR) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = CIDR(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as CIDR: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u MAC) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *MAC) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = MAC(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as MAC: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (r Password) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": r.String()}) +} + +// UnmarshalBSON document into this value +func (r *Password) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *r = Password(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as Password: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID3) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID3) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID3(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID3: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID4) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID4) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID4(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID4: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID5) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID5) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID5(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID5: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID7) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID7) UnmarshalBSON(data []byte) error { + var m bson.M 
+ if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID7(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID7: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u ISBN) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *ISBN) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = ISBN(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ISBN: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u ISBN10) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *ISBN10) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = ISBN10(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ISBN10: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u ISBN13) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *ISBN13) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = ISBN13(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ISBN13: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u CreditCard) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *CreditCard) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = CreditCard(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as CreditCard: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u SSN) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *SSN) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = SSN(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as SSN: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (h HexColor) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": h.String()}) +} + +// UnmarshalBSON document into this value +func (h *HexColor) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *h = HexColor(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as HexColor: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (r RGBColor) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": r.String()}) +} + +// UnmarshalBSON document into this value +func (r *RGBColor) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *r = RGBColor(ud) + return nil + } + return 
fmt.Errorf("couldn't unmarshal bson bytes as RGBColor: %w", ErrFormat) +} + +// MarshalBSON renders the object id as a BSON document +func (id ObjectId) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) +} + +// UnmarshalBSON reads the objectId from a BSON document +func (id *ObjectId) UnmarshalBSON(data []byte) error { + var obj struct { + Data bsonprim.ObjectID + } + if err := bson.Unmarshal(data, &obj); err != nil { + return err + } + *id = ObjectId(obj.Data) + return nil +} + +// MarshalBSONValue is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { + oid := bsonprim.ObjectID(id) + return bson.TypeObjectID, oid[:], nil +} + +// UnmarshalBSONValue is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it +// wishes to retain the data after returning. +func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error { + var oid bsonprim.ObjectID + copy(oid[:], data) + *id = ObjectId(oid) + return nil +} diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go new file mode 100644 index 000000000000..8085aaf69658 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -0,0 +1,258 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "regexp" + "strings" + "time" +) + +var ( + // UnixZero sets the zero unix UTC timestamp we want to compare against. + // + // Unix 0 for an EST timezone is not equivalent to a UTC timezone. + UnixZero = time.Unix(0, 0).UTC() +) + +func init() { + dt := DateTime{} + Default.Add("datetime", &dt, IsDateTime) +} + +// IsDateTime returns true when the string is a valid date-time. +// +// JSON datetime format consist of a date and a time separated by a "T", e.g. 2012-04-23T18:25:43.511Z. 
+func IsDateTime(str string) bool { + const ( + minDateTimeLength = 4 + minParts = 2 + ) + if len(str) < minDateTimeLength { + return false + } + s := strings.Split(strings.ToLower(str), "t") + if len(s) < minParts || !IsDate(s[0]) { + return false + } + + matches := rxDateTime.FindAllStringSubmatch(s[1], -1) + if len(matches) == 0 || len(matches[0]) == 0 { + return false + } + m := matches[0] + res := m[1] <= "23" && m[2] <= "59" && m[3] <= "59" + return res +} + +const ( + // RFC3339Millis represents a ISO8601 format to millis instead of to nanos + RFC3339Millis = "2006-01-02T15:04:05.000Z07:00" + // RFC3339MillisNoColon represents a ISO8601 format to millis instead of to nanos + RFC3339MillisNoColon = "2006-01-02T15:04:05.000Z0700" + // RFC3339Micro represents a ISO8601 format to micro instead of to nano + RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" + // RFC3339MicroNoColon represents a ISO8601 format to micro instead of to nano + RFC3339MicroNoColon = "2006-01-02T15:04:05.000000Z0700" + // ISO8601LocalTime represents a ISO8601 format to ISO8601 in local time (no timezone) + ISO8601LocalTime = "2006-01-02T15:04:05" + // ISO8601TimeWithReducedPrecision represents a ISO8601 format with reduced precision (dropped secs) + ISO8601TimeWithReducedPrecision = "2006-01-02T15:04Z" + // ISO8601TimeWithReducedPrecisionLocaltime represents a ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone) + ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04" + // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern. + ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" + // ISO8601TimeUniversalSortableDateTimePatternShortForm is the short form of ISO8601TimeUniversalSortableDateTimePattern + ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02" + // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 + DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` +) + +var ( + rxDateTime = regexp.MustCompile(DateTimePattern) + + // DateTimeFormats is the collection of formats used by ParseDateTime() + DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm} + + // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) + MarshalFormat = RFC3339Millis + + // NormalizeTimeForMarshal provides a normalization function on time before marshalling (e.g. time.UTC). + // By default, the time value is not changed. + NormalizeTimeForMarshal = func(t time.Time) time.Time { return t } + + // DefaultTimeLocation provides a location for a time when the time zone is not encoded in the string (ex: ISO8601 Local variants). 
+ DefaultTimeLocation = time.UTC +) + +// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch +func ParseDateTime(data string) (DateTime, error) { + if data == "" { + return NewDateTime(), nil + } + var lastError error + for _, layout := range DateTimeFormats { + dd, err := time.ParseInLocation(layout, data, DefaultTimeLocation) + if err != nil { + lastError = err + continue + } + return DateTime(dd), nil + } + return DateTime{}, lastError +} + +// DateTime is a time but it serializes to ISO8601 format with millis. +// +// It knows how to read 3 different variations of a RFC3339 date time. +// Most APIs we encounter want either millisecond or second precision times. +// This just tries to make it worry-free. +// +// swagger:strfmt date-time +type DateTime time.Time + +// NewDateTime is a representation of the UNIX epoch (January 1, 1970 00:00:00 UTC) for the [DateTime] type. +// +// Notice that this is not the zero value of the [DateTime] type. +// +// You may use [DateTime.IsUNIXZero] to check against this value. +func NewDateTime() DateTime { + return DateTime(time.Unix(0, 0).UTC()) +} + +// MakeDateTime is a representation of the zero value of the [DateTime] type (January 1, year 1, 00:00:00 UTC). +// +// You may use [Datetime.IsZero] to check against this value. +func MakeDateTime() DateTime { + return DateTime(time.Time{}) +} + +// String converts this time to a string +func (t DateTime) String() string { + return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat) +} + +// IsZero returns whether the date time is a zero value +func (t DateTime) IsZero() bool { + return time.Time(t).IsZero() +} + +// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC(). +func (t DateTime) IsUnixZero() bool { + return time.Time(t).Equal(UnixZero) +} + +// MarshalText implements the text marshaller interface +func (t DateTime) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the text unmarshaller interface +func (t *DateTime) UnmarshalText(text []byte) error { + tt, err := ParseDateTime(string(text)) + if err != nil { + return err + } + *t = tt + return nil +} + +// Scan scans a DateTime value from database driver type. +func (t *DateTime) Scan(raw any) error { + // TODO: case int64: and case float64: ? + switch v := raw.(type) { + case []byte: + return t.UnmarshalText(v) + case string: + return t.UnmarshalText([]byte(v)) + case time.Time: + *t = DateTime(v) + case nil: + *t = DateTime{} + default: + return fmt.Errorf("cannot sql.Scan() strfmt.DateTime from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts DateTime to a primitive value ready to written to a database. +func (t DateTime) Value() (driver.Value, error) { + return driver.Value(t.String()), nil +} + +// MarshalJSON returns the DateTime as JSON +func (t DateTime) MarshalJSON() ([]byte, error) { + return json.Marshal(NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)) +} + +// UnmarshalJSON sets the DateTime from JSON +func (t *DateTime) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + + var tstr string + if err := json.Unmarshal(data, &tstr); err != nil { + return err + } + tt, err := ParseDateTime(tstr) + if err != nil { + return err + } + *t = tt + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (t *DateTime) DeepCopyInto(out *DateTime) { + *out = *t +} + +// DeepCopy copies the receiver into a new DateTime. 
+func (t *DateTime) DeepCopy() *DateTime { + if t == nil { + return nil + } + out := new(DateTime) + t.DeepCopyInto(out) + return out +} + +// GobEncode implements the gob.GobEncoder interface. +func (t DateTime) GobEncode() ([]byte, error) { + return t.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface. +func (t *DateTime) GobDecode(data []byte) error { + return t.UnmarshalBinary(data) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (t DateTime) MarshalBinary() ([]byte, error) { + return NormalizeTimeForMarshal(time.Time(t)).MarshalBinary() +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (t *DateTime) UnmarshalBinary(data []byte) error { + var original time.Time + + err := original.UnmarshalBinary(data) + if err != nil { + return err + } + + *t = DateTime(original) + + return nil +} + +// Equal checks if two DateTime instances are equal using time.Time's Equal method +func (t DateTime) Equal(t2 DateTime) bool { + return time.Time(t).Equal(time.Time(t2)) +} diff --git a/vendor/github.com/go-openapi/strfmt/ulid.go b/vendor/github.com/go-openapi/strfmt/ulid.go new file mode 100644 index 000000000000..85c5b53e6c7a --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/ulid.go @@ -0,0 +1,208 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + cryptorand "crypto/rand" + "database/sql/driver" + "encoding/json" + "fmt" + "io" + "sync" + + "github.com/oklog/ulid" +) + +// ULID represents a ulid string format +// ref: +// +// https://github.com/ulid/spec +// +// impl: +// +// https://github.com/oklog/ulid +// +// swagger:strfmt ulid +type ULID struct { + ulid.ULID +} + +var ( + ulidEntropyPool = sync.Pool{ + New: func() any { + return cryptorand.Reader + }, + } + + ULIDScanDefaultFunc = func(raw any) (ULID, error) { + u := NewULIDZero() + switch x := raw.(type) { + case nil: + // zerp ulid + return u, nil + case string: + if x == "" { + // zero ulid + return u, nil + } + return u, u.UnmarshalText([]byte(x)) + case []byte: + return u, u.UnmarshalText(x) + } + + return u, fmt.Errorf("cannot sql.Scan() strfmt.ULID from: %#v: %w", raw, ulid.ErrScanValue) + } + + // ULIDScanOverrideFunc allows you to override the Scan method of the ULID type + ULIDScanOverrideFunc = ULIDScanDefaultFunc + + ULIDValueDefaultFunc = func(u ULID) (driver.Value, error) { + return driver.Value(u.String()), nil + } + + // ULIDValueOverrideFunc allows you to override the Value method of the ULID type + ULIDValueOverrideFunc = ULIDValueDefaultFunc +) + +func init() { + // register formats in the default registry: + // - ulid + ulid := ULID{} + Default.Add("ulid", &ulid, IsULID) +} + +// IsULID checks if provided string is ULID format +// Be noticed that this function considers overflowed ULID as non-ulid. 
+// For more details see https://github.com/ulid/spec +func IsULID(str string) bool { + _, err := ulid.ParseStrict(str) + return err == nil +} + +// ParseULID parses a string that represents an valid ULID +func ParseULID(str string) (ULID, error) { + var u ULID + + return u, u.UnmarshalText([]byte(str)) +} + +// NewULIDZero returns a zero valued ULID type +func NewULIDZero() ULID { + return ULID{} +} + +// NewULID generates new unique ULID value and a error if any +func NewULID() (ULID, error) { + var u ULID + + obj := ulidEntropyPool.Get() + entropy, ok := obj.(io.Reader) + if !ok { + return u, fmt.Errorf("failed to cast %+v to io.Reader: %w", obj, ErrFormat) + } + + id, err := ulid.New(ulid.Now(), entropy) + if err != nil { + return u, err + } + ulidEntropyPool.Put(entropy) + + u.ULID = id + return u, nil +} + +// GetULID returns underlying instance of ULID +func (u *ULID) GetULID() any { + return u.ULID +} + +// MarshalText returns this instance into text +func (u ULID) MarshalText() ([]byte, error) { + return u.ULID.MarshalText() +} + +// UnmarshalText hydrates this instance from text +func (u *ULID) UnmarshalText(data []byte) error { // validation is performed later on + return u.ULID.UnmarshalText(data) +} + +// Scan reads a value from a database driver +func (u *ULID) Scan(raw any) error { + ul, err := ULIDScanOverrideFunc(raw) + if err == nil { + *u = ul + } + return err +} + +// Value converts a value to a database driver value +func (u ULID) Value() (driver.Value, error) { + return ULIDValueOverrideFunc(u) +} + +func (u ULID) String() string { + return u.ULID.String() +} + +// MarshalJSON returns the ULID as JSON +func (u ULID) MarshalJSON() ([]byte, error) { + return json.Marshal(u.String()) +} + +// UnmarshalJSON sets the ULID from JSON +func (u *ULID) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + id, err := ulid.ParseStrict(ustr) + if err != nil { + return fmt.Errorf("couldn't parse JSON value as ULID: %w", err) + } + u.ULID = id + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *ULID) DeepCopyInto(out *ULID) { + *out = *u +} + +// DeepCopy copies the receiver into a new ULID. +func (u *ULID) DeepCopy() *ULID { + if u == nil { + return nil + } + out := new(ULID) + u.DeepCopyInto(out) + return out +} + +// GobEncode implements the gob.GobEncoder interface. +func (u ULID) GobEncode() ([]byte, error) { + return u.ULID.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface. +func (u *ULID) GobDecode(data []byte) error { + return u.ULID.UnmarshalBinary(data) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u ULID) MarshalBinary() ([]byte, error) { + return u.ULID.MarshalBinary() +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (u *ULID) UnmarshalBinary(data []byte) error { + return u.ULID.UnmarshalBinary(data) +} + +// Equal checks if two ULID instances are equal by their underlying type +func (u ULID) Equal(other ULID) bool { + return u.ULID == other.ULID +} diff --git a/vendor/github.com/go-openapi/swag/.codecov.yml b/vendor/github.com/go-openapi/swag/.codecov.yml new file mode 100644 index 000000000000..3354f44b28e9 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.codecov.yml @@ -0,0 +1,4 @@ +ignore: + - jsonutils/fixtures_test + - jsonutils/adapters/ifaces/mocks + - jsonutils/adapters/testintegration/benchmarks diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 80e2be0042f1..126264a6b898 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -1,60 +1,78 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 3 - min-occurrences: 3 - +version: "2" linters: - enable-all: true + default: all disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert - funlen - - godox + - gochecknoglobals + - gochecknoinits - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - godot - - gofumpt + - godox + - gomoddirectives + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - modernize + - nestif + - nlreturn + - nonamedreturns + - noinlineerr - paralleltest - - tparallel + - recvcheck + - testpackage - thelper - - ifshort - - exhaustruct + - tagliatelle + - tparallel + - unparam - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. 
+ # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/swag/.mockery.yml b/vendor/github.com/go-openapi/swag/.mockery.yml new file mode 100644 index 000000000000..8557cb58d331 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.mockery.yml @@ -0,0 +1,30 @@ +all: false +dir: '{{.InterfaceDir}}' +filename: mocks_test.go +force-file-write: true +formatter: goimports +include-auto-generated: false +log-level: info +structname: '{{.Mock}}{{.InterfaceName}}' +pkgname: '{{.SrcPackageName}}' +recursive: false +require-template-schema-exists: true +template: matryer +template-schema: '{{.Template}}.schema.json' +packages: + github.com/go-openapi/swag/jsonutils/adapters/ifaces: + config: + dir: jsonutils/adapters/ifaces/mocks + filename: mocks.go + pkgname: 'mocks' + force-file-write: true + all: true + github.com/go-openapi/swag/jsonutils/adapters/testintegration: + config: + inpackage: true + dir: jsonutils/adapters/testintegration + force-file-write: true + all: true + interfaces: + EJMarshaler: + EJUnmarshaler: diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index a7292229980f..b2b29f8fe2c9 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,23 +1,219 @@ # Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) +[![license](https://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) -Contains a bunch of helper functions for go-openapi and go-swagger projects. +Package `swag` contains a bunch of helper functions for go-openapi and go-swagger projects. You may also use it standalone for your projects. -* convert between value and pointers for builtin types -* convert from string to builtin types (wraps strconv) -* fast json concatenation -* search in path -* load from file or http -* name mangling +> **NOTE** +> `swag` is one of the foundational building blocks of the go-openapi initiative. +> Most repositories in `github.com/go-openapi/...` depend on it in some way. +> And so does our CLI tool `github.com/go-swagger/go-swagger`, +> as well as the code generated by this tool. +* [Contents](#contents) +* [Dependencies](#dependencies) +* [Release Notes](#release-notes) +* [Licensing](#licensing) +* [Note to contributors](#note-to-contributors) +* [TODOs, suggestions and plans](#todos-suggestions-and-plans) -This repo has only few dependencies outside of the standard library: +## Contents -* YAML utilities depend on `gopkg.in/yaml.v3` -* `github.com/mailru/easyjson v0.7.7` +`go-openapi/swag` exposes a collection of relatively independent modules. 
+
+Moving forward, no additional feature will be added to the `swag` API directly at the root package level,
+which remains there for backward-compatibility purposes. All exported top-level features are now deprecated.
+
+Child modules will continue to evolve and some new ones may be added in the future.
+
+| Module | Content | Main features |
+|---------------|---------|---------------|
+| `cmdutils` | utilities to work with CLIs ||
+| `conv` | type conversion utilities | convert between values and pointers for any types<br>convert from string to builtin types (wraps `strconv`)<br>require `./typeutils` (test dependency) |
+| `fileutils` | file utilities | |
+| `jsonname` | JSON utilities | infer JSON names from `go` properties |
+| `jsonutils` | JSON utilities | fast json concatenation<br>read and write JSON from and to dynamic `go` data structures<br>~require `github.com/mailru/easyjson`~ |
+| `loading` | file loading | load from file or http<br>require `./yamlutils` |
+| `mangling` | safe name generation | name mangling for `go` |
+| `netutils` | networking utilities | host, port from address |
+| `stringutils` | `string` utilities | search in slice (with case-insensitive)<br>split/join query parameters as arrays |
+| `typeutils` | `go` types utilities | check the zero value for any type<br>safe check for a nil value |
+| `yamlutils` | YAML utilities | converting YAML to JSON<br>loading YAML into a dynamic YAML document<br>maintaining the original order of keys in YAML objects<br>require `./jsonutils`<br>~require `github.com/mailru/easyjson`~<br>require `go.yaml.in/yaml/v3` |
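+
+For instance, the string conversion helpers from the `conv` module can be used as sketched below.
+This is a minimal, illustrative example built on the generic `conv.ConvertInteger` and the lenient
+`conv.ConvertBool` helpers shipped with this module; the input values are made up.
+
+ ```go
+ package main
+
+ import (
+   "fmt"
+
+   "github.com/go-openapi/swag/conv"
+ )
+
+ func main() {
+   // ConvertInteger is generic over the signed integer type to produce.
+   n, err := conv.ConvertInteger[int64]("42")
+   if err != nil {
+     panic(err)
+   }
+
+   // ConvertBool is laxer than strconv.ParseBool: "yes", "on", "checked", etc.
+   // all count as true, and anything else is false (it never returns an error).
+   ok, _ := conv.ConvertBool("on")
+
+   fmt.Println(n, ok) // 42 true
+ }
+ ```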
+
+---
+
+## Dependencies
+
+The root module `github.com/go-openapi/swag` at the repo level maintains a few
+dependencies outside of the standard library.
+
+* YAML utilities depend on `go.yaml.in/yaml/v3`
+* JSON utilities depend on their registered adapter module:
+  * by default, only the standard library is used
+  * `github.com/mailru/easyjson` is now only a dependency for module
+    `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`,
+    for users willing to import that module.
+  * integration tests and benchmarks use all the dependencies and are published as their own module
+* other dependencies are test dependencies drawn from `github.com/stretchr/testify`
+
+## Release notes
+
+### v0.25.2 (draft, unpublished)
+
+Minor changes due to internal maintenance that don't affect the behavior of the library.
+
+* [x] removed indirect test dependencies by switching all tests to `go-openapi/testify`,
+  a fork of `stretchr/testify` with zero dependencies.
+* [x] improvements to CI to catch test reports.
+* [x] modernized licensing annotations in source code, using the more compact SPDX annotations
+  rather than the full license terms.
+* [x] simplified JSON & YAML testing a bit by using newly available assertions
+* started the journey to an OpenSSF score card badge:
+  * [x] made permissions explicit in CI workflows
+  * [x] published security policy
+  * pinned dependencies to github actions
+  * introduced fuzzing in tests
+
+### v0.25.1
+
+* fixes a data race that could occur when using the standard library implementation of a JSON ordered map
+
+### v0.25.0
+
+**New with this release**:
+
+* requires `go1.24`, as iterators are being introduced
+* removes the dependency on `mailru/easyjson` by default (#68)
+  * functionality remains the same, but performance may somewhat degrade for applications
+    that relied on `easyjson`
+  * users of the JSON or YAML utilities who want to use `easyjson` as their preferred JSON serializer library
+    will be able to do so by registering the corresponding JSON adapter at runtime. See below.
+  * ordered keys in JSON and YAML objects: this feature used to rely solely on `easyjson`.
+    With this release, an implementation relying on the standard `encoding/json` is provided.
+  * an independent [benchmark](./jsonutils/adapters/testintegration/benchmarks/README.md) to compare the different adapters
+* improves the "float is integer" check (`conv.IsFloat64AJSONInteger`) (#59)
+* removes the _direct_ dependency on `gopkg.in/yaml.v3` (an indirect dependency is still incurred through `stretchr/testify`) (#127)
+* exposed `conv.IsNil()` (previously kept private): a safe nil check (accounting for the "non-nil interface with nil value" nonsensical go trick)
+
+**What's coming next?**
+
+Moving forward, we want to:
+* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds.
+* provide similar implementations for `goccy/go-json` and `json-iterator/go`, and perhaps other
+  similar libraries that may be interesting too.
+
+**How to explicitly register a dependency at runtime?**
+
+The following would maintain how the JSON utilities proposed by `swag` used to work, up to `v0.24.1`:
+
+ ```go
+ import (
+   "github.com/go-openapi/swag/jsonutils/adapters"
+   easyjson "github.com/go-openapi/swag/jsonutils/adapters/easyjson/json"
+ )
+
+ func init() {
+   easyjson.Register(adapters.Registry)
+ }
+ ```
+
+Subsequent calls to `jsonutils.ReadJSON()` or `jsonutils.WriteJSON()` will switch to `easyjson`
+whenever the passed data structures implement `easyjson.Unmarshaler` or `easyjson.Marshaler`, respectively,
+or fall back to the standard library.
+
+For more details, you may also look at our
+[integration tests](jsonutils/adapters/testintegration/integration_suite_test.go#29).
+
+### v0.24.0
+
+With this release, we have largely modernized the API of `swag`:
+
+* The traditional `swag` API is still supported: code that imports `swag` will still
+  compile and work the same.
+* A deprecation notice is published to encourage consumers of this library to adopt
+  the newer API.
+* **Deprecation notice**
+  * configuration through global variables is now deprecated, in favor of options passed as parameters
+  * all helper functions are moved to more specialized packages, which are exposed as
+    go modules. Importing such a module would reduce the footprint of dependencies.
+  * _all_ functions, variables, and constants exposed by the deprecated API have now moved, so
+    that consumers of the new API no longer need to import github.com/go-openapi/swag, but
+    should import the desired sub-module(s).
+
+**New with this release**:
+
+* [x] type converters and pointer to value helpers now support generic types
+* [x] name mangling now supports pluralized initialisms (issue #46)
+  Strings like "contact IDs" are now recognized as such a plural form and mangled as a linter would expect.
+* [x] performance: small improvements to reduce the overhead of convert/format wrappers (see issues #110, or PR #108)
+* [x] performance: name mangling utilities run ~ 10% faster (PR #115)
+
+---
+
+## Licensing
+
+This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
+
+## Note to contributors
+
+A mono-repo structure comes with some unavoidable extra pains...
+
+* Testing
+
+> The usual `go test ./...` command, run from the root of this repo, won't work any longer to test all submodules.
+>
+> Each module constitutes an independent unit of test. So you have to run `go test` inside each module.
+> Or you may take a look at how this is achieved by CI
+> [here](https://github.com/go-openapi/swag/blob/master/.github/workflows/go-test.yml).
+>
+> There are also some alternative tricks using `go work`, for local development, if you feel comfortable with
+> go workspaces. Perhaps some day, we'll have a `go work test` to run all tests without any hack.
+
+* Releasing
+
+> Each module follows its own independent module versioning.
+>
+> So you have tags like `mangling/v0.24.0`, `fileutils/v0.24.0` etc that are used by `go mod` and `go get`
+> to refer to the tagged version of each module specifically.
+>
+> This means we may release patches etc to each module independently.
+>
+> We'd like to adopt the rule that modules in this repo would only differ by a patch version
+> (e.g. `v0.24.5` vs `v0.24.3`), and we'll level all modules whenever a minor version is introduced.
+>
+> A script in `./hack` is provided to tag all modules with the same version in one go.
+
+* Continuous integration
+
+> At this moment, all tests in all modules are systematically run over the full test matrix (3 platforms x 2 go releases).
+> This generates quite a lot of jobs.
+> +> We ought to reduce the number of jobs required to test a PR focused on only a few modules. + +## Todos, suggestions and plans + +All kinds of contributions are welcome. + +A few ideas: + +* [x] Complete the split of dependencies to isolate easyjson from the rest +* [x] Improve CI to reduce needed tests +* [x] Replace dependency to `gopkg.in/yaml.v3` (`yamlutil`) +* [ ] Improve mangling utilities (improve readability, support for capitalized words, + better word substitution for non-letter symbols...) +* [ ] Move back to this common shared pot a few of the technical features introduced by go-swagger independently + (e.g. mangle go package names, search package with go modules support, ...) +* [ ] Apply a similar mono-repo approach to go-openapi/strfmt which suffer from similar woes: bloated API, + imposed dependency to some database driver. +* [ ] Adapt `go-swagger` (incl. generated code) to the new `swag` API. +* [ ] Factorize some tests, as there is a lot of redundant testing code in `jsonutils` +* [ ] Benchmark & profiling: publish independently the tool built to analyze and chart benchmarks (e.g. similar to `benchvisual`) +* [ ] more thorough testing for nil / null case +* [ ] ci pipeline to manage releases +* [ ] cleaner mockery generation (doesn't work out of the box for all sub-modules) diff --git a/vendor/github.com/go-openapi/swag/SECURITY.md b/vendor/github.com/go-openapi/swag/SECURITY.md new file mode 100644 index 000000000000..72296a83135d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.25.x | :white_check_mark: | + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +please report it privately to the maintainers. + +Please follow the instructions provided by github to +[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). + +TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". diff --git a/vendor/github.com/go-openapi/swag/cmdutils/LICENSE b/vendor/github.com/go-openapi/swag/cmdutils/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go b/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go new file mode 100644 index 000000000000..6c7bbb26f03d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package cmdutils + +// CommandLineOptionsGroup represents a group of user-defined command line options. +// +// This is for instance used to configure command line arguments in API servers generated by go-swagger. +type CommandLineOptionsGroup struct { + ShortDescription string + LongDescription string + Options any +} diff --git a/vendor/github.com/go-openapi/swag/cmdutils/doc.go b/vendor/github.com/go-openapi/swag/cmdutils/doc.go new file mode 100644 index 000000000000..31f2c37538a0 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package cmdutils brings helpers for CLIs produced by go-openapi +package cmdutils diff --git a/vendor/github.com/go-openapi/swag/cmdutils_iface.go b/vendor/github.com/go-openapi/swag/cmdutils_iface.go new file mode 100644 index 000000000000..bd0c1fc12802 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils_iface.go @@ -0,0 +1,11 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/cmdutils" + +// CommandLineOptionsGroup represents a group of user-defined command line options. +// +// Deprecated: use [cmdutils.CommandLineOptionsGroup] instead. +type CommandLineOptionsGroup = cmdutils.CommandLineOptionsGroup diff --git a/vendor/github.com/go-openapi/swag/conv/LICENSE b/vendor/github.com/go-openapi/swag/conv/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/conv/convert.go b/vendor/github.com/go-openapi/swag/conv/convert.go new file mode 100644 index 000000000000..f205c3913457 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/convert.go @@ -0,0 +1,161 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package conv + +import ( + "math" + "strconv" + "strings" +) + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 +) + +// IsFloat64AJSONInteger allows for integers [-2^53, 2^53-1] inclusive. 
+func IsFloat64AJSONInteger(f float64) bool { + if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { + return false + } + rounded := math.Round(f) + if f == rounded { + return true + } + if rounded == 0 { // f = 0.0 exited above + return false + } + + diff := math.Abs(f - rounded) + if diff == 0 { + return true + } + + // relative error Abs{f - Round(f)) / Round(f)} < ε ; Round(f) + return diff < epsilon*math.Abs(rounded) +} + +// ConvertFloat turns a string into a float numerical value. +func ConvertFloat[T Float](str string) (T, error) { + var v T + f, err := strconv.ParseFloat(str, bitsize(v)) + if err != nil { + return 0, err + } + + return T(f), nil +} + +// ConvertInteger turns a string into a signed integer. +func ConvertInteger[T Signed](str string) (T, error) { + var v T + f, err := strconv.ParseInt(str, 10, bitsize(v)) + if err != nil { + return 0, err + } + + return T(f), nil +} + +// ConvertUinteger turns a string into an unsigned integer. +func ConvertUinteger[T Unsigned](str string) (T, error) { + var v T + f, err := strconv.ParseUint(str, 10, bitsize(v)) + if err != nil { + return 0, err + } + + return T(f), nil +} + +// ConvertBool turns a string into a boolean. +// +// It supports a few more "true" strings than [strconv.ParseBool]: +// +// - it is not case sensitive ("trUe" or "FalsE" work) +// - "ok", "yes", "y", "on", "selected", "checked", "enabled" are all true +// - everything that is not true is false: there is never an actual error returned +func ConvertBool(str string) (bool, error) { + switch strings.ToLower(str) { + case "true", + "1", + "yes", + "ok", + "y", + "on", + "selected", + "checked", + "t", + "enabled": + return true, nil + default: + return false, nil + } +} + +// ConvertFloat32 turns a string into a float32. 
+func ConvertFloat32(str string) (float32, error) { return ConvertFloat[float32](str) } + +// ConvertFloat64 turns a string into a float64 +func ConvertFloat64(str string) (float64, error) { return ConvertFloat[float64](str) } + +// ConvertInt8 turns a string into an int8 +func ConvertInt8(str string) (int8, error) { return ConvertInteger[int8](str) } + +// ConvertInt16 turns a string into an int16 +func ConvertInt16(str string) (int16, error) { + i, err := strconv.ParseInt(str, 10, 16) + if err != nil { + return 0, err + } + return int16(i), nil +} + +// ConvertInt32 turns a string into an int32 +func ConvertInt32(str string) (int32, error) { + i, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// ConvertInt64 turns a string into an int64 +func ConvertInt64(str string) (int64, error) { + return strconv.ParseInt(str, 10, 64) +} + +// ConvertUint8 turns a string into an uint8 +func ConvertUint8(str string) (uint8, error) { + i, err := strconv.ParseUint(str, 10, 8) + if err != nil { + return 0, err + } + return uint8(i), nil +} + +// ConvertUint16 turns a string into an uint16 +func ConvertUint16(str string) (uint16, error) { + i, err := strconv.ParseUint(str, 10, 16) + if err != nil { + return 0, err + } + return uint16(i), nil +} + +// ConvertUint32 turns a string into an uint32 +func ConvertUint32(str string) (uint32, error) { + i, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// ConvertUint64 turns a string into an uint64 +func ConvertUint64(str string) (uint64, error) { + return strconv.ParseUint(str, 10, 64) +} diff --git a/vendor/github.com/go-openapi/swag/conv/convert_types.go b/vendor/github.com/go-openapi/swag/conv/convert_types.go new file mode 100644 index 000000000000..cf4c6495ebc9 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/convert_types.go @@ -0,0 +1,72 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package conv + +// Unlicensed credits (idea, concept) +// +// The idea to convert values to pointers and the other way around, was inspired, eons ago, by the aws go sdk. +// +// Nowadays, all sensible API sdk's expose a similar functionality. + +// Pointer returns a pointer to the value passed in. +func Pointer[T any](v T) *T { + return &v +} + +// Value returns a shallow copy of the value of the pointer passed in. +// +// If the pointer is nil, the returned value is the zero value. +func Value[T any](v *T) T { + if v != nil { + return *v + } + + var zero T + return zero +} + +// PointerSlice converts a slice of values into a slice of pointers. +func PointerSlice[T any](src []T) []*T { + dst := make([]*T, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// ValueSlice converts a slice of pointers into a slice of values. +// +// nil elements are zero values. +func ValueSlice[T any](src []*T) []T { + dst := make([]T, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// PointerMap converts a map of values into a map of pointers. +func PointerMap[K comparable, T any](src map[K]T) map[K]*T { + dst := make(map[K]*T) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// ValueMap converts a map of pointers into a map of values. +// +// nil elements are skipped. 
+func ValueMap[K comparable, T any](src map[K]*T) map[K]T { + dst := make(map[K]T) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/go-openapi/swag/conv/doc.go b/vendor/github.com/go-openapi/swag/conv/doc.go new file mode 100644 index 000000000000..1bd6ead6e2d1 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/doc.go @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package conv exposes utilities to convert types. +// +// The Convert and Format families of functions are essentially a shorthand to [strconv] functions, +// using the decimal representation of numbers. +// +// Features: +// +// - from string representation to value ("Convert*") and reciprocally ("Format*") +// - from pointer to value ([Value]) and reciprocally ([Pointer]) +// - from slice of values to slice of pointers ([PointerSlice]) and reciprocally ([ValueSlice]) +// - from map of values to map of pointers ([PointerMap]) and reciprocally ([ValueMap]) +package conv diff --git a/vendor/github.com/go-openapi/swag/conv/format.go b/vendor/github.com/go-openapi/swag/conv/format.go new file mode 100644 index 000000000000..5b87b8e146bb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/format.go @@ -0,0 +1,28 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package conv + +import ( + "strconv" +) + +// FormatInteger turns an integer type into a string. +func FormatInteger[T Signed](value T) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatUinteger turns an unsigned integer type into a string. +func FormatUinteger[T Unsigned](value T) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatFloat turns a floating point numerical value into a string. +func FormatFloat[T Float](value T) string { + return strconv.FormatFloat(float64(value), 'f', -1, bitsize(value)) +} + +// FormatBool turns a boolean into a string. +func FormatBool(value bool) string { + return strconv.FormatBool(value) +} diff --git a/vendor/github.com/go-openapi/swag/conv/sizeof.go b/vendor/github.com/go-openapi/swag/conv/sizeof.go new file mode 100644 index 000000000000..494346557381 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/sizeof.go @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package conv + +import "unsafe" + +// bitsize returns the size in bits of a type. +// +// NOTE: [unsafe.SizeOf] simply returns the size in bytes of the value. +// For primitive types T, the generic stencil is precompiled and this value +// is resolved at compile time, resulting in an immediate call to [strconv.ParseFloat]. +// +// We may leave up to the go compiler to simplify this function into a +// constant value, which happens in practice at least for primitive types +// (e.g. numerical types). 
+func bitsize[T Numerical](value T) int { + const bitsPerByte = 8 + return int(unsafe.Sizeof(value)) * bitsPerByte +} diff --git a/vendor/github.com/go-openapi/swag/conv/type_constraints.go b/vendor/github.com/go-openapi/swag/conv/type_constraints.go new file mode 100644 index 000000000000..81135e827e52 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/type_constraints.go @@ -0,0 +1,29 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package conv + +type ( + // these type constraints are redefined after golang.org/x/exp/constraints, + // because importing that package causes an undesired go upgrade. + + // Signed integer types, cf. [golang.org/x/exp/constraints.Signed] + Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 + } + + // Unsigned integer types, cf. [golang.org/x/exp/constraints.Unsigned] + Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr + } + + // Float numerical types, cf. [golang.org/x/exp/constraints.Float] + Float interface { + ~float32 | ~float64 + } + + // Numerical types + Numerical interface { + Signed | Unsigned | Float + } +) diff --git a/vendor/github.com/go-openapi/swag/conv_iface.go b/vendor/github.com/go-openapi/swag/conv_iface.go new file mode 100644 index 000000000000..eea7b2e56e33 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv_iface.go @@ -0,0 +1,486 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "time" + + "github.com/go-openapi/swag/conv" +) + +// IsFloat64AJSONInteger allows for integers [-2^53, 2^53-1] inclusive. +// +// Deprecated: use [conv.IsFloat64AJSONInteger] instead. +func IsFloat64AJSONInteger(f float64) bool { return conv.IsFloat64AJSONInteger(f) } + +// ConvertBool turns a string into a boolean. +// +// Deprecated: use [conv.ConvertBool] instead. +func ConvertBool(str string) (bool, error) { return conv.ConvertBool(str) } + +// ConvertFloat32 turns a string into a float32. +// +// Deprecated: use [conv.ConvertFloat32] instead. Alternatively, you may use the generic version [conv.ConvertFloat]. +func ConvertFloat32(str string) (float32, error) { return conv.ConvertFloat[float32](str) } + +// ConvertFloat64 turns a string into a float64. +// +// Deprecated: use [conv.ConvertFloat64] instead. Alternatively, you may use the generic version [conv.ConvertFloat]. +func ConvertFloat64(str string) (float64, error) { return conv.ConvertFloat[float64](str) } + +// ConvertInt8 turns a string into an int8. +// +// Deprecated: use [conv.ConvertInt8] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt8(str string) (int8, error) { return conv.ConvertInteger[int8](str) } + +// ConvertInt16 turns a string into an int16. +// +// Deprecated: use [conv.ConvertInt16] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt16(str string) (int16, error) { return conv.ConvertInteger[int16](str) } + +// ConvertInt32 turns a string into an int32. +// +// Deprecated: use [conv.ConvertInt32] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt32(str string) (int32, error) { return conv.ConvertInteger[int32](str) } + +// ConvertInt64 turns a string into an int64. +// +// Deprecated: use [conv.ConvertInt64] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. 
+func ConvertInt64(str string) (int64, error) { return conv.ConvertInteger[int64](str) } + +// ConvertUint8 turns a string into an uint8. +// +// Deprecated: use [conv.ConvertUint8] instead. Alternatively, you may use the generic version [conv.ConvertUinteger]. +func ConvertUint8(str string) (uint8, error) { return conv.ConvertUinteger[uint8](str) } + +// ConvertUint16 turns a string into an uint16. +// +// Deprecated: use [conv.ConvertUint16] instead. Alternatively, you may use the generic version [conv.ConvertUinteger]. +func ConvertUint16(str string) (uint16, error) { return conv.ConvertUinteger[uint16](str) } + +// ConvertUint32 turns a string into an uint32. +// +// Deprecated: use [conv.ConvertUint32] instead. Alternatively, you may use the generic version [conv.ConvertUinteger]. +func ConvertUint32(str string) (uint32, error) { return conv.ConvertUinteger[uint32](str) } + +// ConvertUint64 turns a string into an uint64. +// +// Deprecated: use [conv.ConvertUint64] instead. Alternatively, you may use the generic version [conv.ConvertUinteger]. +func ConvertUint64(str string) (uint64, error) { return conv.ConvertUinteger[uint64](str) } + +// FormatBool turns a boolean into a string. +// +// Deprecated: use [conv.FormatBool] instead. +func FormatBool(value bool) string { return conv.FormatBool(value) } + +// FormatFloat32 turns a float32 into a string. +// +// Deprecated: use [conv.FormatFloat] instead. +func FormatFloat32(value float32) string { return conv.FormatFloat(value) } + +// FormatFloat64 turns a float64 into a string. +// +// Deprecated: use [conv.FormatFloat] instead. +func FormatFloat64(value float64) string { return conv.FormatFloat(value) } + +// FormatInt8 turns an int8 into a string. +// +// Deprecated: use [conv.FormatInteger] instead. +func FormatInt8(value int8) string { return conv.FormatInteger(value) } + +// FormatInt16 turns an int16 into a string. +// +// Deprecated: use [conv.FormatInteger] instead. +func FormatInt16(value int16) string { return conv.FormatInteger(value) } + +// FormatInt32 turns an int32 into a string +// +// Deprecated: use [conv.FormatInteger] instead. +func FormatInt32(value int32) string { return conv.FormatInteger(value) } + +// FormatInt64 turns an int64 into a string. +// +// Deprecated: use [conv.FormatInteger] instead. +func FormatInt64(value int64) string { return conv.FormatInteger(value) } + +// FormatUint8 turns an uint8 into a string. +// +// Deprecated: use [conv.FormatUinteger] instead. +func FormatUint8(value uint8) string { return conv.FormatUinteger(value) } + +// FormatUint16 turns an uint16 into a string. +// +// Deprecated: use [conv.FormatUinteger] instead. +func FormatUint16(value uint16) string { return conv.FormatUinteger(value) } + +// FormatUint32 turns an uint32 into a string. +// +// Deprecated: use [conv.FormatUinteger] instead. +func FormatUint32(value uint32) string { return conv.FormatUinteger(value) } + +// FormatUint64 turns an uint64 into a string. +// +// Deprecated: use [conv.FormatUinteger] instead. +func FormatUint64(value uint64) string { return conv.FormatUinteger(value) } + +// String turn a pointer to of the string value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func String(v string) *string { return conv.Pointer(v) } + +// StringValue turn the value of the string pointer passed in or +// "" if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. 
+func StringValue(v *string) string { return conv.Value(v) } + +// StringSlice converts a slice of string values into a slice of string pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func StringSlice(src []string) []*string { return conv.PointerSlice(src) } + +// StringValueSlice converts a slice of string pointers into a slice of string values. +// +// Deprecated: use [conv.ValueSlice] instead. +func StringValueSlice(src []*string) []string { return conv.ValueSlice(src) } + +// StringMap converts a string map of string values into a string map of string pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func StringMap(src map[string]string) map[string]*string { return conv.PointerMap(src) } + +// StringValueMap converts a string map of string pointers into a string map of string values. +// +// Deprecated: use [conv.ValueMap] instead. +func StringValueMap(src map[string]*string) map[string]string { return conv.ValueMap(src) } + +// Bool turn a pointer to of the bool value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Bool(v bool) *bool { return conv.Pointer(v) } + +// BoolValue turn the value of the bool pointer passed in or false if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func BoolValue(v *bool) bool { return conv.Value(v) } + +// BoolSlice converts a slice of bool values into a slice of bool pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func BoolSlice(src []bool) []*bool { return conv.PointerSlice(src) } + +// BoolValueSlice converts a slice of bool pointers into a slice of bool values. +// +// Deprecated: use [conv.ValueSlice] instead. +func BoolValueSlice(src []*bool) []bool { return conv.ValueSlice(src) } + +// BoolMap converts a string map of bool values into a string map of bool pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func BoolMap(src map[string]bool) map[string]*bool { return conv.PointerMap(src) } + +// BoolValueMap converts a string map of bool pointers into a string map of bool values. +// +// Deprecated: use [conv.ValueMap] instead. +func BoolValueMap(src map[string]*bool) map[string]bool { return conv.ValueMap(src) } + +// Int turn a pointer to of the int value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Int(v int) *int { return conv.Pointer(v) } + +// IntValue turn the value of the int pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func IntValue(v *int) int { return conv.Value(v) } + +// IntSlice converts a slice of int values into a slice of int pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func IntSlice(src []int) []*int { return conv.PointerSlice(src) } + +// IntValueSlice converts a slice of int pointers into a slice of int values. +// +// Deprecated: use [conv.ValueSlice] instead. +func IntValueSlice(src []*int) []int { return conv.ValueSlice(src) } + +// IntMap converts a string map of int values into a string map of int pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func IntMap(src map[string]int) map[string]*int { return conv.PointerMap(src) } + +// IntValueMap converts a string map of int pointers into a string map of int values. +// +// Deprecated: use [conv.ValueMap] instead. +func IntValueMap(src map[string]*int) map[string]int { return conv.ValueMap(src) } + +// Int32 turn a pointer to of the int32 value passed in. +// +// Deprecated: use [conv.Pointer] instead. 
+func Int32(v int32) *int32 { return conv.Pointer(v) } + +// Int32Value turn the value of the int32 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Int32Value(v *int32) int32 { return conv.Value(v) } + +// Int32Slice converts a slice of int32 values into a slice of int32 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Int32Slice(src []int32) []*int32 { return conv.PointerSlice(src) } + +// Int32ValueSlice converts a slice of int32 pointers into a slice of int32 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Int32ValueSlice(src []*int32) []int32 { return conv.ValueSlice(src) } + +// Int32Map converts a string map of int32 values into a string map of int32 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Int32Map(src map[string]int32) map[string]*int32 { return conv.PointerMap(src) } + +// Int32ValueMap converts a string map of int32 pointers into a string map of int32 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Int32ValueMap(src map[string]*int32) map[string]int32 { return conv.ValueMap(src) } + +// Int64 turn a pointer to of the int64 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Int64(v int64) *int64 { return conv.Pointer(v) } + +// Int64Value turn the value of the int64 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Int64Value(v *int64) int64 { return conv.Value(v) } + +// Int64Slice converts a slice of int64 values into a slice of int64 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Int64Slice(src []int64) []*int64 { return conv.PointerSlice(src) } + +// Int64ValueSlice converts a slice of int64 pointers into a slice of int64 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Int64ValueSlice(src []*int64) []int64 { return conv.ValueSlice(src) } + +// Int64Map converts a string map of int64 values into a string map of int64 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Int64Map(src map[string]int64) map[string]*int64 { return conv.PointerMap(src) } + +// Int64ValueMap converts a string map of int64 pointers into a string map of int64 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Int64ValueMap(src map[string]*int64) map[string]int64 { return conv.ValueMap(src) } + +// Uint16 turn a pointer to of the uint16 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Uint16(v uint16) *uint16 { return conv.Pointer(v) } + +// Uint16Value turn the value of the uint16 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Uint16Value(v *uint16) uint16 { return conv.Value(v) } + +// Uint16Slice converts a slice of uint16 values into a slice of uint16 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Uint16Slice(src []uint16) []*uint16 { return conv.PointerSlice(src) } + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of uint16 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Uint16ValueSlice(src []*uint16) []uint16 { return conv.ValueSlice(src) } + +// Uint16Map converts a string map of uint16 values into a string map of uint16 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Uint16Map(src map[string]uint16) map[string]*uint16 { return conv.PointerMap(src) } + +// Uint16ValueMap converts a string map of uint16 pointers into a string map of uint16 values. 
+// +// Deprecated: use [conv.ValueMap] instead. +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { return conv.ValueMap(src) } + +// Uint turn a pointer to of the uint value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Uint(v uint) *uint { return conv.Pointer(v) } + +// UintValue turn the value of the uint pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func UintValue(v *uint) uint { return conv.Value(v) } + +// UintSlice converts a slice of uint values into a slice of uint pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func UintSlice(src []uint) []*uint { return conv.PointerSlice(src) } + +// UintValueSlice converts a slice of uint pointers into a slice of uint values. +// +// Deprecated: use [conv.ValueSlice] instead. +func UintValueSlice(src []*uint) []uint { return conv.ValueSlice(src) } + +// UintMap converts a string map of uint values into a string map of uint pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func UintMap(src map[string]uint) map[string]*uint { return conv.PointerMap(src) } + +// UintValueMap converts a string map of uint pointers into a string map of uint values. +// +// Deprecated: use [conv.ValueMap] instead. +func UintValueMap(src map[string]*uint) map[string]uint { return conv.ValueMap(src) } + +// Uint32 turn a pointer to of the uint32 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Uint32(v uint32) *uint32 { return conv.Pointer(v) } + +// Uint32Value turn the value of the uint32 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Uint32Value(v *uint32) uint32 { return conv.Value(v) } + +// Uint32Slice converts a slice of uint32 values into a slice of uint32 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Uint32Slice(src []uint32) []*uint32 { return conv.PointerSlice(src) } + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of uint32 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Uint32ValueSlice(src []*uint32) []uint32 { return conv.ValueSlice(src) } + +// Uint32Map converts a string map of uint32 values into a string map of uint32 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Uint32Map(src map[string]uint32) map[string]*uint32 { return conv.PointerMap(src) } + +// Uint32ValueMap converts a string map of uint32 pointers into a string map of uint32 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { return conv.ValueMap(src) } + +// Uint64 turn a pointer to of the uint64 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Uint64(v uint64) *uint64 { return conv.Pointer(v) } + +// Uint64Value turn the value of the uint64 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Uint64Value(v *uint64) uint64 { return conv.Value(v) } + +// Uint64Slice converts a slice of uint64 values into a slice of uint64 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Uint64Slice(src []uint64) []*uint64 { return conv.PointerSlice(src) } + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of uint64 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Uint64ValueSlice(src []*uint64) []uint64 { return conv.ValueSlice(src) } + +// Uint64Map converts a string map of uint64 values into a string map of uint64 pointers. 
+// +// Deprecated: use [conv.PointerMap] instead. +func Uint64Map(src map[string]uint64) map[string]*uint64 { return conv.PointerMap(src) } + +// Uint64ValueMap converts a string map of uint64 pointers into a string map of uint64 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { return conv.ValueMap(src) } + +// Float32 turn a pointer to of the float32 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Float32(v float32) *float32 { return conv.Pointer(v) } + +// Float32Value turn the value of the float32 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Float32Value(v *float32) float32 { return conv.Value(v) } + +// Float32Slice converts a slice of float32 values into a slice of float32 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Float32Slice(src []float32) []*float32 { return conv.PointerSlice(src) } + +// Float32ValueSlice converts a slice of float32 pointers into a slice of float32 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Float32ValueSlice(src []*float32) []float32 { return conv.ValueSlice(src) } + +// Float32Map converts a string map of float32 values into a string map of float32 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Float32Map(src map[string]float32) map[string]*float32 { return conv.PointerMap(src) } + +// Float32ValueMap converts a string map of float32 pointers into a string map of float32 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Float32ValueMap(src map[string]*float32) map[string]float32 { return conv.ValueMap(src) } + +// Float64 turn a pointer to of the float64 value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Float64(v float64) *float64 { return conv.Pointer(v) } + +// Float64Value turn the value of the float64 pointer passed in or 0 if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func Float64Value(v *float64) float64 { return conv.Value(v) } + +// Float64Slice converts a slice of float64 values into a slice of float64 pointers. +// +// Deprecated: use [conv.PointerSlice] instead. +func Float64Slice(src []float64) []*float64 { return conv.PointerSlice(src) } + +// Float64ValueSlice converts a slice of float64 pointers into a slice of float64 values. +// +// Deprecated: use [conv.ValueSlice] instead. +func Float64ValueSlice(src []*float64) []float64 { return conv.ValueSlice(src) } + +// Float64Map converts a string map of float64 values into a string map of float64 pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func Float64Map(src map[string]float64) map[string]*float64 { return conv.PointerMap(src) } + +// Float64ValueMap converts a string map of float64 pointers into a string map of float64 values. +// +// Deprecated: use [conv.ValueMap] instead. +func Float64ValueMap(src map[string]*float64) map[string]float64 { return conv.ValueMap(src) } + +// Time turn a pointer to of the time.Time value passed in. +// +// Deprecated: use [conv.Pointer] instead. +func Time(v time.Time) *time.Time { return conv.Pointer(v) } + +// TimeValue turn the value of the time.Time pointer passed in or time.Time{} if the pointer is nil. +// +// Deprecated: use [conv.Value] instead. +func TimeValue(v *time.Time) time.Time { return conv.Value(v) } + +// TimeSlice converts a slice of time.Time values into a slice of time.Time pointers. +// +// Deprecated: use [conv.PointerSlice] instead. 
+func TimeSlice(src []time.Time) []*time.Time { return conv.PointerSlice(src) } + +// TimeValueSlice converts a slice of time.Time pointers into a slice of time.Time values +// +// Deprecated: use [conv.ValueSlice] instead. +func TimeValueSlice(src []*time.Time) []time.Time { return conv.ValueSlice(src) } + +// TimeMap converts a string map of time.Time values into a string map of time.Time pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func TimeMap(src map[string]time.Time) map[string]*time.Time { return conv.PointerMap(src) } + +// TimeValueMap converts a string map of time.Time pointers into a string map of time.Time values. +// +// Deprecated: use [conv.ValueMap] instead. +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { return conv.ValueMap(src) } diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go deleted file mode 100644 index fc085aeb8ea5..000000000000 --- a/vendor/github.com/go-openapi/swag/convert.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "math" - "strconv" - "strings" -) - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 - epsilon float64 = 1e-9 -) - -// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive -func IsFloat64AJSONInteger(f float64) bool { - if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { - return false - } - fa := math.Abs(f) - g := float64(uint64(f)) - ga := math.Abs(g) - - diff := math.Abs(f - g) - - // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases - switch { - case f == g: // best case - return true - case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case - return true - case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values - return diff < (epsilon * math.SmallestNonzeroFloat64) - } - // check the relative error - return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon -} - -var evaluatesAsTrue map[string]struct{} - -func init() { - evaluatesAsTrue = map[string]struct{}{ - "true": {}, - "1": {}, - "yes": {}, - "ok": {}, - "y": {}, - "on": {}, - "selected": {}, - "checked": {}, - "t": {}, - "enabled": {}, - } -} - -// ConvertBool turn a string into a boolean -func ConvertBool(str string) (bool, error) { - _, ok := evaluatesAsTrue[strings.ToLower(str)] - return ok, nil -} - -// ConvertFloat32 turn a string into a float32 -func ConvertFloat32(str string) (float32, error) { - f, err := strconv.ParseFloat(str, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// ConvertFloat64 turn a string into a float64 -func ConvertFloat64(str string) (float64, error) { - return strconv.ParseFloat(str, 64) -} - -// ConvertInt8 turn a string into an int8 -func 
ConvertInt8(str string) (int8, error) { - i, err := strconv.ParseInt(str, 10, 8) - if err != nil { - return 0, err - } - return int8(i), nil -} - -// ConvertInt16 turn a string into an int16 -func ConvertInt16(str string) (int16, error) { - i, err := strconv.ParseInt(str, 10, 16) - if err != nil { - return 0, err - } - return int16(i), nil -} - -// ConvertInt32 turn a string into an int32 -func ConvertInt32(str string) (int32, error) { - i, err := strconv.ParseInt(str, 10, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// ConvertInt64 turn a string into an int64 -func ConvertInt64(str string) (int64, error) { - return strconv.ParseInt(str, 10, 64) -} - -// ConvertUint8 turn a string into an uint8 -func ConvertUint8(str string) (uint8, error) { - i, err := strconv.ParseUint(str, 10, 8) - if err != nil { - return 0, err - } - return uint8(i), nil -} - -// ConvertUint16 turn a string into an uint16 -func ConvertUint16(str string) (uint16, error) { - i, err := strconv.ParseUint(str, 10, 16) - if err != nil { - return 0, err - } - return uint16(i), nil -} - -// ConvertUint32 turn a string into an uint32 -func ConvertUint32(str string) (uint32, error) { - i, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// ConvertUint64 turn a string into an uint64 -func ConvertUint64(str string) (uint64, error) { - return strconv.ParseUint(str, 10, 64) -} - -// FormatBool turns a boolean into a string -func FormatBool(value bool) string { - return strconv.FormatBool(value) -} - -// FormatFloat32 turns a float32 into a string -func FormatFloat32(value float32) string { - return strconv.FormatFloat(float64(value), 'f', -1, 32) -} - -// FormatFloat64 turns a float64 into a string -func FormatFloat64(value float64) string { - return strconv.FormatFloat(value, 'f', -1, 64) -} - -// FormatInt8 turns an int8 into a string -func FormatInt8(value int8) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt16 turns an int16 into a string -func FormatInt16(value int16) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt32 turns an int32 into a string -func FormatInt32(value int32) string { - return strconv.Itoa(int(value)) -} - -// FormatInt64 turns an int64 into a string -func FormatInt64(value int64) string { - return strconv.FormatInt(value, 10) -} - -// FormatUint8 turns an uint8 into a string -func FormatUint8(value uint8) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint16 turns an uint16 into a string -func FormatUint16(value uint16) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint32 turns an uint32 into a string -func FormatUint32(value uint32) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint64 turns an uint64 into a string -func FormatUint64(value uint64) string { - return strconv.FormatUint(value, 10) -} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go deleted file mode 100644 index c49cc473a8c2..000000000000 --- a/vendor/github.com/go-openapi/swag/convert_types.go +++ /dev/null @@ -1,730 +0,0 @@ -package swag - -import "time" - -// This file was taken from the aws go sdk - -// String returns a pointer to of the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. 
-func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to of the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. -func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to of the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. 
-func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to of the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil. -func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to of the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. 
-func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint16 returns a pointer to of the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Value returns the value of the uint16 pointer passed in or -// 0 if the pointer is nil. -func Uint16Value(v *uint16) uint16 { - if v != nil { - return *v - } - - return 0 -} - -// Uint16Slice converts a slice of uint16 values into a slice of -// uint16 pointers -func Uint16Slice(src []uint16) []*uint16 { - dst := make([]*uint16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - - return dst -} - -// Uint16ValueSlice converts a slice of uint16 pointers into a slice of -// uint16 values -func Uint16ValueSlice(src []*uint16) []uint16 { - dst := make([]uint16, len(src)) - - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - - return dst -} - -// Uint16Map converts a string map of uint16 values into a string -// map of uint16 pointers -func Uint16Map(src map[string]uint16) map[string]*uint16 { - dst := make(map[string]*uint16) - - for k, val := range src { - v := val - dst[k] = &v - } - - return dst -} - -// Uint16ValueMap converts a string map of uint16 pointers into a string -// map of uint16 values -func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { - dst := make(map[string]uint16) - - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - - return dst -} - -// Uint returns a pointer to of the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil. 
-func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values into a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers into a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values into a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers into a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to of the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. -func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to of the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil. 
-func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float32 returns a pointer to of the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float32 pointer passed in or -// 0 if the pointer is nil. -func Float32Value(v *float32) float32 { - if v != nil { - return *v - } - - return 0 -} - -// Float32Slice converts a slice of float32 values into a slice of -// float32 pointers -func Float32Slice(src []float32) []*float32 { - dst := make([]*float32, len(src)) - - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - - return dst -} - -// Float32ValueSlice converts a slice of float32 pointers into a slice of -// float32 values -func Float32ValueSlice(src []*float32) []float32 { - dst := make([]float32, len(src)) - - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - - return dst -} - -// Float32Map converts a string map of float32 values into a string -// map of float32 pointers -func Float32Map(src map[string]float32) map[string]*float32 { - dst := make(map[string]*float32) - - for k, val := range src { - v := val - dst[k] = &v - } - - return dst -} - -// Float32ValueMap converts a string map of float32 pointers into a string -// map of float32 values -func Float32ValueMap(src map[string]*float32) map[string]float32 { - dst := make(map[string]float32) - - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - - return dst -} - -// Float64 returns a pointer to of the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. 
-func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to of the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index 55094cb74c4d..b54b57478af6 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -1,31 +1,47 @@ -// Copyright 2015 go-swagger maintainers +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at +// You may also use it standalone for your projects. 
// -// http://www.apache.org/licenses/LICENSE-2.0 +// NOTE: all features that used to be exposed as package-level members (constants, variables, +// functions and types) are now deprecated and are superseded by equivalent features in +// more specialized sub-packages. +// Moving forward, no additional feature will be added to the [swag] API directly at the root package level, +// which remains there for backward-compatibility purposes. // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. - -You may also use it standalone for your projects. - - - convert between value and pointers for builtin types - - convert from string to builtin types (wraps strconv) - - fast json concatenation - - search in path - - load from file or http - - name mangling - -This repo has only few dependencies outside of the standard library: - - - YAML utilities depend on gopkg.in/yaml.v2 -*/ +// Child modules will continue to evolve or some new ones may be added in the future. +// +// # Modules +// +// - [cmdutils] utilities to work with CLIs +// +// - [conv] type conversion utilities +// +// - [fileutils] file utilities +// +// - [jsonname] JSON utilities +// +// - [jsonutils] JSON utilities +// +// - [loading] file loading +// +// - [mangling] safe name generation +// +// - [netutils] networking utilities +// +// - [stringutils] `string` utilities +// +// - [typeutils] `go` types utilities +// +// - [yamlutils] YAML utilities +// +// # Dependencies +// +// This repo has a few dependencies outside of the standard library: +// +// - YAML utilities depend on [go.yaml.in/yaml/v3] package swag + +//go:generate mockery diff --git a/vendor/github.com/go-openapi/swag/fileutils/LICENSE b/vendor/github.com/go-openapi/swag/fileutils/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/fileutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/fileutils/doc.go b/vendor/github.com/go-openapi/swag/fileutils/doc.go new file mode 100644 index 000000000000..859a200d8413 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/fileutils/doc.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package fileutils exposes utilities to deal with files and paths. +// +// Currently, there is: +// - [File] to represent an abstraction of an uploaded file. +// For instance, this is used by [github.com/go-openapi/runtime.File]. +// - path search utilities (e.g. 
finding packages in the GO search path) +package fileutils diff --git a/vendor/github.com/go-openapi/swag/fileutils/file.go b/vendor/github.com/go-openapi/swag/fileutils/file.go new file mode 100644 index 000000000000..5ad4cfaeafa4 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/fileutils/file.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package fileutils + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/fileutils/path.go similarity index 58% rename from vendor/github.com/go-openapi/swag/path.go rename to vendor/github.com/go-openapi/swag/fileutils/path.go index 941bd0176b05..dd09f690bf81 100644 --- a/vendor/github.com/go-openapi/swag/path.go +++ b/vendor/github.com/go-openapi/swag/fileutils/path.go @@ -1,18 +1,7 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 -package swag +package fileutils import ( "os" @@ -21,10 +10,8 @@ import ( "strings" ) -const ( - // GOPATHKey represents the env key for gopath - GOPATHKey = "GOPATH" -) +// GOPATHKey represents the env key for gopath +const GOPATHKey = "GOPATH" // FindInSearchPath finds a package in a provided lists of paths func FindInSearchPath(searchPath, pkg string) string { @@ -40,11 +27,17 @@ func FindInSearchPath(searchPath, pkg string) string { } // FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +// +// Deprecated: this function is no longer relevant with modern go. +// It uses [runtime.GOROOT] under the hood, which is deprecated as of go1.24. func FindInGoSearchPath(pkg string) string { return FindInSearchPath(FullGoSearchPath(), pkg) } // FullGoSearchPath gets the search paths for finding packages +// +// Deprecated: this function is no longer relevant with modern go. +// It uses [runtime.GOROOT] under the hood, which is deprecated as of go1.24. func FullGoSearchPath() string { allPaths := os.Getenv(GOPATHKey) if allPaths == "" { diff --git a/vendor/github.com/go-openapi/swag/fileutils_iface.go b/vendor/github.com/go-openapi/swag/fileutils_iface.go new file mode 100644 index 000000000000..f3e79a0e4bce --- /dev/null +++ b/vendor/github.com/go-openapi/swag/fileutils_iface.go @@ -0,0 +1,33 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/fileutils" + +// GOPATHKey represents the env key for gopath +// +// Deprecated: use [fileutils.GOPATHKey] instead. 
+const GOPATHKey = fileutils.GOPATHKey + +// File represents an uploaded file. +// +// Deprecated: use [fileutils.File] instead. +type File = fileutils.File + +// FindInSearchPath finds a package in a provided lists of paths. +// +// Deprecated: use [fileutils.FindInSearchPath] instead. +func FindInSearchPath(searchPath, pkg string) string { + return fileutils.FindInSearchPath(searchPath, pkg) +} + +// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +// +// Deprecated: use [fileutils.FindInGoSearchPath] instead. +func FindInGoSearchPath(pkg string) string { return fileutils.FindInGoSearchPath(pkg) } + +// FullGoSearchPath gets the search paths for finding packages +// +// Deprecated: use [fileutils.FullGoSearchPath] instead. +func FullGoSearchPath() string { return fileutils.FullGoSearchPath() } diff --git a/vendor/github.com/go-openapi/swag/go.work b/vendor/github.com/go-openapi/swag/go.work new file mode 100644 index 000000000000..1e537f0749b0 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.work @@ -0,0 +1,20 @@ +use ( + . + ./cmdutils + ./conv + ./fileutils + ./jsonname + ./jsonutils + ./jsonutils/adapters/easyjson + ./jsonutils/adapters/testintegration + ./jsonutils/adapters/testintegration/benchmarks + ./jsonutils/fixtures_test + ./loading + ./mangling + ./netutils + ./stringutils + ./typeutils + ./yamlutils +) + +go 1.24.0 diff --git a/vendor/github.com/go-openapi/swag/go.work.sum b/vendor/github.com/go-openapi/swag/go.work.sum new file mode 100644 index 000000000000..edaf71bb2b7b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.work.sum @@ -0,0 +1,9 @@ +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.1/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go deleted file mode 100644 index 20a359bb60a2..000000000000 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "sort" - "strings" - "sync" -) - -var ( - // commonInitialisms are common acronyms that are kept as whole uppercased words. 
- commonInitialisms *indexOfInitialisms - - // initialisms is a slice of sorted initialisms - initialisms []string - - // a copy of initialisms pre-baked as []rune - initialismsRunes [][]rune - initialismsUpperCased [][]rune - - isInitialism func(string) bool - - maxAllocMatches int -) - -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - configuredInitialisms := map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, - } - - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - initialismsRunes = asRunes(initialisms) - initialismsUpperCased = asUpperCased(initialisms) - maxAllocMatches = maxAllocHeuristic(initialismsRunes) - - // a test function - isInitialism = commonInitialisms.isInitialism -} - -func asRunes(in []string) [][]rune { - out := make([][]rune, len(in)) - for i, initialism := range in { - out[i] = []rune(initialism) - } - - return out -} - -func asUpperCased(in []string) [][]rune { - out := make([][]rune, len(in)) - - for i, initialism := range in { - out[i] = []rune(upper(trim(initialism))) - } - - return out -} - -func maxAllocHeuristic(in [][]rune) int { - heuristic := make(map[rune]int) - for _, initialism := range in { - heuristic[initialism[0]]++ - } - - var maxAlloc int - for _, val := range heuristic { - if val > maxAlloc { - maxAlloc = val - } - } - - return maxAlloc -} - -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() - initialismsRunes = asRunes(initialisms) - initialismsUpperCased = asUpperCased(initialisms) -} - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Since go1.9, this may be implemented with sync.Map. 
-type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, _ interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} - -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go deleted file mode 100644 index 7e9902ca314b..000000000000 --- a/vendor/github.com/go-openapi/swag/json.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "bytes" - "encoding/json" - "log" - "reflect" - "strings" - "sync" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// nullJSON represents a JSON object with null type -var nullJSON = []byte("null") - -// DefaultJSONNameProvider the default cache for types -var DefaultJSONNameProvider = NewNameProvider() - -const comma = byte(',') - -var closers map[byte]byte - -func init() { - closers = map[byte]byte{ - '{': '}', - '[': ']', - } -} - -type ejMarshaler interface { - MarshalEasyJSON(w *jwriter.Writer) -} - -type ejUnmarshaler interface { - UnmarshalEasyJSON(w *jlexer.Lexer) -} - -// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaler -// so it takes the fastest option available. 
-func WriteJSON(data interface{}) ([]byte, error) { - if d, ok := data.(ejMarshaler); ok { - jw := new(jwriter.Writer) - d.MarshalEasyJSON(jw) - return jw.BuildBytes() - } - if d, ok := data.(json.Marshaler); ok { - return d.MarshalJSON() - } - return json.Marshal(data) -} - -// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaler -// so it takes the fastest option available -func ReadJSON(data []byte, value interface{}) error { - trimmedData := bytes.Trim(data, "\x00") - if d, ok := value.(ejUnmarshaler); ok { - jl := &jlexer.Lexer{Data: trimmedData} - d.UnmarshalEasyJSON(jl) - return jl.Error() - } - if d, ok := value.(json.Unmarshaler); ok { - return d.UnmarshalJSON(trimmedData) - } - return json.Unmarshal(trimmedData, value) -} - -// DynamicJSONToStruct converts an untyped json structure into a struct -func DynamicJSONToStruct(data interface{}, target interface{}) error { - // TODO: convert straight to a json typed map (mergo + iterate?) - b, err := WriteJSON(data) - if err != nil { - return err - } - return ReadJSON(b, target) -} - -// ConcatJSON concatenates multiple json objects efficiently -func ConcatJSON(blobs ...[]byte) []byte { - if len(blobs) == 0 { - return nil - } - - last := len(blobs) - 1 - for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { - // strips trailing null objects - last-- - if last < 0 { - // there was nothing but "null"s or nil... - return nil - } - } - if last == 0 { - return blobs[0] - } - - var opening, closing byte - var idx, a int - buf := bytes.NewBuffer(nil) - - for i, b := range blobs[:last+1] { - if b == nil || bytes.Equal(b, nullJSON) { - // a null object is in the list: skip it - continue - } - if len(b) > 0 && opening == 0 { // is this an array or an object? - opening, closing = b[0], closers[b[0]] - } - - if opening != '{' && opening != '[' { - continue // don't know how to concatenate non container objects - } - - if len(b) < 3 { // yep empty but also the last one, so closing this thing - if i == last && a > 0 { - if err := buf.WriteByte(closing); err != nil { - log.Println(err) - } - } - continue - } - - idx = 0 - if a > 0 { // we need to join with a comma for everything beyond the first non-empty item - if err := buf.WriteByte(comma); err != nil { - log.Println(err) - } - idx = 1 // this is not the first or the last so we want to drop the leading bracket - } - - if i != last { // not the last one, strip brackets - if _, err := buf.Write(b[idx : len(b)-1]); err != nil { - log.Println(err) - } - } else { // last one, strip only the leading bracket - if _, err := buf.Write(b[idx:]); err != nil { - log.Println(err) - } - } - a++ - } - // somehow it ended up being empty, so provide a default value - if buf.Len() == 0 { - if err := buf.WriteByte(opening); err != nil { - log.Println(err) - } - if err := buf.WriteByte(closing); err != nil { - log.Println(err) - } - } - return buf.Bytes() -} - -// ToDynamicJSON turns an object into a properly JSON typed structure -func ToDynamicJSON(data interface{}) interface{} { - // TODO: convert straight to a json typed map (mergo + iterate?) 
- b, err := json.Marshal(data) - if err != nil { - log.Println(err) - } - var res interface{} - if err := json.Unmarshal(b, &res); err != nil { - log.Println(err) - } - return res -} - -// FromDynamicJSON turns an object into a properly JSON typed structure -func FromDynamicJSON(data, target interface{}) error { - b, err := json.Marshal(data) - if err != nil { - log.Println(err) - } - return json.Unmarshal(b, target) -} - -// NameProvider represents an object capable of translating from go property names -// to json property names -// This type is thread-safe. -type NameProvider struct { - lock *sync.Mutex - index map[reflect.Type]nameIndex -} - -type nameIndex struct { - jsonNames map[string]string - goNames map[string]string -} - -// NewNameProvider creates a new name provider -func NewNameProvider() *NameProvider { - return &NameProvider{ - lock: &sync.Mutex{}, - index: make(map[reflect.Type]nameIndex), - } -} - -func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { - for i := 0; i < tpe.NumField(); i++ { - targetDes := tpe.Field(i) - - if targetDes.PkgPath != "" { // unexported - continue - } - - if targetDes.Anonymous { // walk embedded structures tree down first - buildnameIndex(targetDes.Type, idx, reverseIdx) - continue - } - - if tag := targetDes.Tag.Get("json"); tag != "" { - - parts := strings.Split(tag, ",") - if len(parts) == 0 { - continue - } - - nm := parts[0] - if nm == "-" { - continue - } - if nm == "" { // empty string means we want to use the Go name - nm = targetDes.Name - } - - idx[nm] = targetDes.Name - reverseIdx[targetDes.Name] = nm - } - } -} - -func newNameIndex(tpe reflect.Type) nameIndex { - var idx = make(map[string]string, tpe.NumField()) - var reverseIdx = make(map[string]string, tpe.NumField()) - - buildnameIndex(tpe, idx, reverseIdx) - return nameIndex{jsonNames: idx, goNames: reverseIdx} -} - -// GetJSONNames gets all the json property names for a type -func (n *NameProvider) GetJSONNames(subject interface{}) []string { - n.lock.Lock() - defer n.lock.Unlock() - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - - res := make([]string, 0, len(names.jsonNames)) - for k := range names.jsonNames { - res = append(res, k) - } - return res -} - -// GetJSONName gets the json name for a go property name -func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetJSONNameForType(tpe, name) -} - -// GetJSONNameForType gets the json name for a go property name on a given type -func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { - n.lock.Lock() - defer n.lock.Unlock() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - nme, ok := names.goNames[name] - return nme, ok -} - -func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { - names := newNameIndex(tpe) - n.index[tpe] = names - return names -} - -// GetGoName gets the go name for a json property name -func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetGoNameForType(tpe, name) -} - -// GetGoNameForType gets the go name for a given type for a json property name -func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { - n.lock.Lock() - defer n.lock.Unlock() - names, ok := n.index[tpe] - if !ok { - names = 
n.makeNameIndex(tpe) - } - nme, ok := names.jsonNames[name] - return nme, ok -} diff --git a/vendor/github.com/go-openapi/swag/jsonname/LICENSE b/vendor/github.com/go-openapi/swag/jsonname/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/jsonname/doc.go b/vendor/github.com/go-openapi/swag/jsonname/doc.go new file mode 100644 index 000000000000..79232eaca476 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package jsonname is a provider of json property names from go properties. +package jsonname diff --git a/vendor/github.com/go-openapi/swag/jsonname/name_provider.go b/vendor/github.com/go-openapi/swag/jsonname/name_provider.go new file mode 100644 index 000000000000..8eaf1bece8d6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname/name_provider.go @@ -0,0 +1,138 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package jsonname + +import ( + "reflect" + "strings" + "sync" +) + +// DefaultJSONNameProvider is the default cache for types. +var DefaultJSONNameProvider = NewNameProvider() + +// NameProvider represents an object capable of translating from go property names +// to json property names. +// +// This type is thread-safe. +// +// See [github.com/go-openapi/jsonpointer.Pointer] for an example. 
+type NameProvider struct { + lock *sync.Mutex + index map[reflect.Type]nameIndex +} + +type nameIndex struct { + jsonNames map[string]string + goNames map[string]string +} + +// NewNameProvider creates a new name provider +func NewNameProvider() *NameProvider { + return &NameProvider{ + lock: &sync.Mutex{}, + index: make(map[reflect.Type]nameIndex), + } +} + +func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { + for i := 0; i < tpe.NumField(); i++ { + targetDes := tpe.Field(i) + + if targetDes.PkgPath != "" { // unexported + continue + } + + if targetDes.Anonymous { // walk embedded structures tree down first + buildnameIndex(targetDes.Type, idx, reverseIdx) + continue + } + + if tag := targetDes.Tag.Get("json"); tag != "" { + + parts := strings.Split(tag, ",") + if len(parts) == 0 { + continue + } + + nm := parts[0] + if nm == "-" { + continue + } + if nm == "" { // empty string means we want to use the Go name + nm = targetDes.Name + } + + idx[nm] = targetDes.Name + reverseIdx[targetDes.Name] = nm + } + } +} + +func newNameIndex(tpe reflect.Type) nameIndex { + var idx = make(map[string]string, tpe.NumField()) + var reverseIdx = make(map[string]string, tpe.NumField()) + + buildnameIndex(tpe, idx, reverseIdx) + return nameIndex{jsonNames: idx, goNames: reverseIdx} +} + +// GetJSONNames gets all the json property names for a type +func (n *NameProvider) GetJSONNames(subject any) []string { + n.lock.Lock() + defer n.lock.Unlock() + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + + res := make([]string, 0, len(names.jsonNames)) + for k := range names.jsonNames { + res = append(res, k) + } + return res +} + +// GetJSONName gets the json name for a go property name +func (n *NameProvider) GetJSONName(subject any, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetJSONNameForType(tpe, name) +} + +// GetJSONNameForType gets the json name for a go property name on a given type +func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.goNames[name] + return nme, ok +} + +// GetGoName gets the go name for a json property name +func (n *NameProvider) GetGoName(subject any, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetGoNameForType(tpe, name) +} + +// GetGoNameForType gets the go name for a given type for a json property name +func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.jsonNames[name] + return nme, ok +} + +func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { + names := newNameIndex(tpe) + n.index[tpe] = names + return names +} diff --git a/vendor/github.com/go-openapi/swag/jsonname_iface.go b/vendor/github.com/go-openapi/swag/jsonname_iface.go new file mode 100644 index 000000000000..303a007f6f4c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname_iface.go @@ -0,0 +1,24 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "github.com/go-openapi/swag/jsonname" +) + +// DefaultJSONNameProvider is the default cache for types +// +// 
Deprecated: use [jsonname.DefaultJSONNameProvider] instead. +var DefaultJSONNameProvider = jsonname.DefaultJSONNameProvider + +// NameProvider represents an object capable of translating from go property names +// to json property names. +// +// Deprecated: use [jsonname.NameProvider] instead. +type NameProvider = jsonname.NameProvider + +// NewNameProvider creates a new name provider +// +// Deprecated: use [jsonname.NewNameProvider] instead. +func NewNameProvider() *NameProvider { return jsonname.NewNameProvider() } diff --git a/vendor/github.com/go-openapi/swag/jsonutils/LICENSE b/vendor/github.com/go-openapi/swag/jsonutils/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/jsonutils/README.md b/vendor/github.com/go-openapi/swag/jsonutils/README.md new file mode 100644 index 000000000000..d745cdb466e2 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/README.md @@ -0,0 +1,108 @@ + # jsonutils + +`jsonutils` exposes a few tools to work with JSON: + +- a fast, simple `Concat` to concatenate (not merge) JSON objects and arrays +- `FromDynamicJSON` to convert a data structure into a "dynamic JSON" data structure +- `ReadJSON` and `WriteJSON` behave like `json.Unmarshal` and `json.Marshal`, + with the ability to use another underlying serialization library through an `Adapter` + configured at runtime +- a `JSONMapSlice` structure that may be used to store JSON objects with the order of keys maintained + +## Dynamic JSON + +We call "dynamic JSON" the go data structure that results from unmarshaling JSON like this: + +```go + var value any + jsonBytes := `{"a": 1, ... }` + _ = json.Unmarshal(jsonBytes, &value) +``` + +In this configuration, the standard library mappings are as follows: + +| JSON | go | +|-----------|------------------| +| `number` | `float64` | +| `string` | `string` | +| `boolean` | `bool` | +| `null` | `nil` | +| `object` | `map[string]any` | +| `array` | `[]any` | + +## Map slices + +When using `JSONMapSlice`, the ordering of keys is ensured by replacing +mappings to `map[string]any` by a `JSONMapSlice` which is an (ordered) +slice of `JSONMapItem`s. + +Notice that a similar feature is available for YAML (see [`yamlutils`](../yamlutils)), +with a `YAMLMapSlice` type based on the `JSONMapSlice`. + +`JSONMapSlice` is similar to an ordered map, but the keys are not retrieved +in constant time. + +Another difference with the the above standard mappings is that numbers don't always map +to a `float64`: if the value is a JSON integer, it unmarshals to `int64`. + +See also [some examples](https://pkg.go.dev/github.com/go-openapi/swag/jsonutils#pkg-examples) + +## Adapters + +`ReadJSON`, `WriteJSON` and `FromDynamicJSON` (which is a combination of the latter two) +are wrappers on top of `json.Unmarshal` and `json.Marshal`. + +By default, the adapter merely wraps the standard library. + +The adapter may be used to register other JSON serialization libraries, +possibly several ones at the same time. 
+ +If the value passed is identified as an "ordered map" (i.e. implements `ifaces.Ordered` +or `ifaces.SetOrdered`), the adapter favors the "ordered" JSON behavior and tries to +find a registered implementation that supports ordered keys in objects. + +Our standard library implementation supports this. + +As of `v0.25.0`, such an adapter is provided for the popular `mailru/easyjson` +library; it kicks in when the passed values support the `easyjson.Unmarshaler` +or `easyjson.Marshaler` interfaces. + +In the future, we plan to support more libraries from the go JSON +serializer ecosystem. + +## Registering an adapter + +In package `github.com/go-openapi/swag/jsonutils/adapters`, several adapters are available. + +Each adapter is an independent go module. Hence you'll pull in its dependencies only if you import it. + +At this moment we provide: +* `stdlib`: JSON adapter based on the standard library +* `easyjson`: JSON adapter based on `github.com/mailru/easyjson` + +The adapters provide the basic `Marshal` and `Unmarshal` capabilities, plus an implementation +of the `MapSlice` pattern. + +You may also build your own adapter based on your specific use-case. An adapter is not required to implement +all capabilities. + +Every adapter comes with a `Register` function, possibly with some options, to register the adapter +with a global registry. + +For example, to enable `easyjson` to be used in `ReadJSON` and `WriteJSON`, you would write something like: + +```go + import ( + "github.com/go-openapi/swag/jsonutils/adapters" + easyjson "github.com/go-openapi/swag/jsonutils/adapters/easyjson/json" + ) + + func init() { + easyjson.Register(adapters.Registry) + } +``` + +You may register several adapters. In this case, capability matching is evaluated starting from the most recently registered +adapter (LIFO). + +## [Benchmarks](./adapters/testintegration/benchmarks/README.md)
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go new file mode 100644 index 000000000000..76d3898fca5e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go @@ -0,0 +1,8 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package adapters exposes a registry of adapters to multiple +// JSON serialization libraries. +// +// All interfaces are defined in the ifaces package; see for instance [ifaces.Adapter]. +package adapters
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go new file mode 100644 index 000000000000..1fd43a1fad51 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package ifaces exposes all interfaces to work with adapters.
+package ifaces
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go new file mode 100644 index 000000000000..7805e5e5e398 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go @@ -0,0 +1,84 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package ifaces + +import ( + _ "encoding/json" // for documentation purposes + "iter" +) + +// Ordered knows how to iterate over the (key,value) pairs of a JSON object. +type Ordered interface { + OrderedItems() iter.Seq2[string, any] +} + +// SetOrdered knows how to append or update the keys of a JSON object, +// given an iterator over (key,value) pairs. +// +// If the provided iterator is nil then the receiver should be set to nil. +type SetOrdered interface { + SetOrderedItems(iter.Seq2[string, any]) +} + +// OrderedMap represents a JSON object (i.e. like a map[string]any), +// and knows how to serialize and deserialize JSON with the order of keys maintained. +type OrderedMap interface { + Ordered + SetOrdered + + OrderedMarshalJSON() ([]byte, error) + OrderedUnmarshalJSON([]byte) error +} + +// MarshalAdapter behaves like the standard library [json.Marshal]. +type MarshalAdapter interface { + Poolable + + Marshal(any) ([]byte, error) +} + +// OrderedMarshalAdapter behaves like the standard library [json.Marshal], preserving the order of keys in objects. +type OrderedMarshalAdapter interface { + Poolable + + OrderedMarshal(Ordered) ([]byte, error) +} + +// UnmarshalAdapter behaves like the standard library [json.Unmarshal]. +type UnmarshalAdapter interface { + Poolable + + Unmarshal([]byte, any) error +} + +// OrderedUnmarshalAdapter behaves like the standard library [json.Unmarshal], preserving the order of keys in objects. +type OrderedUnmarshalAdapter interface { + Poolable + + OrderedUnmarshal([]byte, SetOrdered) error +} + +// Adapter exposes an interface like the standard [json] library. +type Adapter interface { + MarshalAdapter + UnmarshalAdapter + + OrderedAdapter +} + +// OrderedAdapter exposes interfaces to process JSON and keep the order of object keys. +type OrderedAdapter interface { + OrderedMarshalAdapter + OrderedUnmarshalAdapter + NewOrderedMap(capacity int) OrderedMap +} + +type Poolable interface { + // Self-redeem: for [Adapter] s that are allocated from a pool. + // The [Adapter] must not be used after calling [Redeem]. + Redeem() + + // Reset the state of the [Adapter], if any. + Reset() +}
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go new file mode 100644 index 000000000000..2d6c69f4e602 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package ifaces + +import ( + "strings" +) + +// Capability indicates what a JSON adapter is capable of.
+type Capability uint8 + +const ( + CapabilityMarshalJSON Capability = 1 << iota + CapabilityUnmarshalJSON + CapabilityOrderedMarshalJSON + CapabilityOrderedUnmarshalJSON + CapabilityOrderedMap +) + +func (c Capability) String() string { + switch c { + case CapabilityMarshalJSON: + return "MarshalJSON" + case CapabilityUnmarshalJSON: + return "UnmarshalJSON" + case CapabilityOrderedMarshalJSON: + return "OrderedMarshalJSON" + case CapabilityOrderedUnmarshalJSON: + return "OrderedUnmarshalJSON" + case CapabilityOrderedMap: + return "OrderedMap" + default: + return "" + } +} + +// Capabilities holds several unitary capability flags +type Capabilities uint8 + +// Has some capability flag enabled. +func (c Capabilities) Has(capability Capability) bool { + return Capability(c)&capability > 0 +} + +func (c Capabilities) String() string { + var w strings.Builder + + first := true + for _, capability := range []Capability{ + CapabilityMarshalJSON, + CapabilityUnmarshalJSON, + CapabilityOrderedMarshalJSON, + CapabilityOrderedUnmarshalJSON, + CapabilityOrderedMap, + } { + if c.Has(capability) { + if !first { + w.WriteByte('|') + } else { + first = false + } + w.WriteString(capability.String()) + } + } + + return w.String() +} + +const ( + AllCapabilities Capabilities = Capabilities(uint8(CapabilityMarshalJSON) | + uint8(CapabilityUnmarshalJSON) | + uint8(CapabilityOrderedMarshalJSON) | + uint8(CapabilityOrderedUnmarshalJSON) | + uint8(CapabilityOrderedMap)) + + AllUnorderedCapabilities Capabilities = Capabilities(uint8(CapabilityMarshalJSON) | uint8(CapabilityUnmarshalJSON)) +) + +// RegistryEntry describes how any given adapter registers its capabilities to the [Registrar]. +type RegistryEntry struct { + Who string + What Capabilities + Constructor func() Adapter + Support func(what Capability, value any) bool +} + +// Registrar is a type that knows how to keep registration calls from adapters. +type Registrar interface { + RegisterFor(RegistryEntry) +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go new file mode 100644 index 000000000000..3062acaff261 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go @@ -0,0 +1,229 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package adapters + +import ( + "fmt" + "reflect" + "slices" + "sync" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + stdlib "github.com/go-openapi/swag/jsonutils/adapters/stdlib/json" +) + +// Registry holds the global registry for registered adapters. +var Registry = NewRegistrar() + +var ( + defaultRegistered = stdlib.Register + + _ ifaces.Registrar = &Registrar{} +) + +type registryError string + +func (e registryError) Error() string { + return string(e) +} + +// ErrRegistry indicates an error returned by the [Registrar]. +var ErrRegistry registryError = "JSON adapters registry error" + +type registry []*ifaces.RegistryEntry + +// Registrar holds registered [ifaces.Adapters] for different serialization capabilities. +// +// Internally, it maintains a cache for data types that favor a given adapter. 
+type Registrar struct { + marshalerRegistry registry + unmarshalerRegistry registry + orderedMarshalerRegistry registry + orderedUnmarshalerRegistry registry + orderedMapRegistry registry + + gmx sync.RWMutex + + // cache indexed by value type, so we don't have to lookup + marshalerCache map[reflect.Type]*ifaces.RegistryEntry + unmarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedMarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedUnmarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedMapCache map[reflect.Type]*ifaces.RegistryEntry +} + +func NewRegistrar() *Registrar { + r := &Registrar{} + + r.marshalerRegistry = make(registry, 0, 1) + r.unmarshalerRegistry = make(registry, 0, 1) + r.orderedMarshalerRegistry = make(registry, 0, 1) + r.orderedUnmarshalerRegistry = make(registry, 0, 1) + r.orderedMapRegistry = make(registry, 0, 1) + + r.marshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.unmarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedMarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedUnmarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedMapCache = make(map[reflect.Type]*ifaces.RegistryEntry) + + defaultRegistered(r) + + return r +} + +// ClearCache resets the internal type cache. +func (r *Registrar) ClearCache() { + r.gmx.Lock() + r.clearCache() + r.gmx.Unlock() +} + +// Reset the [Registrar] to its defaults. +func (r *Registrar) Reset() { + r.gmx.Lock() + r.clearCache() + r.marshalerRegistry = r.marshalerRegistry[:0] + r.unmarshalerRegistry = r.unmarshalerRegistry[:0] + r.orderedMarshalerRegistry = r.orderedMarshalerRegistry[:0] + r.orderedUnmarshalerRegistry = r.orderedUnmarshalerRegistry[:0] + r.orderedMapRegistry = r.orderedMapRegistry[:0] + r.gmx.Unlock() + + defaultRegistered(r) +} + +// RegisterFor registers an adapter for some JSON capabilities. +func (r *Registrar) RegisterFor(entry ifaces.RegistryEntry) { + r.gmx.Lock() + if entry.What.Has(ifaces.CapabilityMarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityMarshalJSON) + r.marshalerRegistry = slices.Insert(r.marshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityUnmarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityUnmarshalJSON) + r.unmarshalerRegistry = slices.Insert(r.unmarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedMarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedMarshalJSON) + r.orderedMarshalerRegistry = slices.Insert(r.orderedMarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedUnmarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedUnmarshalJSON) + r.orderedUnmarshalerRegistry = slices.Insert(r.orderedUnmarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedMap) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedMap) + r.orderedMapRegistry = slices.Insert(r.orderedMapRegistry, 0, &e) + } + r.gmx.Unlock() +} + +// AdapterFor returns an [ifaces.Adapter] that supports this capability for this type of value. +// +// The [ifaces.Adapter] may be redeemed to its pool using its Redeem() method, for adapters that support global +// pooling. When this is not the case, the redeem function is just a no-operation. 
+func (r *Registrar) AdapterFor(capability ifaces.Capability, value any) ifaces.Adapter { + entry := r.findFirstFor(capability, value) + if entry == nil { + return nil + } + + return entry.Constructor() +} + +func (r *Registrar) clearCache() { + clear(r.marshalerCache) + clear(r.unmarshalerCache) + clear(r.orderedMarshalerCache) + clear(r.orderedUnmarshalerCache) + clear(r.orderedMapCache) +} + +func (r *Registrar) findFirstFor(capability ifaces.Capability, value any) *ifaces.RegistryEntry { + switch capability { + case ifaces.CapabilityMarshalJSON: + return r.findFirstInRegistryFor(r.marshalerRegistry, r.marshalerCache, capability, value) + case ifaces.CapabilityUnmarshalJSON: + return r.findFirstInRegistryFor(r.unmarshalerRegistry, r.unmarshalerCache, capability, value) + case ifaces.CapabilityOrderedMarshalJSON: + return r.findFirstInRegistryFor(r.orderedMarshalerRegistry, r.orderedMarshalerCache, capability, value) + case ifaces.CapabilityOrderedUnmarshalJSON: + return r.findFirstInRegistryFor(r.orderedUnmarshalerRegistry, r.orderedUnmarshalerCache, capability, value) + case ifaces.CapabilityOrderedMap: + return r.findFirstInRegistryFor(r.orderedMapRegistry, r.orderedMapCache, capability, value) + default: + panic(fmt.Errorf("unsupported capability %d: %w", capability, ErrRegistry)) + } +} + +func (r *Registrar) findFirstInRegistryFor(reg registry, cache map[reflect.Type]*ifaces.RegistryEntry, capability ifaces.Capability, value any) *ifaces.RegistryEntry { + r.gmx.RLock() + if len(reg) > 1 { + if entry, ok := cache[reflect.TypeOf(value)]; ok { + // cache hit + r.gmx.RUnlock() + return entry + } + } + + for _, entry := range reg { + if !entry.Support(capability, value) { + continue + } + + r.gmx.RUnlock() + + // update the internal cache + r.gmx.Lock() + cache[reflect.TypeOf(value)] = entry + r.gmx.Unlock() + + return entry + } + + // no adapter found + r.gmx.RUnlock() + + return nil +} + +// MarshalAdapterFor returns the first adapter that knows how to Marshal this type of value. +func MarshalAdapterFor(value any) ifaces.MarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityMarshalJSON, value) +} + +// OrderedMarshalAdapterFor returns the first adapter that knows how to OrderedMarshal this type of value. +func OrderedMarshalAdapterFor(value ifaces.Ordered) ifaces.OrderedMarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityOrderedMarshalJSON, value) +} + +// UnmarshalAdapterFor returns the first adapter that knows how to Unmarshal this type of value. +func UnmarshalAdapterFor(value any) ifaces.UnmarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityUnmarshalJSON, value) +} + +// OrderedUnmarshalAdapterFor provides the first adapter that knows how to OrderedUnmarshal this type of value. +func OrderedUnmarshalAdapterFor(value ifaces.SetOrdered) ifaces.OrderedUnmarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityOrderedUnmarshalJSON, value) +} + +// NewOrderedMap provides the "ordered map" implementation provided by the registry. 
+func NewOrderedMap(capacity int) ifaces.OrderedMap { + var v any + adapter := Registry.AdapterFor(ifaces.CapabilityOrderedUnmarshalJSON, v) + if adapter == nil { + return nil + } + + defer adapter.Redeem() + return adapter.NewOrderedMap(capacity) +} + +func noopRedeemer() {} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go new file mode 100644 index 000000000000..0213ff5c29f1 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go @@ -0,0 +1,115 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + stdjson "encoding/json" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + "github.com/go-openapi/swag/typeutils" +) + +const sensibleBufferSize = 8192 + +type jsonError string + +func (e jsonError) Error() string { + return string(e) +} + +// ErrStdlib indicates that an error comes from the stdlib JSON adapter +var ErrStdlib jsonError = "error from the JSON adapter stdlib" + +var _ ifaces.Adapter = &Adapter{} + +type Adapter struct { +} + +// NewAdapter yields an [ifaces.Adapter] using the standard library. +func NewAdapter() *Adapter { + return &Adapter{} +} + +func (a *Adapter) Marshal(value any) ([]byte, error) { + return stdjson.Marshal(value) +} + +func (a *Adapter) Unmarshal(data []byte, value any) error { + return stdjson.Unmarshal(data, value) +} + +func (a *Adapter) OrderedMarshal(value ifaces.Ordered) ([]byte, error) { + w := poolOfWriters.Borrow() + defer func() { + poolOfWriters.Redeem(w) + }() + + if typeutils.IsNil(value) { + w.RawString("null") + + return w.BuildBytes() + } + + w.RawByte('{') + first := true + for k, v := range value.OrderedItems() { + if first { + first = false + } else { + w.RawByte(',') + } + + w.String(k) + w.RawByte(':') + + switch val := v.(type) { + case ifaces.Ordered: + w.Raw(a.OrderedMarshal(val)) + default: + w.Raw(stdjson.Marshal(v)) + } + } + + w.RawByte('}') + + return w.BuildBytes() +} + +func (a *Adapter) OrderedUnmarshal(data []byte, value ifaces.SetOrdered) error { + var m MapSlice + if err := m.OrderedUnmarshalJSON(data); err != nil { + return err + } + + if typeutils.IsNil(m) { + // force input value to nil + value.SetOrderedItems(nil) + + return nil + } + + value.SetOrderedItems(m.OrderedItems()) + + return nil +} + +func (a *Adapter) NewOrderedMap(capacity int) ifaces.OrderedMap { + m := make(MapSlice, 0, capacity) + + return &m +} + +// Redeem the [Adapter] when it comes from a pool. +// +// The adapter becomes immediately unusable once redeemed. +func (a *Adapter) Redeem() { + if a == nil { + return + } + + RedeemAdapter(a) +} + +func (a *Adapter) Reset() { +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go new file mode 100644 index 000000000000..5ea1b4404252 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package json implements an [ifaces.Adapter] using the standard library. 
+package json diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go new file mode 100644 index 000000000000..b5aa1c7972e7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go @@ -0,0 +1,320 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + stdjson "encoding/json" + "errors" + "fmt" + "io" + "math" + "strconv" + + "github.com/go-openapi/swag/conv" +) + +type token struct { + stdjson.Token +} + +func (t token) String() string { + if t == invalidToken { + return "invalid token" + } + if t == eofToken { + return "EOF" + } + + return fmt.Sprintf("%v", t.Token) +} + +func (t token) Kind() tokenKind { + switch t.Token.(type) { + case nil: + return tokenNull + case stdjson.Delim: + return tokenDelim + case bool: + return tokenBool + case float64: + return tokenFloat + case stdjson.Number: + return tokenNumber + case string: + return tokenString + default: + return tokenUndef + } +} + +func (t token) Delim() byte { + r, ok := t.Token.(stdjson.Delim) + if !ok { + return 0 + } + + return byte(r) +} + +type tokenKind uint8 + +const ( + tokenUndef tokenKind = iota + tokenString + tokenNumber + tokenFloat + tokenBool + tokenNull + tokenDelim +) + +var ( + invalidToken = token{ + Token: stdjson.Token(struct{}{}), + } + + eofToken = token{ + Token: stdjson.Token(&struct{}{}), + } + + undefToken = token{ + Token: stdjson.Token(uint8(0)), + } +) + +// jlexer apes easyjson's jlexer, but uses the standard library decoder under the hood. +type jlexer struct { + buf *bytesReader + dec *stdjson.Decoder + err error + // current token + next token + // started bool +} + +type bytesReader struct { + buf []byte + offset int +} + +func (b *bytesReader) Reset() { + b.buf = nil + b.offset = 0 +} + +func (b *bytesReader) Read(p []byte) (int, error) { + if b.offset >= len(b.buf) { + return 0, io.EOF + } + + n := len(p) + buf := b.buf[b.offset:] + m := len(buf) + + if n >= m { + copy(p, buf) + b.offset += m + + return m, nil + } + + copy(p, buf[:n]) + b.offset += n + + return n, nil +} + +var _ io.Reader = &bytesReader{} + +func newLexer(data []byte) *jlexer { + l := &jlexer{ + // current: undefToken, + next: undefToken, + } + l.buf = &bytesReader{ + buf: data, + } + l.dec = stdjson.NewDecoder(l.buf) // unfortunately, cannot pool this + + return l +} + +func (l *jlexer) Reset() { + l.err = nil + l.next = undefToken + // leave l.dec and l.buf alone, since they are replaced at every Borrow +} + +func (l *jlexer) Error() error { + return l.err +} + +func (l *jlexer) SetErr(err error) { + l.err = err +} + +func (l *jlexer) Ok() bool { + return l.err == nil +} + +// NextToken consumes a token +func (l *jlexer) NextToken() token { + if !l.Ok() { + return invalidToken + } + + if l.next != undefToken { + next := l.next + l.next = undefToken + + return next + } + + return l.fetchToken() +} + +// PeekToken returns the next token without consuming it +func (l *jlexer) PeekToken() token { + if l.next == undefToken { + l.next = l.fetchToken() + } + + return l.next +} + +func (l *jlexer) Skip() { + _ = l.NextToken() +} + +func (l *jlexer) IsDelim(c byte) bool { + if !l.Ok() { + return false + } + + next := l.PeekToken() + if next.Kind() != tokenDelim { + return false + } + + if next.Delim() != c { + return false + } + + return true +} + +func (l *jlexer) IsNull() bool { + if !l.Ok() { + return 
false + } + + next := l.PeekToken() + + return next.Kind() == tokenNull +} + +func (l *jlexer) Delim(c byte) { + if !l.Ok() { + return + } + + tok := l.NextToken() + if tok.Kind() != tokenDelim { + l.err = fmt.Errorf("expected a delimiter token but got '%v': %w", tok, ErrStdlib) + + return + } + + if tok.Delim() != c { + l.err = fmt.Errorf("expected delimiter '%q' but got '%q': %w", c, tok.Delim(), ErrStdlib) + } +} + +func (l *jlexer) Null() { + if !l.Ok() { + return + } + + tok := l.NextToken() + if tok.Kind() != tokenNull { + l.err = fmt.Errorf("expected a null token but got '%v': %w", tok, ErrStdlib) + } +} + +func (l *jlexer) Number() any { + if !l.Ok() { + return 0 + } + + tok := l.NextToken() + + switch tok.Kind() { //nolint:exhaustive + case tokenNumber: + n := tok.Token.(stdjson.Number).String() + f, _ := strconv.ParseFloat(n, 64) + if conv.IsFloat64AJSONInteger(f) { + return int64(math.Trunc(f)) + } + + return f + + case tokenFloat: + f := tok.Token.(float64) + if conv.IsFloat64AJSONInteger(f) { + return int64(math.Trunc(f)) + } + + return f + + default: + l.err = fmt.Errorf("expected a number token but got '%v': %w", tok, ErrStdlib) + + return 0 + } +} + +func (l *jlexer) Bool() bool { + if !l.Ok() { + return false + } + + tok := l.NextToken() + if tok.Kind() != tokenBool { + l.err = fmt.Errorf("expected a bool token but got '%v': %w", tok, ErrStdlib) + + return false + } + + return tok.Token.(bool) +} + +func (l *jlexer) String() string { + if !l.Ok() { + return "" + } + + tok := l.NextToken() + if tok.Kind() != tokenString { + l.err = fmt.Errorf("expected a string token but got '%v': %w", tok, ErrStdlib) + + return "" + } + + return tok.Token.(string) +} + +// Commas and colons are elided. +func (l *jlexer) fetchToken() token { + jtok, err := l.dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + return eofToken + } + + l.err = errors.Join(err, ErrStdlib) + return invalidToken + } + + return token{Token: jtok} +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go new file mode 100644 index 000000000000..54deef406f33 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + stdjson "encoding/json" + "fmt" + "iter" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +var _ ifaces.OrderedMap = &MapSlice{} + +// MapSlice represents a JSON object, with the order of keys maintained. +type MapSlice []MapItem + +func (s MapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +func (s *MapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + m = append(m, MapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, MapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders a [MapSlice] as JSON bytes, preserving the order of keys. 
+func (s MapSlice) MarshalJSON() ([]byte, error) { + return s.OrderedMarshalJSON() +} + +func (s MapSlice) OrderedMarshalJSON() ([]byte, error) { + w := poolOfWriters.Borrow() + defer func() { + poolOfWriters.Redeem(w) + }() + + s.marshalObject(w) + + return w.BuildBytes() // this clones data, so it's okay to redeem the writer and its buffer +} + +// UnmarshalJSON builds a [MapSlice] from JSON bytes, preserving the order of keys. +// +// Inner objects are unmarshaled as [MapSlice] slices and not map[string]any. +func (s *MapSlice) UnmarshalJSON(data []byte) error { + return s.OrderedUnmarshalJSON(data) +} + +func (s *MapSlice) OrderedUnmarshalJSON(data []byte) error { + l := poolOfLexers.Borrow(data) + defer func() { + poolOfLexers.Redeem(l) + }() + + s.unmarshalObject(l) + + return l.Error() +} + +func (s MapSlice) marshalObject(w *jwriter) { + if s == nil { + w.RawString("null") + + return + } + + w.RawByte('{') + + if len(s) == 0 { + w.RawByte('}') + + return + } + + s[0].marshalJSON(w) + + for i := 1; i < len(s); i++ { + w.RawByte(',') + s[i].marshalJSON(w) + } + + w.RawByte('}') +} + +func (s *MapSlice) unmarshalObject(in *jlexer) { + if in.IsNull() { + in.Skip() + + return + } + + in.Delim('{') // consume token + if !in.Ok() { + return + } + + result := make(MapSlice, 0) + + for in.Ok() && !in.IsDelim('}') { + var mi MapItem + + mi.unmarshalKeyValue(in) + result = append(result, mi) + } + + in.Delim('}') + + if !in.Ok() { + return + } + + *s = result +} + +// MapItem represents the value of a key in a JSON object held by [MapSlice]. +// +// Notice that [MapItem] should not be marshaled to or unmarshaled from JSON directly, +// use this type as part of a [MapSlice] when dealing with JSON bytes. +type MapItem struct { + Key string + Value any +} + +func (s MapItem) marshalJSON(w *jwriter) { + w.String(s.Key) + w.RawByte(':') + w.Raw(stdjson.Marshal(s.Value)) +} + +func (s *MapItem) unmarshalKeyValue(in *jlexer) { + key := in.String() // consume string + value := s.asInterface(in) // consume any value, including termination tokens '}' or ']' + + if !in.Ok() { + return + } + + s.Key = key + s.Value = value +} + +func (s *MapItem) unmarshalArray(in *jlexer) []any { + if in.IsNull() { + in.Skip() + + return nil + } + + in.Delim('[') // consume token + if !in.Ok() { + return nil + } + + ret := make([]any, 0) + + for in.Ok() && !in.IsDelim(']') { + ret = append(ret, s.asInterface(in)) + } + + in.Delim(']') + if !in.Ok() { + return nil + } + + return ret +} + +// asInterface is very much like [jlexer.Lexer.Interface], but unmarshals an object +// into a [MapSlice], not a map[string]any. +// +// We have to force parsing errors somehow, since [jlexer.Lexer] doesn't let us +// set a parsing error directly. 
+func (s *MapItem) asInterface(in *jlexer) any { + if !in.Ok() { + return nil + } + + tok := in.PeekToken() // look-ahead what the next token looks like + kind := tok.Kind() + + switch kind { + case tokenString: + return in.String() // consume string + + case tokenNumber, tokenFloat: + return in.Number() + + case tokenBool: + return in.Bool() + + case tokenNull: + in.Null() + + return nil + + case tokenDelim: + switch tok.Delim() { + case '{': // not consumed yet + ret := make(MapSlice, 0) + ret.unmarshalObject(in) // consumes the terminating '}' + + if in.Ok() { + return ret + } + + // lexer is in an error state: will exhaust + return nil + + case '[': // not consumed yet + return s.unmarshalArray(in) // consumes the terminating ']' + default: + in.SetErr(fmt.Errorf("unexpected delimiter: %v: %w", tok, ErrStdlib)) // force error + return nil + } + + case tokenUndef: + fallthrough + default: + if in.Ok() { + in.SetErr(fmt.Errorf("unexpected token: %v: %w", tok, ErrStdlib)) // force error + } + + return nil + } +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go new file mode 100644 index 000000000000..709b97c3046b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go @@ -0,0 +1,143 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "encoding/json" + "sync" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +type adaptersPool struct { + sync.Pool +} + +func (p *adaptersPool) Borrow() *Adapter { + return p.Get().(*Adapter) +} + +func (p *adaptersPool) BorrowIface() ifaces.Adapter { + return p.Get().(*Adapter) +} + +func (p *adaptersPool) Redeem(a *Adapter) { + p.Put(a) +} + +type writersPool struct { + sync.Pool +} + +func (p *writersPool) Borrow() *jwriter { + ptr := p.Get() + + jw := ptr.(*jwriter) + jw.Reset() + + return jw +} + +func (p *writersPool) Redeem(w *jwriter) { + p.Put(w) +} + +type lexersPool struct { + sync.Pool +} + +func (p *lexersPool) Borrow(data []byte) *jlexer { + ptr := p.Get() + + l := ptr.(*jlexer) + l.buf = poolOfReaders.Borrow(data) + l.dec = json.NewDecoder(l.buf) // cannot pool, not exposed by the encoding/json API + l.Reset() + + return l +} + +func (p *lexersPool) Redeem(l *jlexer) { + l.dec = nil + discard := l.buf + l.buf = nil + poolOfReaders.Redeem(discard) + p.Put(l) +} + +type readersPool struct { + sync.Pool +} + +func (p *readersPool) Borrow(data []byte) *bytesReader { + ptr := p.Get() + + b := ptr.(*bytesReader) + b.Reset() + b.buf = data + + return b +} + +func (p *readersPool) Redeem(b *bytesReader) { + p.Put(b) +} + +var ( + poolOfAdapters = &adaptersPool{ + Pool: sync.Pool{ + New: func() any { + return NewAdapter() + }, + }, + } + + poolOfWriters = &writersPool{ + Pool: sync.Pool{ + New: func() any { + return newJWriter() + }, + }, + } + + poolOfLexers = &lexersPool{ + Pool: sync.Pool{ + New: func() any { + return newLexer(nil) + }, + }, + } + + poolOfReaders = &readersPool{ + Pool: sync.Pool{ + New: func() any { + return &bytesReader{} + }, + }, + } +) + +// BorrowAdapter borrows an [Adapter] from the pool, recycling already allocated instances. +func BorrowAdapter() *Adapter { + return poolOfAdapters.Borrow() +} + +// BorrowAdapterIface borrows a stdlib [Adapter] and converts it directly +// to [ifaces.Adapter]. 
This is useful to avoid further allocations when +// translating the concrete type into an interface. +func BorrowAdapterIface() ifaces.Adapter { + return poolOfAdapters.BorrowIface() +} + +// RedeemAdapter redeems an [Adapter] to the pool, so it may be recycled. +func RedeemAdapter(a *Adapter) { + poolOfAdapters.Redeem(a) +} + +func RedeemAdapterIface(a ifaces.Adapter) { + concrete, ok := a.(*Adapter) + if ok { + poolOfAdapters.Redeem(concrete) + } +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go new file mode 100644 index 000000000000..fc8818694eae --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +func Register(dispatcher ifaces.Registrar) { + t := reflect.TypeOf(Adapter{}) + dispatcher.RegisterFor( + ifaces.RegistryEntry{ + Who: fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()), + What: ifaces.AllCapabilities, + Constructor: BorrowAdapterIface, + Support: support, + }) +} + +func support(_ ifaces.Capability, _ any) bool { + return true +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go new file mode 100644 index 000000000000..dc2325c1a30f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go @@ -0,0 +1,75 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "bytes" + "encoding/json" + "strings" +) + +type jwriter struct { + buf *bytes.Buffer + err error +} + +func newJWriter() *jwriter { + buf := make([]byte, 0, sensibleBufferSize) + + return &jwriter{buf: bytes.NewBuffer(buf)} +} + +func (w *jwriter) Reset() { + w.buf.Reset() + w.err = nil +} + +func (w *jwriter) RawString(s string) { + if w.err != nil { + return + } + w.buf.WriteString(s) +} + +func (w *jwriter) Raw(b []byte, err error) { + if w.err != nil { + return + } + if err != nil { + w.err = err + return + } + + _, _ = w.buf.Write(b) +} + +func (w *jwriter) RawByte(c byte) { + if w.err != nil { + return + } + w.buf.WriteByte(c) +} + +var quoteReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) + +func (w *jwriter) String(s string) { + if w.err != nil { + return + } + // escape quotes and \ + s = quoteReplacer.Replace(s) + + _ = w.buf.WriteByte('"') + json.HTMLEscape(w.buf, []byte(s)) + _ = w.buf.WriteByte('"') +} + +// BuildBytes returns a clone of the internal buffer. 
+func (w *jwriter) BuildBytes() ([]byte, error) { + if w.err != nil { + return nil, w.err + } + + return bytes.Clone(w.buf.Bytes()), nil +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/concat.go b/vendor/github.com/go-openapi/swag/jsonutils/concat.go new file mode 100644 index 000000000000..2068503af05b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/concat.go @@ -0,0 +1,92 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package jsonutils + +import ( + "bytes" +) + +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + +const comma = byte(',') + +var closers map[byte]byte + +func init() { + closers = map[byte]byte{ + '{': '}', + '[': ']', + } +} + +// ConcatJSON concatenates multiple json objects or arrays efficiently. +// +// Note that [ConcatJSON] performs a very simple (and fast) concatenation +// operation: it does not attempt to merge objects. +func ConcatJSON(blobs ...[]byte) []byte { + if len(blobs) == 0 { + return nil + } + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last-- + if last < 0 { + // there was nothing but "null"s or nil... + return nil + } + } + if last == 0 { + return blobs[0] + } + + var opening, closing byte + var idx, a int + buf := bytes.NewBuffer(nil) + + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } + if len(b) > 0 && opening == 0 { // is this an array or an object? + opening, closing = b[0], closers[b[0]] + } + + if opening != '{' && opening != '[' { + continue // don't know how to concatenate non container objects + } + + const minLengthIfNotEmpty = 3 + if len(b) < minLengthIfNotEmpty { // yep empty but also the last one, so closing this thing + if i == last && a > 0 { + _ = buf.WriteByte(closing) // never returns err != nil + } + continue + } + + idx = 0 + if a > 0 { // we need to join with a comma for everything beyond the first non-empty item + _ = buf.WriteByte(comma) // never returns err != nil + idx = 1 // this is not the first or the last so we want to drop the leading bracket + } + + if i != last { // not the last one, strip brackets + _, _ = buf.Write(b[idx : len(b)-1]) // never returns err != nil + } else { // last one, strip only the leading bracket + _, _ = buf.Write(b[idx:]) + } + a++ + } + + // somehow it ended up being empty, so provide a default value + if buf.Len() == 0 && (opening == '{' || opening == '[') { + _ = buf.WriteByte(opening) // never returns err != nil + _ = buf.WriteByte(closing) + } + + return buf.Bytes() +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/doc.go new file mode 100644 index 000000000000..3926cc58d1bc --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/doc.go @@ -0,0 +1,7 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package jsonutils provides helpers to work with JSON. +// +// These utilities work with dynamic go structures to and from JSON. 
+package jsonutils
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/json.go b/vendor/github.com/go-openapi/swag/jsonutils/json.go new file mode 100644 index 000000000000..40753ce03fde --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/json.go @@ -0,0 +1,116 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package jsonutils + +import ( + "bytes" + "encoding/json" + + "github.com/go-openapi/swag/jsonutils/adapters" + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +// WriteJSON marshals a data structure as JSON. +// +// The difference with [json.Marshal] is that it may check among several alternatives +// to do so. +// +// See [adapters.Registrar] for more details about how to configure +// multiple serialization alternatives. +// +// NOTE: to allow types that are [easyjson.Marshaler] s to use that route to process JSON, +// you now need to register the adapter for easyjson at runtime. +func WriteJSON(value any) ([]byte, error) { + if orderedMap, isOrdered := value.(ifaces.Ordered); isOrdered { + orderedMarshaler := adapters.OrderedMarshalAdapterFor(orderedMap) + + if orderedMarshaler != nil { + defer orderedMarshaler.Redeem() + + return orderedMarshaler.OrderedMarshal(orderedMap) + } + + // no support found in registered adapters, fallback to the default (unordered) case + } + + marshaler := adapters.MarshalAdapterFor(value) + if marshaler != nil { + defer marshaler.Redeem() + + return marshaler.Marshal(value) + } + + // no support found in registered adapters, fallback to the default standard library. + // + // This only happens when tinkering with the global registry of adapters, since the default handles all the above cases. + return json.Marshal(value) // Codecov ignore // this is a safeguard not easily simulated in tests +} + +// ReadJSON unmarshals JSON data into a data structure. +// +// The difference with [json.Unmarshal] is that it may check among several alternatives +// to do so. +// +// See [adapters.Registrar] for more details about how to configure +// multiple serialization alternatives. +// +// NOTE: value must be a pointer. +// +// If the provided value implements [ifaces.SetOrdered], it is considered an "ordered map" and [ReadJSON] +// will favor an adapter that implements [ifaces.OrderedUnmarshalAdapter], or fall back to +// an unordered behavior if none is found. +// +// NOTE: to allow types that are [easyjson.Unmarshaler] s to use that route to process JSON, +// you now need to register the adapter for easyjson at runtime. +func ReadJSON(data []byte, value any) error { + trimmedData := bytes.Trim(data, "\x00") + + if orderedMap, isOrdered := value.(ifaces.SetOrdered); isOrdered { + // if the value is an ordered map, favors support for OrderedUnmarshal. + + orderedUnmarshaler := adapters.OrderedUnmarshalAdapterFor(orderedMap) + + if orderedUnmarshaler != nil { + defer orderedUnmarshaler.Redeem() + + return orderedUnmarshaler.OrderedUnmarshal(trimmedData, orderedMap) + } + + // no support found in registered adapters, fallback to the default (unordered) case + } + + unmarshaler := adapters.UnmarshalAdapterFor(value) + if unmarshaler != nil { + defer unmarshaler.Redeem() + + return unmarshaler.Unmarshal(trimmedData, value) + } + + // no support found in registered adapters, fallback to the default standard library. + // + // This only happens when tinkering with the global registry of adapters, since the default handles all the above cases.
+ return json.Unmarshal(trimmedData, value) // Codecov ignore // this is a safeguard not easily simulated in tests +} + +// FromDynamicJSON turns a go value into a properly JSON typed structure. +// +// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped any, +// i.e. objects are represented by map[string]any, arrays by []any, and +// all numbers are represented as float64. +// +// NOTE: target must be a pointer. +// +// # Maintaining the order of keys in objects +// +// If source and target implement [ifaces.Ordered] and [ifaces.SetOrdered] respectively, +// they are considered "ordered maps" and the order of keys is maintained in the +// "jsonification" process. In that case, map[string]any values are replaced by (ordered) [JSONMapSlice] ones. +func FromDynamicJSON(source, target any) error { + b, err := WriteJSON(source) + if err != nil { + return err + } + + return ReadJSON(b, target) +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go b/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go new file mode 100644 index 000000000000..38dd3e244426 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package jsonutils + +import ( + "iter" + + "github.com/go-openapi/swag/jsonutils/adapters" + "github.com/go-openapi/swag/typeutils" +) + +// JSONMapSlice represents a JSON object, with the order of keys maintained. +// +// It behaves like an ordered map, but keys can't be accessed in constant time. +type JSONMapSlice []JSONMapItem + +// OrderedItems iterates over all (key,value) pairs with the order of keys maintained. +// +// This implements the [ifaces.Ordered] interface, so that [ifaces.Adapter] s know how to marshal +// keys in the desired order. +func (s JSONMapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +// SetOrderedItems sets keys in the [JSONMapSlice] objects, as presented by +// the provided iterator. +// +// As a special case, if items is nil, this sets to receiver to a nil slice. +// +// This implements the [ifaces.SetOrdered] interface, so that [ifaces.Adapter] s know how to unmarshal +// keys in the desired order. +func (s *JSONMapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + // force receiver to be a nil slice + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode: short-circuited when unmarshaling fresh data structures + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + + m = append(m, JSONMapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, JSONMapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders a [JSONMapSlice] as JSON bytes, preserving the order of keys. +// +// It will pick the JSON library currently configured by the [adapters.Registry] (defaults to the standard library). +func (s JSONMapSlice) MarshalJSON() ([]byte, error) { + orderedMarshaler := adapters.OrderedMarshalAdapterFor(s) + defer orderedMarshaler.Redeem() + + return orderedMarshaler.OrderedMarshal(s) +} + +// UnmarshalJSON builds a [JSONMapSlice] from JSON bytes, preserving the order of keys. 
+// +// Inner objects are unmarshaled as ordered [JSONMapSlice] slices and not map[string]any. +// +// It will pick the JSON library currently configured by the [adapters.Registry] (defaults to the standard library). +func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { + if typeutils.IsNil(*s) { + // allow to unmarshal with a simple var declaration (nil slice) + *s = JSONMapSlice{} + } + + orderedUnmarshaler := adapters.OrderedUnmarshalAdapterFor(s) + defer orderedUnmarshaler.Redeem() + + return orderedUnmarshaler.OrderedUnmarshal(data, s) +} + +// JSONMapItem represents the value of a key in a JSON object held by [JSONMapSlice]. +// +// Notice that JSONMapItem should not be marshaled to or unmarshaled from JSON directly. +// +// Use this type as part of a [JSONMapSlice] when dealing with JSON bytes. +type JSONMapItem struct { + Key string + Value any +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils_iface.go b/vendor/github.com/go-openapi/swag/jsonutils_iface.go new file mode 100644 index 000000000000..7bd4105fa51a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils_iface.go @@ -0,0 +1,65 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "log" + + "github.com/go-openapi/swag/jsonutils" +) + +// JSONMapSlice represents a JSON object, with the order of keys maintained +// +// Deprecated: use [jsonutils.JSONMapSlice] instead, or [yamlutils.YAMLMapSlice] if you marshal YAML. +type JSONMapSlice = jsonutils.JSONMapSlice + +// JSONMapItem represents a JSON object, with the order of keys maintained +// +// Deprecated: use [jsonutils.JSONMapItem] instead. +type JSONMapItem = jsonutils.JSONMapItem + +// WriteJSON writes json data. +// +// Deprecated: use [jsonutils.WriteJSON] instead. +func WriteJSON(data any) ([]byte, error) { return jsonutils.WriteJSON(data) } + +// ReadJSON reads json data. +// +// Deprecated: use [jsonutils.ReadJSON] instead. +func ReadJSON(data []byte, value any) error { return jsonutils.ReadJSON(data, value) } + +// DynamicJSONToStruct converts an untyped JSON structure into a target data type. +// +// Deprecated: use [jsonutils.FromDynamicJSON] instead. +func DynamicJSONToStruct(data any, target any) error { + return jsonutils.FromDynamicJSON(data, target) +} + +// ConcatJSON concatenates multiple JSON objects efficiently. +// +// Deprecated: use [jsonutils.ConcatJSON] instead. +func ConcatJSON(blobs ...[]byte) []byte { return jsonutils.ConcatJSON(blobs...) } + +// ToDynamicJSON turns a go value into a properly JSON untyped structure. +// +// It is the same as [FromDynamicJSON], but doesn't check for errors. +// +// Deprecated: this function is a misnomer and is unsafe. Use [jsonutils.FromDynamicJSON] instead. +func ToDynamicJSON(value any) any { + var res any + if err := FromDynamicJSON(value, &res); err != nil { + log.Println(err) + } + + return res +} + +// FromDynamicJSON turns a go value into a properly JSON typed structure. +// +// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped any, +// i.e. objects are represented by map[string]any, arrays by []any, and all +// scalar values are any. +// +// Deprecated: use [jsonutils.FromDynamicJSON] instead. 
+func FromDynamicJSON(data, target any) error { return jsonutils.FromDynamicJSON(data, target) } diff --git a/vendor/github.com/go-openapi/swag/loading/LICENSE b/vendor/github.com/go-openapi/swag/loading/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/loading/doc.go b/vendor/github.com/go-openapi/swag/loading/doc.go new file mode 100644 index 000000000000..8cf7bcb8b9d4 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package loading provides tools to load a file from http or from a local file system. +package loading diff --git a/vendor/github.com/go-openapi/swag/loading/errors.go b/vendor/github.com/go-openapi/swag/loading/errors.go new file mode 100644 index 000000000000..b3964289c742 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/errors.go @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +type loadingError string + +const ( + // ErrLoader is an error raised by the file loader utility + ErrLoader loadingError = "loader error" +) + +func (e loadingError) Error() string { + return string(e) +} diff --git a/vendor/github.com/go-openapi/swag/loading/json.go b/vendor/github.com/go-openapi/swag/loading/json.go new file mode 100644 index 000000000000..59db12f5cfdb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/json.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +import ( + "encoding/json" + "errors" + "path/filepath" +) + +// JSONMatcher matches json for a file loader. +func JSONMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".json" || ext == ".jsn" || ext == ".jso" +} + +// JSONDoc loads a json document from either a file or a remote url. +func JSONDoc(path string, opts ...Option) (json.RawMessage, error) { + data, err := LoadFromFileOrHTTP(path, opts...) + if err != nil { + return nil, errors.Join(err, ErrLoader) + } + return json.RawMessage(data), nil +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading/loading.go similarity index 59% rename from vendor/github.com/go-openapi/swag/loading.go rename to vendor/github.com/go-openapi/swag/loading/loading.go index 783442fddf63..269fb74d1675 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading/loading.go @@ -1,54 +1,26 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 -package swag +package loading import ( + "context" + "embed" "fmt" "io" "log" "net/http" "net/url" - "os" "path" "path/filepath" "runtime" "strings" - "time" ) -// LoadHTTPTimeout the default timeout for load requests -var LoadHTTPTimeout = 30 * time.Second - -// LoadHTTPBasicAuthUsername the username to use when load requests require basic auth -var LoadHTTPBasicAuthUsername = "" - -// LoadHTTPBasicAuthPassword the password to use when load requests require basic auth -var LoadHTTPBasicAuthPassword = "" - -// LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests -var LoadHTTPCustomHeaders = map[string]string{} - // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(pth string) ([]byte, error) { - return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) -} - -// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in -// timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) +func LoadFromFileOrHTTP(pth string, opts ...Option) ([]byte, error) { + o := optionsWithDefaults(opts) + return LoadStrategy(pth, o.ReadFileFunc(), loadHTTPBytes(opts...), opts...)(pth) } // LoadStrategy returns a loader function for a given path or URI. @@ -81,10 +53,12 @@ func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, e // - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) // - `file:///c:/folder/file` becomes `C:\folder\file` // - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` -func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { +func LoadStrategy(pth string, local, remote func(string) ([]byte, error), opts ...Option) func(string) ([]byte, error) { if strings.HasPrefix(pth, "http") { return remote } + o := optionsWithDefaults(opts) + _, isEmbedFS := o.fs.(embed.FS) return func(p string) ([]byte, error) { upth, err := url.PathUnescape(p) @@ -92,19 +66,19 @@ func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(s return nil, err } - if !strings.HasPrefix(p, `file://`) { + cpth, hasPrefix := strings.CutPrefix(upth, "file://") + if !hasPrefix || isEmbedFS || runtime.GOOS != "windows" { + // crude processing: trim the file:// prefix. 
This leaves full URIs with a host with a (mostly) unexpected result // regular file path provided: just normalize slashes - return local(filepath.FromSlash(upth)) - } - - if runtime.GOOS != "windows" { - // crude processing: this leaves full URIs with a host with a (mostly) unexpected result - upth = strings.TrimPrefix(upth, `file://`) + if isEmbedFS { + // on windows, we need to slash the path if FS is an embed FS. + return local(strings.TrimLeft(filepath.ToSlash(cpth), "./")) // remove invalid leading characters for embed FS + } - return local(filepath.FromSlash(upth)) + return local(filepath.FromSlash(cpth)) } - // windows-only pre-processing of file://... URIs + // windows-only pre-processing of file://... URIs, excluding embed.FS // support for canonical file URIs on windows. u, err := url.Parse(filepath.ToSlash(upth)) @@ -139,19 +113,29 @@ func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(s } } -func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { +func loadHTTPBytes(opts ...Option) func(path string) ([]byte, error) { + o := optionsWithDefaults(opts) + return func(path string) ([]byte, error) { - client := &http.Client{Timeout: timeout} - req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx + client := o.client + timeoutCtx := context.Background() + var cancel func() + + if o.httpTimeout > 0 { + timeoutCtx, cancel = context.WithTimeout(timeoutCtx, o.httpTimeout) + defer cancel() + } + + req, err := http.NewRequestWithContext(timeoutCtx, http.MethodGet, path, nil) if err != nil { return nil, err } - if LoadHTTPBasicAuthUsername != "" && LoadHTTPBasicAuthPassword != "" { - req.SetBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword) + if o.basicAuthUsername != "" && o.basicAuthPassword != "" { + req.SetBasicAuth(o.basicAuthUsername, o.basicAuthPassword) } - for key, val := range LoadHTTPCustomHeaders { + for key, val := range o.customHeaders { req.Header.Set(key, val) } @@ -168,7 +152,7 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { } if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + return nil, fmt.Errorf("could not access document at %q [%s]: %w", path, resp.Status, ErrLoader) } return io.ReadAll(resp.Body) diff --git a/vendor/github.com/go-openapi/swag/loading/options.go b/vendor/github.com/go-openapi/swag/loading/options.go new file mode 100644 index 000000000000..6674ac69e628 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/options.go @@ -0,0 +1,125 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +import ( + "io/fs" + "net/http" + "os" + "time" +) + +type ( + // Option provides options for loading a file over HTTP or from a file. + Option func(*options) + + httpOptions struct { + httpTimeout time.Duration + basicAuthUsername string + basicAuthPassword string + customHeaders map[string]string + client *http.Client + } + + fileOptions struct { + fs fs.ReadFileFS + } + + options struct { + httpOptions + fileOptions + } +) + +func (fo fileOptions) ReadFileFunc() func(string) ([]byte, error) { + if fo.fs == nil { + return os.ReadFile + } + + return fo.fs.ReadFile +} + +// WithTimeout sets a timeout for the remote file loader. +// +// The default timeout is 30s. 
+func WithTimeout(timeout time.Duration) Option { + return func(o *options) { + o.httpTimeout = timeout + } +} + +// WithBasicAuth sets a basic authentication scheme for the remote file loader. +func WithBasicAuth(username, password string) Option { + return func(o *options) { + o.basicAuthUsername = username + o.basicAuthPassword = password + } +} + +// WithCustomHeaders sets custom headers for the remote file loader. +func WithCustomHeaders(headers map[string]string) Option { + return func(o *options) { + if o.customHeaders == nil { + o.customHeaders = make(map[string]string, len(headers)) + } + + for header, value := range headers { + o.customHeaders[header] = value + } + } +} + +// WithHTTPClient overrides the default HTTP client used to fetch a remote file. +// +// By default, [http.DefaultClient] is used. +func WithHTTPClient(client *http.Client) Option { + return func(o *options) { + o.client = client + } +} + +// WithFS sets a file system for the local file loader. +// +// If the provided file system is a [fs.ReadFileFS], the ReadFile function is used. +// Otherwise, ReadFile is wrapped using [fs.ReadFile]. +// +// By default, the file system is the one provided by the os package. +// +// For example, this may be set to consume from an embedded file system, or a rooted FS. +func WithFS(filesystem fs.FS) Option { + return func(o *options) { + if rfs, ok := filesystem.(fs.ReadFileFS); ok { + o.fs = rfs + + return + } + o.fs = readFileFS{FS: filesystem} + } +} + +type readFileFS struct { + fs.FS +} + +func (r readFileFS) ReadFile(name string) ([]byte, error) { + return fs.ReadFile(r.FS, name) +} + +func optionsWithDefaults(opts []Option) options { + const defaultTimeout = 30 * time.Second + + o := options{ + // package level defaults + httpOptions: httpOptions{ + httpTimeout: defaultTimeout, + client: http.DefaultClient, + }, + } + + for _, apply := range opts { + apply(&o) + } + + return o +} diff --git a/vendor/github.com/go-openapi/swag/loading/yaml.go b/vendor/github.com/go-openapi/swag/loading/yaml.go new file mode 100644 index 000000000000..3ebb53668c47 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/yaml.go @@ -0,0 +1,37 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +import ( + "encoding/json" + "path/filepath" + + "github.com/go-openapi/swag/yamlutils" +) + +// YAMLMatcher matches yaml for a file loader. +func YAMLMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".yaml" || ext == ".yml" +} + +// YAMLDoc loads a yaml document from either http or a file and converts it to json. +func YAMLDoc(path string, opts ...Option) (json.RawMessage, error) { + yamlDoc, err := YAMLData(path, opts...) + if err != nil { + return nil, err + } + + return yamlutils.YAMLToJSON(yamlDoc) +} + +// YAMLData loads a yaml document from either http or a file. +func YAMLData(path string, opts ...Option) (any, error) { + data, err := LoadFromFileOrHTTP(path, opts...) 
+ if err != nil { + return nil, err + } + + return yamlutils.BytesToYAMLDoc(data) +} diff --git a/vendor/github.com/go-openapi/swag/loading_iface.go b/vendor/github.com/go-openapi/swag/loading_iface.go new file mode 100644 index 000000000000..27ec3fb8c37a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading_iface.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "encoding/json" + "time" + + "github.com/go-openapi/swag/loading" +) + +var ( + // Package-level defaults for the file loading utilities (deprecated). + + // LoadHTTPTimeout the default timeout for load requests. + // + // Deprecated: use [loading.WithTimeout] instead. + LoadHTTPTimeout = 30 * time.Second + + // LoadHTTPBasicAuthUsername the username to use when load requests require basic auth. + // + // Deprecated: use [loading.WithBasicAuth] instead. + LoadHTTPBasicAuthUsername = "" + + // LoadHTTPBasicAuthPassword the password to use when load requests require basic auth. + // + // Deprecated: use [loading.WithBasicAuth] instead. + LoadHTTPBasicAuthPassword = "" + + // LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests. + // + // Deprecated: use [loading.WithCustomHeaders] instead. + LoadHTTPCustomHeaders = map[string]string{} +) + +// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the provided path. +// +// Deprecated: use [loading.LoadFromFileOrHTTP] instead. +func LoadFromFileOrHTTP(pth string, opts ...loading.Option) ([]byte, error) { + return loading.LoadFromFileOrHTTP(pth, loadingOptionsWithDefaults(opts)...) +} + +// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in +// timeout arg allows for per request overriding of the request timeout. +// +// Deprecated: use [loading.LoadFileOrHTTP] with the [loading.WithTimeout] option instead. +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration, opts ...loading.Option) ([]byte, error) { + opts = append(opts, loading.WithTimeout(timeout)) + + return LoadFromFileOrHTTP(pth, opts...) +} + +// LoadStrategy returns a loader function for a given path or URL. +// +// Deprecated: use [loading.LoadStrategy] instead. +func LoadStrategy(pth string, local, remote func(string) ([]byte, error), opts ...loading.Option) func(string) ([]byte, error) { + return loading.LoadStrategy(pth, local, remote, loadingOptionsWithDefaults(opts)...) +} + +// YAMLMatcher matches yaml for a file loader. +// +// Deprecated: use [loading.YAMLMatcher] instead. +func YAMLMatcher(path string) bool { return loading.YAMLMatcher(path) } + +// YAMLDoc loads a yaml document from either http or a file and converts it to json. +// +// Deprecated: use [loading.YAMLDoc] instead. +func YAMLDoc(path string) (json.RawMessage, error) { + return loading.YAMLDoc(path) +} + +// YAMLData loads a yaml document from either http or a file. +// +// Deprecated: use [loading.YAMLData] instead. +func YAMLData(path string) (any, error) { + return loading.YAMLData(path) +} + +// loadingOptionsWithDefaults bridges deprecated default settings that use package-level variables, +// with the recommended use of loading.Option. 
+func loadingOptionsWithDefaults(opts []loading.Option) []loading.Option { + o := []loading.Option{ + loading.WithTimeout(LoadHTTPTimeout), + loading.WithBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword), + loading.WithCustomHeaders(LoadHTTPCustomHeaders), + } + o = append(o, opts...) + + return o +} diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md similarity index 53% rename from vendor/github.com/go-openapi/swag/BENCHMARK.md rename to vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md index e7f28ed6b789..6674c63b7294 100644 --- a/vendor/github.com/go-openapi/swag/BENCHMARK.md +++ b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md @@ -1,12 +1,10 @@ -# Benchmarks - -## Name mangling utilities +# Benchmarking name mangling utilities ```bash go test -bench XXX -run XXX -benchtime 30s ``` -### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df +## Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df ``` goos: linux @@ -21,7 +19,7 @@ BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op ``` -### Benchmarks after PR #79 +## Benchmarks after PR #79 ~ x10 performance improvement and ~ /100 memory allocations. @@ -50,3 +48,43 @@ BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op ``` + +## Benchmarks at d7d2d1b895f5b6747afaff312dd2a402e69e818b + +go1.24 + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 19757858 1881 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 17494111 2094 ns/op 74 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 28161226 1492 ns/op 158 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 23787333 1489 ns/op 158 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 17537257 2030 ns/op 103 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 16977453 2156 ns/op 105 B/op 6 allocs/op +``` + +## Benchmarks after PR #106 + +Moving the scope of everything down to a struct allowed to reduce a bit garbage and pooling. + +On top of that, ToGoName (and thus ToVarName) have been subject to a minor optimization, removing a few allocations. + +Overall timings improve by ~ -10%. + +go1.24 + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag/mangling +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 22496130 1618 ns/op 31 B/op 3 allocs/op +BenchmarkToXXXName/ToVarName-16 22538068 1618 ns/op 33 B/op 3 allocs/op +BenchmarkToXXXName/ToFileName-16 27722977 1236 ns/op 105 B/op 6 allocs/op +BenchmarkToXXXName/ToCommandName-16 27967395 1258 ns/op 105 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18587901 1917 ns/op 103 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17193208 2019 ns/op 108 B/op 7 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/mangling/LICENSE b/vendor/github.com/go-openapi/swag/mangling/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
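A minimal usage sketch of the vendored go-openapi/swag/loading options introduced above, which replace the old package-level knobs (LoadHTTPTimeout, LoadHTTPBasicAuthUsername/Password, LoadHTTPCustomHeaders) with functional options. The URL and file paths below are placeholders, not anything referenced by this patch:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/go-openapi/swag/loading"
)

func main() {
	// Remote document: a per-call timeout and custom headers are passed as
	// options instead of mutating package-level variables.
	remote, err := loading.JSONDoc(
		"https://example.com/openapi.json", // placeholder URL
		loading.WithTimeout(10*time.Second),
		loading.WithCustomHeaders(map[string]string{"X-Trace": "demo"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("remote document: %d bytes\n", len(remote))

	// Local document: WithFS swaps the default os.ReadFile for any fs.FS,
	// for example a rooted directory or an embed.FS.
	local, err := loading.YAMLDoc(
		"petstore.yaml", // placeholder file name
		loading.WithFS(os.DirFS("./specs")),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("local document: %d bytes\n", len(local))
}
```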
diff --git a/vendor/github.com/go-openapi/swag/mangling/doc.go b/vendor/github.com/go-openapi/swag/mangling/doc.go new file mode 100644 index 000000000000..ce0d8904857a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/doc.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package mangling provides name mangling capabilities. +// +// Name mangling is an important stage when generating code: +// it helps construct safe program identifiers that abide by the language rules +// and play along with linters. +// +// Examples: +// +// Suppose we get an object name taken from an API spec: "json_object", +// +// We may generate a legit go type name using [NameMangler.ToGoName]: "JsonObject". +// +// We may then locate this type in a source file named using [NameMangler.ToFileName]: "json_object.go". +// +// The methods exposed by the NameMangler are used to generate code in many different contexts, such as: +// +// - generating exported or unexported go identifiers from a JSON schema or an API spec +// - generating file names +// - generating human-readable comments for types and variables +// - generating JSON-like API identifiers from go code +// - ... +package mangling diff --git a/vendor/github.com/go-openapi/swag/mangling/initialism_index.go b/vendor/github.com/go-openapi/swag/mangling/initialism_index.go new file mode 100644 index 000000000000..e5b70c149388 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/initialism_index.go @@ -0,0 +1,270 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "sort" + "strings" + "unicode" + "unicode/utf8" +) + +// DefaultInitialisms returns all the initialisms configured by default for this package. +// +// # Motivation +// +// Common initialisms are acronyms for which the ordinary camel-casing rules are altered and +// for which we retain the original case. +// +// This is largely specific to the go naming conventions enforced by golint (now revive). +// +// # Example +// +// In go, "id" is a good-looking identifier, but "Id" is not and "ID" is preferred +// (notice that this stems only from conventions: the go compiler accepts all of these). +// +// Similarly, we may use "http", but not "Http". In this case, "HTTP" is preferred. +// +// # Reference and customization +// +// The default list of these casing-style exceptions is taken from the [github.com/mgechev/revive] linter for go: +// https://github.com/mgechev/revive/blob/master/lint/name.go#L93 +// +// There are a few additions to the original list, such as IPv4, IPv6 and OAI ("OpenAPI"). +// +// For these additions, "IPv4" would be preferred to "Ipv4" or "IPV4", and "OAI" to "Oai" +// +// You may redefine this list entirely using the mangler option [WithInitialisms], or simply add extra definitions +// using [WithAdditionalInitialisms]. +// +// # Mixed-case and plurals +// +// Notice that initialisms are not necessarily fully upper-cased: a mixed-case initialism indicates the preferred casing. +// +// Obviously, lower-case only initialisms do not make a lot of sense: if lower-case only initialisms are added, +// they will be considered fully capitalized. +// +// Plural forms use mixed case like "IDs". And so do values like "IPv4" or "IPv6". +// +// The [NameMangler] automatically detects simple plurals for words such as "IDs" or "APIs", +// so you don't need to configure these variants. 
+// +// At this moment, it doesn't support pluralization of terms that ends with an 's' (or 'S'), since there is +// no clear consensus on whether a word like DNS should be pluralized as DNSes or remain invariant. +// The [NameMangler] consider those invariant. Therefore DNSs or DNSes are not recognized as plurals for DNS. +// +// Besids, we don't want to support pluralization of terms which would otherwise conflict with another one, +// like "HTTPs" vs "HTTPS". All these should be considered invariant. Hence: "Https" matches "HTTPS" and +// "HTTPSS" is "HTTPS" followed by "S". +func DefaultInitialisms() []string { + return []string{ + "ACL", + "API", + "ASCII", + "CPU", + "CSS", + "DNS", + "EOF", + "GUID", + "HTML", + "HTTPS", + "HTTP", + "ID", + "IP", + "IPv4", // prefer the mixed case outcome IPv4 over the capitalized IPV4 + "IPv6", // prefer the mixed case outcome IPv6 over the capitalized IPV6 + "JSON", + "LHS", + "OAI", + "QPS", + "RAM", + "RHS", + "RPC", + "SLA", + "SMTP", + "SQL", + "SSH", + "TCP", + "TLS", + "TTL", + "UDP", + "UI", + "UID", + "UUID", + "URI", + "URL", + "UTF8", + "VM", + "XML", + "XMPP", + "XSRF", + "XSS", + } +} + +type indexOfInitialisms struct { + initialismsCache + + index map[string]struct{} +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + index: make(map[string]struct{}), + } +} + +func (m *indexOfInitialisms) add(words ...string) *indexOfInitialisms { + for _, word := range words { + // sanitization of injected words: trimmed from blanks, and must start with a letter + trimmed := strings.TrimSpace(word) + + firstRune, _ := utf8.DecodeRuneInString(trimmed) + if !unicode.IsLetter(firstRune) { + continue + } + + // Initialisms are case-sensitive. This means that we support mixed-case words. + // However, if specified as a lower-case string, the initialism should be fully capitalized. + if trimmed == strings.ToLower(trimmed) { + m.index[strings.ToUpper(trimmed)] = struct{}{} + + continue + } + + m.index[trimmed] = struct{}{} + } + return m +} + +func (m *indexOfInitialisms) sorted() []string { + result := make([]string, 0, len(m.index)) + for k := range m.index { + result = append(result, k) + } + sort.Sort(sort.Reverse(byInitialism(result))) + return result +} + +func (m *indexOfInitialisms) buildCache() { + m.build(m.sorted(), m.pluralForm) +} + +// initialismsCache caches all needed pre-computed and converted initialism entries, +// in the desired resolution order. +type initialismsCache struct { + initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + initialismsPluralForm []pluralForm +} + +func (c *initialismsCache) build(in []string, pluralfunc func(string) pluralForm) { + c.initialisms = in + c.initialismsRunes = asRunes(c.initialisms) + c.initialismsUpperCased = asUpperCased(c.initialisms) + c.initialismsPluralForm = asPluralForms(c.initialisms, pluralfunc) +} + +// pluralForm denotes the kind of pluralization to be used for initialisms. +// +// At this moment, initialisms are either invariant or follow a simple plural form with an +// extra (lower case) "s". 
+type pluralForm uint8 + +const ( + notPlural pluralForm = iota + invariantPlural + simplePlural +) + +func (f pluralForm) String() string { + switch f { + case notPlural: + return "notPlural" + case invariantPlural: + return "invariantPlural" + case simplePlural: + return "simplePlural" + default: + return "" + } +} + +// pluralForm indicates how we want to pluralize a given initialism. +// +// Besides configured invariant forms (like HTTP and HTTPS), +// an initialism is normally pluralized by adding a single 's', like in IDs. +// +// Initialisms ending with an 'S' or an 's' are configured as invariant (we don't +// support plural forms like CSSes or DNSes, however the mechanism could be extended to +// do just that). +func (m *indexOfInitialisms) pluralForm(key string) pluralForm { + if _, ok := m.index[key]; !ok { + return notPlural + } + + if strings.HasSuffix(strings.ToUpper(key), "S") { + return invariantPlural + } + + if _, ok := m.index[key+"s"]; ok { + return invariantPlural + } + + if _, ok := m.index[key+"S"]; ok { + return invariantPlural + } + + return simplePlural +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less specifies the order in which initialisms are prioritized: +// 1. match longest first +// 2. when equal length, match in reverse lexicographical order, lower case match comes first +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return s[i] < s[j] +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +// asPluralForms bakes an index of pluralization support. +func asPluralForms(in []string, pluralFunc func(string) pluralForm) []pluralForm { + out := make([]pluralForm, len(in)) + for i, initialism := range in { + out[i] = pluralFunc(initialism) + } + + return out +} diff --git a/vendor/github.com/go-openapi/swag/mangling/name_lexem.go b/vendor/github.com/go-openapi/swag/mangling/name_lexem.go new file mode 100644 index 000000000000..bc837e3b9f5d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/name_lexem.go @@ -0,0 +1,186 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "bytes" + "strings" + "unicode" + "unicode/utf8" +) + +type ( + lexemKind uint8 + + nameLexem struct { + original string + matchedInitialism string + kind lexemKind + } +) + +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName +) + +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, + original: original, + matchedInitialism: matchedInitialism, + } +} + +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, + original: trim(original), // TODO: save on calls to trim + } +} + +// WriteTitleized writes the titleized lexeme to a bytes.Buffer. +// +// If the first letter cannot be capitalized, it doesn't write anything and return false, +// so the caller may attempt some workaround strategy. 
+func (l nameLexem) WriteTitleized(w *bytes.Buffer, alwaysUpper bool) bool { + if l.kind == lexemKindInitialismName { + w.WriteString(l.matchedInitialism) + + return true + } + + if len(l.original) == 0 { + return true + } + + if len(l.original) == 1 { + // identifier is too short: casing will depend on the context + firstByte := l.original[0] + switch { + case 'A' <= firstByte && firstByte <= 'Z': + // safe + w.WriteByte(firstByte) + + return true + case alwaysUpper && 'a' <= firstByte && firstByte <= 'z': + w.WriteByte(firstByte - 'a' + 'A') + + return true + default: + + // not a letter: skip and let the caller decide + return false + } + } + + if firstByte := l.original[0]; firstByte < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= firstByte && firstByte <= 'Z': + // already an upper case letter + w.WriteString(l.original) + + return true + case 'a' <= firstByte && firstByte <= 'z': + w.WriteByte(firstByte - 'a' + 'A') + w.WriteString(l.original[1:]) + + return true + default: + // not a good candidate: doesn't start with a letter + return false + } + } + + // unicode + firstRune, idx := utf8.DecodeRuneInString(l.original) + if !unicode.IsLetter(firstRune) || !unicode.IsUpper(unicode.ToUpper(firstRune)) { + // not a good candidate: doesn't start with a letter + // or a rune for which case doesn't make sense (e.g. East-Asian runes etc) + return false + } + + rest := l.original[idx:] + w.WriteRune(unicode.ToUpper(firstRune)) + w.WriteString(strings.ToLower(rest)) + + return true +} + +// WriteLower is like write titleized but it writes a lower-case version of the lexeme. +// +// Similarly, there is no writing if the casing of the first rune doesn't make sense. +func (l nameLexem) WriteLower(w *bytes.Buffer, alwaysLower bool) bool { + if l.kind == lexemKindInitialismName { + w.WriteString(lower(l.matchedInitialism)) + + return true + } + + if len(l.original) == 0 { + return true + } + + if len(l.original) == 1 { + // identifier is too short: casing will depend on the context + firstByte := l.original[0] + switch { + case 'a' <= firstByte && firstByte <= 'z': + // safe + w.WriteByte(firstByte) + + return true + case alwaysLower && 'A' <= firstByte && firstByte <= 'Z': + w.WriteByte(firstByte - 'A' + 'a') + + return true + default: + + // not a letter: skip and let the caller decide + return false + } + } + + if firstByte := l.original[0]; firstByte < utf8.RuneSelf { + // ASCII + switch { + case 'a' <= firstByte && firstByte <= 'z': + // already a lower case letter + w.WriteString(l.original) + + return true + case 'A' <= firstByte && firstByte <= 'Z': + w.WriteByte(firstByte - 'A' + 'a') + w.WriteString(l.original[1:]) + + return true + default: + // not a good candidate: doesn't start with a letter + return false + } + } + + // unicode + firstRune, idx := utf8.DecodeRuneInString(l.original) + if !unicode.IsLetter(firstRune) || !unicode.IsLower(unicode.ToLower(firstRune)) { + // not a good candidate: doesn't start with a letter + // or a rune for which case doesn't make sense (e.g. 
East-Asian runes etc) + return false + } + + rest := l.original[idx:] + w.WriteRune(unicode.ToLower(firstRune)) + w.WriteString(rest) + + return true +} + +func (l nameLexem) GetOriginal() string { + return l.original +} + +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName +} diff --git a/vendor/github.com/go-openapi/swag/mangling/name_mangler.go b/vendor/github.com/go-openapi/swag/mangling/name_mangler.go new file mode 100644 index 000000000000..da685681d08c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/name_mangler.go @@ -0,0 +1,370 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "strings" + "unicode" +) + +// NameMangler knows how to transform sentences or words into +// identifiers that are a better fit in contexts such as: +// +// - unexported or exported go variable identifiers +// - file names +// - camel cased identifiers +// - ... +// +// The [NameMangler] is safe for concurrent use, save for its [NameMangler.AddInitialisms] method, +// which is not. +// +// # Known limitations +// +// At this moment, the [NameMangler] doesn't play well with "all caps" text: +// +// unless every single upper-cased word is declared as an initialism, capitalized words would generally +// not be transformed with the expected result, e.g. +// +// ToFileName("THIS_IS_ALL_CAPS") +// +// yields the weird outcome +// +// "t_h_i_s_i_s_a_l_l_c_a_p_s" +type NameMangler struct { + options + + index *indexOfInitialisms + + splitter splitter + splitterWithPostSplit splitter + + _ struct{} +} + +// NewNameMangler builds a name mangler ready to convert strings. +// +// The default name mangler is configured with default common initialisms and all default options. +func NewNameMangler(opts ...Option) NameMangler { + m := NameMangler{ + options: optionsWithDefaults(opts), + index: newIndexOfInitialisms(), + } + m.addInitialisms(m.commonInitialisms...) + + // a splitter that returns matches lexemes as ready-to-assemble strings: + // details of the lexemes are redeemed. + m.splitter = newSplitter( + withInitialismsCache(&m.index.initialismsCache), + withReplaceFunc(m.replaceFunc), + ) + + // a splitter that returns matches lexemes ready for post-processing + m.splitterWithPostSplit = newSplitter( + withInitialismsCache(&m.index.initialismsCache), + withReplaceFunc(m.replaceFunc), + withPostSplitInitialismCheck, + ) + + return m +} + +// AddInitialisms declares extra initialisms to the mangler. +// +// It declares extra words as "initialisms" (i.e. words that won't be camel cased or titled cased), +// on top of the existing list of common initialisms (such as ID, HTTP...). +// +// Added words must start with a (unicode) letter. If some don't, they are ignored. +// Added words are either fully capitalized or mixed-cased. Lower-case only words are considered capitalized. +// +// It is typically used just after initializing the [NameMangler]. +// +// When all initialisms are known at the time the mangler is initialized, it is preferable to +// use [NewNameMangler] with the option [WithAdditionalInitialisms]. +// +// Adding initialisms mutates the mangler and should not be carried out concurrently with other calls to the mangler. +func (m *NameMangler) AddInitialisms(words ...string) { + m.addInitialisms(words...) +} + +// Initialisms renders the list of initialisms supported by this mangler. 
+func (m *NameMangler) Initialisms() []string { + return m.index.initialisms +} + +// Camelize a single word. +// +// Example: +// +// - "HELLO" and "hello" become "Hello". +func (m NameMangler) Camelize(word string) string { + ru := []rune(word) + + switch len(ru) { + case 0: + return "" + case 1: + return string(unicode.ToUpper(ru[0])) + default: + camelized := poolOfBuffers.BorrowBuffer(len(word)) + camelized.Grow(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + + camelized.WriteRune(unicode.ToUpper(ru[0])) + for _, ru := range ru[1:] { + camelized.WriteRune(unicode.ToLower(ru)) + } + + return camelized.String() + } +} + +// ToFileName generates a suitable snake-case file name from a sentence. +// +// It lower-cases everything with underscore (_) as a word separator. +// +// Examples: +// +// - "Hello, Swagger" becomes "hello_swagger" +// - "HelloSwagger" becomes "hello_swagger" +func (m NameMangler) ToFileName(name string) string { + inptr := m.split(name) + in := *inptr + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + poolOfStrings.RedeemStrings(inptr) + + return strings.Join(out, "_") +} + +// ToCommandName generates a suitable CLI command name from a sentence. +// +// It lower-cases everything with dash (-) as a word separator. +// +// Examples: +// +// - "Hello, Swagger" becomes "hello-swagger" +// - "HelloSwagger" becomes "hello-swagger" +func (m NameMangler) ToCommandName(name string) string { + inptr := m.split(name) + in := *inptr + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + poolOfStrings.RedeemStrings(inptr) + + return strings.Join(out, "-") +} + +// ToHumanNameLower represents a code name as a human-readable series of words. +// +// It lower-cases everything with blank space as a word separator. +// +// NOTE: parts recognized as initialisms just keep their original casing. +// +// Examples: +// +// - "Hello, Swagger" becomes "hello swagger" +// - "HelloSwagger" or "Hello-Swagger" become "hello swagger" +func (m NameMangler) ToHumanNameLower(name string) string { + s := m.splitterWithPostSplit + in := s.split(name) + out := make([]string, 0, len(*in)) + + for _, w := range *in { + if !w.IsInitialism() { + out = append(out, lower(w.GetOriginal())) + } else { + out = append(out, trim(w.GetOriginal())) + } + } + + poolOfLexems.RedeemLexems(in) + + return strings.Join(out, " ") +} + +// ToHumanNameTitle represents a code name as a human-readable series of titleized words. +// +// It titleizes every word with blank space as a word separator. +// +// Examples: +// +// - "hello, Swagger" becomes "Hello Swagger" +// - "helloSwagger" becomes "Hello Swagger" +func (m NameMangler) ToHumanNameTitle(name string) string { + s := m.splitterWithPostSplit + in := s.split(name) + + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) + if !w.IsInitialism() { + out = append(out, m.Camelize(original)) + } else { + out = append(out, original) + } + } + poolOfLexems.RedeemLexems(in) + + return strings.Join(out, " ") +} + +// ToJSONName generates a camelized single-word version of a sentence. +// +// The output assembles every camelized word, but for the first word, which +// is lower-cased. 
+// +// Example: +// +// - "Hello_swagger" becomes "helloSwagger" +func (m NameMangler) ToJSONName(name string) string { + inptr := m.split(name) + in := *inptr + out := make([]string, 0, len(in)) + + for i, w := range in { + if i == 0 { + out = append(out, lower(w)) + continue + } + out = append(out, m.Camelize(trim(w))) + } + + poolOfStrings.RedeemStrings(inptr) + + return strings.Join(out, "") +} + +// ToVarName generates a legit unexported go variable name from a sentence. +// +// The generated name plays well with linters (see also [NameMangler.ToGoName]). +// +// Examples: +// +// - "Hello_swagger" becomes "helloSwagger" +// - "Http_server" becomes "httpServer" +// +// This name applies the same rules as [NameMangler.ToGoName] (legit exported variable), save the +// capitalization of the initial rune. +// +// Special case: when the initial part is a recognized as an initialism (like in the example above), +// the full part is lower-cased. +func (m NameMangler) ToVarName(name string) string { + return m.goIdentifier(name, false) +} + +// ToGoName generates a legit exported go variable name from a sentence. +// +// The generated name plays well with most linters. +// +// ToGoName abides by the go "exported" symbol rule starting with an upper-case letter. +// +// Examples: +// +// - "hello_swagger" becomes "HelloSwagger" +// - "Http_server" becomes "HTTPServer" +// +// # Edge cases +// +// Whenever the first rune is not eligible to upper case, a special prefix is prepended to the resulting name. +// By default this is simply "X" and you may customize this behavior using the [WithGoNamePrefixFunc] option. +// +// This happens when the first rune is not a letter, e.g. a digit, or a symbol that has no word transliteration +// (see also [WithReplaceFunc] about symbol transliterations), +// as well as for most East Asian or Devanagari runes, for which there is no such concept as upper-case. +// +// # Linting +// +// [revive], the successor of golint is the reference linter. +// +// This means that [NameMangler.ToGoName] supports the initialisms that revive checks (see also [DefaultInitialisms]). +// +// At this moment, there is no attempt to transliterate unicode into ascii, meaning that some linters +// (e.g. asciicheck, gosmopolitan) may croak on go identifiers generated from unicode input. +// +// [revive]: https://github.com/mgechev/revive +func (m NameMangler) ToGoName(name string) string { + return m.goIdentifier(name, true) +} + +func (m NameMangler) goIdentifier(name string, exported bool) string { + s := m.splitterWithPostSplit + lexems := s.split(name) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + firstPart := lexemes[0] + if !exported { + if ok := firstPart.WriteLower(result, true); !ok { + // NOTE: an initialism as the first part is lower-cased: no longer generates stuff like hTTPxyz. + // + // same prefixing rule applied to unexported variable as to an exported one, so that we have consistent + // names, whether the generated identifier is exported or not. + result.WriteString(strings.ToLower(m.prefixFunc()(name))) + result.WriteString(lexemes[0].GetOriginal()) + } + } else { + if ok := firstPart.WriteTitleized(result, true); !ok { + // "repairs" a lexeme that doesn't start with a letter to become + // the start a legit go name. 
The current strategy is very crude and simply adds a fixed prefix, + // e.g. "X". + // For instance "1_sesame_street" would be split into lexemes ["1", "sesame", "street"] and + // the first one ("1") would result in something like "X1" (with the default prefix function). + // + // NOTE: no longer forcing the first part to be fully upper-cased + result.WriteString(m.prefixFunc()(name)) + result.WriteString(lexemes[0].GetOriginal()) + } + } + + for _, lexem := range lexemes[1:] { + // NOTE: no longer forcing initialism parts to be fully upper-cased: + // * pluralized initialism preserve their trailing "s" + // * mixed-cased initialisms, such as IPv4, are preserved + if ok := lexem.WriteTitleized(result, false); !ok { + // it's not titleized: perhaps it's too short, perhaps the first rune is not a letter. + // write anyway + result.WriteString(lexem.GetOriginal()) + } + } + + return result.String() +} + +func (m *NameMangler) addInitialisms(words ...string) { + m.index.add(words...) + m.index.buildCache() +} + +// split calls the inner splitter. +func (m NameMangler) split(str string) *[]string { + s := m.splitter + lexems := s.split(str) + result := poolOfStrings.BorrowStrings() + + for _, lexem := range *lexems { + *result = append(*result, lexem.GetOriginal()) + } + poolOfLexems.RedeemLexems(lexems) + + return result +} diff --git a/vendor/github.com/go-openapi/swag/mangling/options.go b/vendor/github.com/go-openapi/swag/mangling/options.go new file mode 100644 index 000000000000..3c92b2f18bf1 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/options.go @@ -0,0 +1,150 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +type ( + // PrefixFunc defines a safeguard rule (that may depend on the input string), to prefix + // a generated go name (in [NameMangler.ToGoName] and [NameMangler.ToVarName]). + // + // See [NameMangler.ToGoName] for more about which edge cases the prefix function covers. + PrefixFunc func(string) string + + // ReplaceFunc is a transliteration function to replace special runes by a word. + ReplaceFunc func(r rune) (string, bool) + + // Option to configure a [NameMangler]. + Option func(*options) + + options struct { + commonInitialisms []string + + goNamePrefixFunc PrefixFunc + goNamePrefixFuncPtr *PrefixFunc + replaceFunc func(r rune) (string, bool) + } +) + +func (o *options) prefixFunc() PrefixFunc { + if o.goNamePrefixFuncPtr != nil && *o.goNamePrefixFuncPtr != nil { + return *o.goNamePrefixFuncPtr + } + + return o.goNamePrefixFunc +} + +// WithGoNamePrefixFunc overrides the default prefix rule to safeguard generated go names. +// +// Example: +// +// This helps convert "123" into "{prefix}123" (a very crude strategy indeed, but it works). +// +// See [github.com/go-swagger/go-swagger/generator.DefaultFuncMap] for an example. +// +// The prefix function is assumed to return a string that starts with an upper case letter. +// +// The default is to prefix with "X". +// +// See [NameMangler.ToGoName] for more about which edge cases the prefix function covers. +func WithGoNamePrefixFunc(fn PrefixFunc) Option { + return func(o *options) { + o.goNamePrefixFunc = fn + } +} + +// WithGoNamePrefixFuncPtr is like [WithGoNamePrefixFunc] but it specifies a pointer to a function. +// +// [WithGoNamePrefixFunc] should be preferred in most situations. 
This option should only serve the +// purpose of handling special situations where the prefix function is not an internal variable +// (e.g. an exported package global). +// +// [WithGoNamePrefixFuncPtr] supersedes [WithGoNamePrefixFunc] if it also specified. +// +// If the provided pointer is nil or points to a nil value, this option has no effect. +// +// The caller should ensure that no undesirable concurrent changes are applied to the function pointed to. +func WithGoNamePrefixFuncPtr(ptr *PrefixFunc) Option { + return func(o *options) { + o.goNamePrefixFuncPtr = ptr + } +} + +// WithInitialisms declares the initialisms this mangler supports. +// +// This supersedes any pre-loaded defaults (see [DefaultInitialisms] for more about what initialisms are). +// +// It declares words to be recognized as "initialisms" (i.e. words that won't be camel cased or titled cased). +// +// Words must start with a (unicode) letter. If some don't, they are ignored. +// Words are either fully capitalized or mixed-cased. Lower-case only words are considered capitalized. +func WithInitialisms(words ...string) Option { + return func(o *options) { + o.commonInitialisms = words + } +} + +// WithAdditionalInitialisms adds new initialisms to the currently supported list (see [DefaultInitialisms]). +// +// The same sanitization rules apply as those described for [WithInitialisms]. +func WithAdditionalInitialisms(words ...string) Option { + return func(o *options) { + o.commonInitialisms = append(o.commonInitialisms, words...) + } +} + +// WithReplaceFunc specifies a custom transliteration function instead of the default. +// +// The default translates the following characters into words as follows: +// +// - '@' -> 'At' +// - '&' -> 'And' +// - '|' -> 'Pipe' +// - '$' -> 'Dollar' +// - '!' -> 'Bang' +// +// Notice that the outcome of a transliteration should always be titleized. +func WithReplaceFunc(fn ReplaceFunc) Option { + return func(o *options) { + o.replaceFunc = fn + } +} + +func defaultPrefixFunc(_ string) string { + return "X" +} + +// defaultReplaceTable finds a word representation for special characters. +func defaultReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +func optionsWithDefaults(opts []Option) options { + o := options{ + commonInitialisms: DefaultInitialisms(), + goNamePrefixFunc: defaultPrefixFunc, + replaceFunc: defaultReplaceTable, + } + + for _, apply := range opts { + apply(&o) + } + + return o +} diff --git a/vendor/github.com/go-openapi/swag/mangling/pools.go b/vendor/github.com/go-openapi/swag/mangling/pools.go new file mode 100644 index 000000000000..f8104351445d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/pools.go @@ -0,0 +1,123 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "bytes" + "sync" +) + +const maxAllocMatches = 8 + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. 
+ + matchesPool struct { + *sync.Pool + } + + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + stringsPool struct { + *sync.Pool + } +) + +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfStrings = stringsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]string, 0, maxAllocMatches) + + return &s + }, + }, + } +) + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) + } + + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p stringsPool) BorrowStrings() *[]string { + s := p.Get().(*[]string) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p stringsPool) RedeemStrings(s *[]string) { + p.Put(s) +} diff --git a/vendor/github.com/go-openapi/swag/mangling/split.go b/vendor/github.com/go-openapi/swag/mangling/split.go new file mode 100644 index 000000000000..ad0dec1708ec --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/split.go @@ -0,0 +1,306 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "unicode" +) + +type splitterOption func(*splitter) + +// withPostSplitInitialismCheck allows to catch initialisms after main split process +func withPostSplitInitialismCheck(s *splitter) { + s.postSplitInitialismCheck = true +} + +func withReplaceFunc(fn ReplaceFunc) func(*splitter) { + return func(s *splitter) { + s.replaceFunc = fn + } +} + +func withInitialismsCache(c *initialismsCache) splitterOption { + return func(s *splitter) { + s.initialismsCache = c + } +} + +type ( + initialismMatch struct { + body []rune + start, end int + complete bool + hasPlural pluralForm + } + initialismMatches []initialismMatch +) + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +type splitter struct { + *initialismsCache + + postSplitInitialismCheck bool + replaceFunc ReplaceFunc +} + +func newSplitter(options ...splitterOption) splitter { + var s splitter + + for _, option := range options { + option(&s) + } + + if s.replaceFunc == nil { + s.replaceFunc = defaultReplaceTable + } + + return s +} + +func (s splitter) split(name string) *[]nameLexem { + nameRunes := []rune(name) + matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + + return s.mapMatchesToNameLexems(nameRunes, matches) +} + +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches 
*initialismMatches + + for currentRunePosition, currentRune := range nameRunes { + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). + newMatches := poolOfMatches.BorrowMatches() + + // check current initialism matches + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + + // the match is complete: keep it then move on to next rune + continue + } + + if currentRunePosition-match.start == len(match.body) { + // unmatched: skip + continue + } + + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + // failed match, move on to next rune + continue + } + + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a lowercase letter, then it is not the end of match + // but the beginning of the next word. + // + // NOTE(fredbi): this heuristic sometimes leads to counterintuitive splits and + // perhaps (not sure yet) we should check against case _alternance_. + // + // Example: + // + // In the current version, in the sentence "IDS initialism", "ID" is recognized as an initialism, + // leading to a split like "id_s_initialism" (or IDSInitialism), + // whereas in the sentence "IDx initialism", it is not and produces something like + // "i_d_x_initialism" (or IDxInitialism). The generated file name is not great. + // + // Both go identifiers are tolerated by linters. + // + // Notice that the slightly different input "IDs initialism" is correctly detected + // as a pluralized initialism and produces something like "ids_initialism" (or IDsInitialism). + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + + // recognize a plural form for this initialism (only simple pluralization is supported) + if nextRune == 's' && match.hasPlural == simplePlural { + // detected a pluralized initialism + match.body = append(match.body, nextRune) + currentRunePosition++ + if currentRunePosition < len(nameRunes)-1 { + nextRune = nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // it is the start of a new word. 
+ // Match is only partial and the initialism is not recognized : move on + continue + } + } + + // this is a pluralized match: keep it + match.complete = true + match.hasPlural = simplePlural + match.end = currentRunePosition + *newMatches = append(*newMatches, match) + + // match is complete: keep it then move on to next rune + continue + } + + if newWord := unicode.IsLower(nextRune); newWord { + // it is the start of a new word + // Match is only partial and the initialism is not recognized : move on + continue + } + } + + match.complete = true + match.end = currentRunePosition + } + + // append the ongoing matching attempt (not necessarily complete) + *newMatches = append(*newMatches, match) + } + } + + // check for new initialism matches, based on the first character + for i, r := range s.initialismsRunes { + if r[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ + start: currentRunePosition, + body: r, + complete: false, + hasPlural: s.initialismsPluralForm[i], + }) + } + } + + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } + matches = newMatches + } + + // up to the caller to redeem this last slice + return matches +} + +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() + + var lastAcceptedMatch initialismMatch + for _, match := range *matches { + if !match.complete { + continue + } + + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + + continue + } + + if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch { + continue + } + + middle := nameRunes[lastAcceptedMatch.end+1 : match.start] + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + } + + // we have not found any accepted matches + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { + rest := nameRunes[lastAcceptedMatch.end+1:] + s.appendBrokenDownCasualString(nameLexems, rest) + } + + poolOfMatches.RedeemMatches(matches) + + return nameLexems +} + +func (s splitter) breakInitialism(original string) nameLexem { + return newInitialismNameLexem(original, original) +} + +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() + + addCasualNameLexem := func(original string) { + *segments = append(*segments, newCasualNameLexem(original)) + } + + addInitialismNameLexem := func(original, match string) { + *segments = append(*segments, newInitialismNameLexem(original, match)) + } + + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + + return + } + } + + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem + } + + // NOTE: (performance). 
The few remaining non-amortized allocations + // lay in the code below: using String() forces + for _, rn := range str { + if replace, found := s.replaceFunc(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() + } + + if replace != "" { + addNameLexem(replace) + } + + continue + } + + if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() + } + + continue + } + + if unicode.IsUpper(rn) { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + } + currentSegment.Reset() + } + + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + } +} diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/mangling/string_bytes.go similarity index 60% rename from vendor/github.com/go-openapi/swag/string_bytes.go rename to vendor/github.com/go-openapi/swag/mangling/string_bytes.go index 90745d5ca9f1..28daaf72b1a1 100644 --- a/vendor/github.com/go-openapi/swag/string_bytes.go +++ b/vendor/github.com/go-openapi/swag/mangling/string_bytes.go @@ -1,4 +1,7 @@ -package swag +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling import "unsafe" diff --git a/vendor/github.com/go-openapi/swag/mangling/util.go b/vendor/github.com/go-openapi/swag/mangling/util.go new file mode 100644 index 000000000000..0636417e360b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/util.go @@ -0,0 +1,118 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "strings" + "unicode" + "unicode/utf8" +) + +// Removes leading whitespaces +func trim(str string) string { return strings.TrimSpace(str) } + +// upper is strings.ToUpper() combined with trim +func upper(str string) string { + return strings.ToUpper(trim(str)) +} + +// lower is strings.ToLower() combined with trim +func lower(str string) string { + return strings.ToLower(trim(str)) +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. 
+func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && ((c < 'a') || (c > 'z') || (c-'a'+'A' != baseChar)) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } + + i += size + } + + return true +} diff --git a/vendor/github.com/go-openapi/swag/mangling_iface.go b/vendor/github.com/go-openapi/swag/mangling_iface.go new file mode 100644 index 000000000000..98b9a9992930 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling_iface.go @@ -0,0 +1,69 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/mangling" + +// GoNamePrefixFunc sets an optional rule to prefix go names +// which do not start with a letter. +// +// GoNamePrefixFunc should not be written to while concurrently using the other mangling functions of this package. +// +// Deprecated: use [mangling.WithGoNamePrefixFunc] instead. +var GoNamePrefixFunc mangling.PrefixFunc + +// swagNameMangler is a global instance of the name mangler specifically alloted +// to support deprecated functions. +var swagNameMangler = mangling.NewNameMangler( + mangling.WithGoNamePrefixFuncPtr(&GoNamePrefixFunc), +) + +// AddInitialisms adds additional initialisms to the default list (see [mangling.DefaultInitialisms]). +// +// AddInitialisms is not safe to be called concurrently. +// +// Deprecated: use [mangling.WithAdditionalInitialisms] instead. +func AddInitialisms(words ...string) { + swagNameMangler.AddInitialisms(words...) +} + +// Camelize a single word. +// +// Deprecated: use [mangling.NameMangler.Camelize] instead. +func Camelize(word string) string { return swagNameMangler.Camelize(word) } + +// ToFileName lowercases and underscores a go type name. +// +// Deprecated: use [mangling.NameMangler.ToFileName] instead. +func ToFileName(name string) string { return swagNameMangler.ToFileName(name) } + +// ToCommandName lowercases and underscores a go type name. +// +// Deprecated: use [mangling.NameMangler.ToCommandName] instead. +func ToCommandName(name string) string { return swagNameMangler.ToCommandName(name) } + +// ToHumanNameLower represents a code name as a human series of words. +// +// Deprecated: use [mangling.NameMangler.ToHumanNameLower] instead. 
+func ToHumanNameLower(name string) string { return swagNameMangler.ToHumanNameLower(name) } + +// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized. +// +// Deprecated: use [mangling.NameMangler.ToHumanNameTitle] instead. +func ToHumanNameTitle(name string) string { return swagNameMangler.ToHumanNameTitle(name) } + +// ToJSONName camel-cases a name which can be underscored or pascal-cased. +// +// Deprecated: use [mangling.NameMangler.ToJSONName] instead. +func ToJSONName(name string) string { return swagNameMangler.ToJSONName(name) } + +// ToVarName camel-cases a name which can be underscored or pascal-cased. +// +// Deprecated: use [mangling.NameMangler.ToVarName] instead. +func ToVarName(name string) string { return swagNameMangler.ToVarName(name) } + +// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes. +// +// Deprecated: use [mangling.NameMangler.ToGoName] instead. +func ToGoName(name string) string { return swagNameMangler.ToGoName(name) } diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go deleted file mode 100644 index 8bb64ac32f90..000000000000 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "unicode" - "unicode/utf8" -) - -type ( - lexemKind uint8 - - nameLexem struct { - original string - matchedInitialism string - kind lexemKind - } -) - -const ( - lexemKindCasualName lexemKind = iota - lexemKindInitialismName -) - -func newInitialismNameLexem(original, matchedInitialism string) nameLexem { - return nameLexem{ - kind: lexemKindInitialismName, - original: original, - matchedInitialism: matchedInitialism, - } -} - -func newCasualNameLexem(original string) nameLexem { - return nameLexem{ - kind: lexemKindCasualName, - original: original, - } -} - -func (l nameLexem) GetUnsafeGoName() string { - if l.kind == lexemKindInitialismName { - return l.matchedInitialism - } - - var ( - first rune - rest string - ) - - for i, orig := range l.original { - if i == 0 { - first = orig - continue - } - - if i > 0 { - rest = l.original[i:] - break - } - } - - if len(l.original) > 1 { - b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) - defer func() { - poolOfBuffers.RedeemBuffer(b) - }() - b.WriteRune(unicode.ToUpper(first)) - b.WriteString(lower(rest)) - return b.String() - } - - return l.original -} - -func (l nameLexem) GetOriginal() string { - return l.original -} - -func (l nameLexem) IsInitialism() bool { - return l.kind == lexemKindInitialismName -} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go deleted file mode 100644 index 821235f84d4a..000000000000 --- a/vendor/github.com/go-openapi/swag/net.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "net" - "strconv" -) - -// SplitHostPort splits a network address into a host and a port. -// The port is -1 when there is no port to be found -func SplitHostPort(addr string) (host string, port int, err error) { - h, p, err := net.SplitHostPort(addr) - if err != nil { - return "", -1, err - } - if p == "" { - return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} - } - - pi, err := strconv.Atoi(p) - if err != nil { - return "", -1, err - } - return h, pi, nil -} diff --git a/vendor/github.com/go-openapi/swag/netutils/LICENSE b/vendor/github.com/go-openapi/swag/netutils/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
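
For reference, a minimal usage sketch of the mangling API introduced above (not part of the vendored sources). It exercises NewNameMangler together with the WithGoNamePrefixFunc and WithAdditionalInitialisms options; the expected outputs follow the ToGoName, ToVarName and ToJSONName doc-comment examples and may vary with the configured initialisms.

package main

import (
	"fmt"

	"github.com/go-openapi/swag/mangling"
)

func main() {
	// Default mangler: pre-loaded with DefaultInitialisms and the "X" prefix rule.
	m := mangling.NewNameMangler()

	fmt.Println(m.ToGoName("Http_server"))     // HTTPServer (per the ToGoName doc comment)
	fmt.Println(m.ToVarName("Http_server"))    // httpServer (per the ToVarName doc comment)
	fmt.Println(m.ToJSONName("Hello_swagger")) // helloSwagger

	// Options replace the old package-level globals: a custom prefix for names that
	// cannot start with an upper-case letter, plus extra initialisms.
	custom := mangling.NewNameMangler(
		mangling.WithGoNamePrefixFunc(func(string) string { return "Gen" }),
		mangling.WithAdditionalInitialisms("GRPC"),
	)
	fmt.Println(custom.ToGoName("1_sesame_street")) // something like Gen1SesameStreet
}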
diff --git a/vendor/github.com/go-openapi/swag/netutils/doc.go b/vendor/github.com/go-openapi/swag/netutils/doc.go new file mode 100644 index 000000000000..74282f8e51c5 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package netutils provides helpers for network-related tasks. +package netutils diff --git a/vendor/github.com/go-openapi/swag/netutils/net.go b/vendor/github.com/go-openapi/swag/netutils/net.go new file mode 100644 index 000000000000..82a1544af7bf --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils/net.go @@ -0,0 +1,31 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package netutils + +import ( + "net" + "strconv" +) + +// SplitHostPort splits a network address into a host and a port. +// +// The difference with the standard net.SplitHostPort is that the port is converted to an int. +// +// The port is -1 when there is no port to be found. +func SplitHostPort(addr string) (host string, port int, err error) { + h, p, err := net.SplitHostPort(addr) + if err != nil { + return "", -1, err + } + if p == "" { + return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} + } + + pi, err := strconv.Atoi(p) + if err != nil { + return "", -1, err + } + + return h, pi, nil +} diff --git a/vendor/github.com/go-openapi/swag/netutils_iface.go b/vendor/github.com/go-openapi/swag/netutils_iface.go new file mode 100644 index 000000000000..d658de25b3f3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils_iface.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/netutils" + +// SplitHostPort splits a network address into a host and a port. +// +// Deprecated: use [netutils.SplitHostPort] instead. +func SplitHostPort(addr string) (host string, port int, err error) { + return netutils.SplitHostPort(addr) +} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go deleted file mode 100644 index 274727a866c4..000000000000 --- a/vendor/github.com/go-openapi/swag/split.go +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "bytes" - "sync" - "unicode" - "unicode/utf8" -) - -type ( - splitter struct { - initialisms []string - initialismsRunes [][]rune - initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version - postSplitInitialismCheck bool - } - - splitterOption func(*splitter) - - initialismMatch struct { - body []rune - start, end int - complete bool - } - initialismMatches []initialismMatch -) - -type ( - // memory pools of temporary objects. 
- // - // These are used to recycle temporarily allocated objects - // and relieve the GC from undue pressure. - - matchesPool struct { - *sync.Pool - } - - buffersPool struct { - *sync.Pool - } - - lexemsPool struct { - *sync.Pool - } - - splittersPool struct { - *sync.Pool - } -) - -var ( - // poolOfMatches holds temporary slices for recycling during the initialism match process - poolOfMatches = matchesPool{ - Pool: &sync.Pool{ - New: func() any { - s := make(initialismMatches, 0, maxAllocMatches) - - return &s - }, - }, - } - - poolOfBuffers = buffersPool{ - Pool: &sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } - - poolOfLexems = lexemsPool{ - Pool: &sync.Pool{ - New: func() any { - s := make([]nameLexem, 0, maxAllocMatches) - - return &s - }, - }, - } - - poolOfSplitters = splittersPool{ - Pool: &sync.Pool{ - New: func() any { - s := newSplitter() - - return &s - }, - }, - } -) - -// nameReplaceTable finds a word representation for special characters. -func nameReplaceTable(r rune) (string, bool) { - switch r { - case '@': - return "At ", true - case '&': - return "And ", true - case '|': - return "Pipe ", true - case '$': - return "Dollar ", true - case '!': - return "Bang ", true - case '-': - return "", true - case '_': - return "", true - default: - return "", false - } -} - -// split calls the splitter. -// -// Use newSplitter for more control and options -func split(str string) []string { - s := poolOfSplitters.BorrowSplitter() - lexems := s.split(str) - result := make([]string, 0, len(*lexems)) - - for _, lexem := range *lexems { - result = append(result, lexem.GetOriginal()) - } - poolOfLexems.RedeemLexems(lexems) - poolOfSplitters.RedeemSplitter(s) - - return result - -} - -func newSplitter(options ...splitterOption) splitter { - s := splitter{ - postSplitInitialismCheck: false, - initialisms: initialisms, - initialismsRunes: initialismsRunes, - initialismsUpperCased: initialismsUpperCased, - } - - for _, option := range options { - option(&s) - } - - return s -} - -// withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) { - s.postSplitInitialismCheck = true -} - -func (p matchesPool) BorrowMatches() *initialismMatches { - s := p.Get().(*initialismMatches) - *s = (*s)[:0] // reset slice, keep allocated capacity - - return s -} - -func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { - s := p.Get().(*bytes.Buffer) - s.Reset() - - if s.Cap() < size { - s.Grow(size) - } - - return s -} - -func (p lexemsPool) BorrowLexems() *[]nameLexem { - s := p.Get().(*[]nameLexem) - *s = (*s)[:0] // reset slice, keep allocated capacity - - return s -} - -func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { - s := p.Get().(*splitter) - s.postSplitInitialismCheck = false // reset options - for _, apply := range options { - apply(s) - } - - return s -} - -func (p matchesPool) RedeemMatches(s *initialismMatches) { - p.Put(s) -} - -func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { - p.Put(s) -} - -func (p lexemsPool) RedeemLexems(s *[]nameLexem) { - p.Put(s) -} - -func (p splittersPool) RedeemSplitter(s *splitter) { - p.Put(s) -} - -func (m initialismMatch) isZero() bool { - return m.start == 0 && m.end == 0 -} - -func (s splitter) split(name string) *[]nameLexem { - nameRunes := []rune(name) - matches := s.gatherInitialismMatches(nameRunes) - if matches == nil { - return poolOfLexems.BorrowLexems() - } - - return s.mapMatchesToNameLexems(nameRunes, matches) -} - 
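
As an aside, a short sketch (not part of the patch) of the netutils.SplitHostPort helper added above, which the deprecated swag.SplitHostPort now wraps; the behavior follows its doc comment (integer port, with -1 and an error when no port is present).

package main

import (
	"fmt"

	"github.com/go-openapi/swag/netutils"
)

func main() {
	host, port, err := netutils.SplitHostPort("localhost:8080")
	if err != nil {
		panic(err)
	}
	fmt.Println(host, port) // localhost 8080

	// A missing port is reported as an error and the port is returned as -1.
	_, port, err = netutils.SplitHostPort("localhost")
	fmt.Println(port, err != nil) // -1 true
}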
-func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { - var matches *initialismMatches - - for currentRunePosition, currentRune := range nameRunes { - // recycle these allocations as we loop over runes - // with such recycling, only 2 slices should be allocated per call - // instead of o(n). - newMatches := poolOfMatches.BorrowMatches() - - // check current initialism matches - if matches != nil { // skip first iteration - for _, match := range *matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - *newMatches = append(*newMatches, match) - continue - } - - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if currentMatchRune != currentRune { - continue - } - - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue - } - } - - match.complete = true - match.end = currentRunePosition - } - - *newMatches = append(*newMatches, match) - } - } - - // check for new initialism matches - for i := range s.initialisms { - initialismRunes := s.initialismsRunes[i] - if initialismRunes[0] == currentRune { - *newMatches = append(*newMatches, initialismMatch{ - start: currentRunePosition, - body: initialismRunes, - complete: false, - }) - } - } - - if matches != nil { - poolOfMatches.RedeemMatches(matches) - } - matches = newMatches - } - - // up to the caller to redeem this last slice - return matches -} - -func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { - nameLexems := poolOfLexems.BorrowLexems() - - var lastAcceptedMatch initialismMatch - for _, match := range *matches { - if !match.complete { - continue - } - - if firstMatch := lastAcceptedMatch.isZero(); firstMatch { - s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) - *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) - - lastAcceptedMatch = match - - continue - } - - if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch { - continue - } - - middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - s.appendBrokenDownCasualString(nameLexems, middle) - *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) - - lastAcceptedMatch = match - } - - // we have not found any accepted matches - if lastAcceptedMatch.isZero() { - *nameLexems = (*nameLexems)[:0] - s.appendBrokenDownCasualString(nameLexems, nameRunes) - } else if lastAcceptedMatch.end+1 != len(nameRunes) { - rest := nameRunes[lastAcceptedMatch.end+1:] - s.appendBrokenDownCasualString(nameLexems, rest) - } - - poolOfMatches.RedeemMatches(matches) - - return nameLexems -} - -func (s splitter) breakInitialism(original string) nameLexem { - return newInitialismNameLexem(original, original) -} - -func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { - currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused - defer func() { - poolOfBuffers.RedeemBuffer(currentSegment) - }() - - addCasualNameLexem := func(original string) { - *segments = append(*segments, newCasualNameLexem(original)) - } - - 
addInitialismNameLexem := func(original, match string) { - *segments = append(*segments, newInitialismNameLexem(original, match)) - } - - var addNameLexem func(string) - if s.postSplitInitialismCheck { - addNameLexem = func(original string) { - for i := range s.initialisms { - if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { - addInitialismNameLexem(original, s.initialisms[i]) - - return - } - } - - addCasualNameLexem(original) - } - } else { - addNameLexem = addCasualNameLexem - } - - for _, rn := range str { - if replace, found := nameReplaceTable(rn); found { - if currentSegment.Len() > 0 { - addNameLexem(currentSegment.String()) - currentSegment.Reset() - } - - if replace != "" { - addNameLexem(replace) - } - - continue - } - - if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment.Len() > 0 { - addNameLexem(currentSegment.String()) - currentSegment.Reset() - } - - continue - } - - if unicode.IsUpper(rn) { - if currentSegment.Len() > 0 { - addNameLexem(currentSegment.String()) - } - currentSegment.Reset() - } - - currentSegment.WriteRune(rn) - } - - if currentSegment.Len() > 0 { - addNameLexem(currentSegment.String()) - } -} - -// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but -// it ignores leading and trailing blank spaces in the compared -// string. -// -// base is assumed to be composed of upper-cased runes, and be already -// trimmed. -// -// This code is heavily inspired from strings.EqualFold. -func isEqualFoldIgnoreSpace(base []rune, str string) bool { - var i, baseIndex int - // equivalent to b := []byte(str), but without data copy - b := hackStringBytes(str) - - for i < len(b) { - if c := b[i]; c < utf8.RuneSelf { - // fast path for ASCII - if c != ' ' && c != '\t' { - break - } - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if !unicode.IsSpace(r) { - break - } - i += size - } - - if i >= len(b) { - return len(base) == 0 - } - - for _, baseRune := range base { - if i >= len(b) { - break - } - - if c := b[i]; c < utf8.RuneSelf { - // single byte rune case (ASCII) - if baseRune >= utf8.RuneSelf { - return false - } - - baseChar := byte(baseRune) - if c != baseChar && - !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { - return false - } - - baseIndex++ - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if unicode.ToUpper(r) != baseRune { - return false - } - baseIndex++ - i += size - } - - if baseIndex != len(base) { - return false - } - - // all passed: now we should only have blanks - for i < len(b) { - if c := b[i]; c < utf8.RuneSelf { - // fast path for ASCII - if c != ' ' && c != '\t' { - return false - } - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if !unicode.IsSpace(r) { - return false - } - - i += size - } - - return true -} diff --git a/vendor/github.com/go-openapi/swag/stringutils/LICENSE b/vendor/github.com/go-openapi/swag/stringutils/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
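
To make the splitter heuristics described in gatherInitialismMatches concrete, a small sketch (not part of the vendored sources); the outputs are the indicative ones quoted in that NOTE and depend on the default initialisms.

package main

import (
	"fmt"

	"github.com/go-openapi/swag/mangling"
)

func main() {
	m := mangling.NewNameMangler()

	// "IDs" is detected as the pluralized initialism "ID" + "s".
	fmt.Println(m.ToFileName("IDs initialism")) // something like ids_initialism
	fmt.Println(m.ToGoName("IDs initialism"))   // something like IDsInitialism

	// Fully upper-cased "IDS": "ID" matches and the trailing "S" becomes a separate part.
	fmt.Println(m.ToFileName("IDS initialism")) // something like id_s_initialism

	// "IDx" is not recognized as an initialism at all.
	fmt.Println(m.ToFileName("IDx initialism")) // something like i_d_x_initialism
}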
diff --git a/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go b/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go new file mode 100644 index 000000000000..28056ad25c38 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package stringutils + +import "strings" + +const ( + // collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" + + collectionFormatDefaultSep = "," +) + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute): +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func JoinByFormat(data []string, format string) []string { + if len(data) == 0 { + return data + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return data + default: + sep = collectionFormatDefaultSep + } + return []string{strings.Join(data, sep)} +} + +// SplitByFormat splits a string by a known format: +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func SplitByFormat(data, format string) []string { + if data == "" { + return nil + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return nil + default: + sep = collectionFormatDefaultSep + } + var result []string + for _, s := range strings.Split(data, sep) { + if ts := strings.TrimSpace(s); ts != "" { + result = append(result, ts) + } + } + return result +} diff --git a/vendor/github.com/go-openapi/swag/stringutils/doc.go b/vendor/github.com/go-openapi/swag/stringutils/doc.go new file mode 100644 index 000000000000..c6d17a1160bd --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package stringutils exposes helpers to search and process strings. +package stringutils diff --git a/vendor/github.com/go-openapi/swag/stringutils/strings.go b/vendor/github.com/go-openapi/swag/stringutils/strings.go new file mode 100644 index 000000000000..cd792b7d0834 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/strings.go @@ -0,0 +1,23 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package stringutils + +import ( + "slices" + "strings" +) + +// ContainsStrings searches a slice of strings for a case-sensitive match +// +// Now equivalent to the standard library [slice.Contains]. 
+func ContainsStrings(coll []string, item string) bool { + return slices.Contains(coll, item) +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match +func ContainsStringsCI(coll []string, item string) bool { + return slices.ContainsFunc(coll, func(e string) bool { + return strings.EqualFold(e, item) + }) +} diff --git a/vendor/github.com/go-openapi/swag/stringutils_iface.go b/vendor/github.com/go-openapi/swag/stringutils_iface.go new file mode 100644 index 000000000000..dbfa48484306 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils_iface.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/stringutils" + +// ContainsStrings searches a slice of strings for a case-sensitive match. +// +// Deprecated: use [slices.Contains] or [stringutils.ContainsStrings] instead. +func ContainsStrings(coll []string, item string) bool { + return stringutils.ContainsStrings(coll, item) +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match. +// +// Deprecated: use [stringutils.ContainsStringsCI] instead. +func ContainsStringsCI(coll []string, item string) bool { + return stringutils.ContainsStringsCI(coll, item) +} + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute). +// +// Deprecated: use [stringutils.JoinByFormat] instead. +func JoinByFormat(data []string, format string) []string { + return stringutils.JoinByFormat(data, format) +} + +// SplitByFormat splits a string by a known format. +// +// Deprecated: use [stringutils.SplitByFormat] instead. +func SplitByFormat(data, format string) []string { + return stringutils.SplitByFormat(data, format) +} diff --git a/vendor/github.com/go-openapi/swag/typeutils/LICENSE b/vendor/github.com/go-openapi/swag/typeutils/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/typeutils/doc.go b/vendor/github.com/go-openapi/swag/typeutils/doc.go new file mode 100644 index 000000000000..66bed20dff0e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package typeutils exposes utilities to inspect generic types. +package typeutils diff --git a/vendor/github.com/go-openapi/swag/typeutils/types.go b/vendor/github.com/go-openapi/swag/typeutils/types.go new file mode 100644 index 000000000000..55487a673c4b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/types.go @@ -0,0 +1,80 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package typeutils + +import "reflect" + +type zeroable interface { + IsZero() bool +} + +// IsZero returns true when the value passed into the function is a zero value. 
+// This allows for safer checking of interface values. +func IsZero(data any) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { //nolint:exhaustive + case + reflect.Interface, + reflect.Func, + reflect.Chan, + reflect.Pointer, + reflect.UnsafePointer, + reflect.Map, + reflect.Slice: + if v.IsNil() { + return true + } + } + + // check for things that have an IsZero method instead + if vv, ok := data.(zeroable); ok { + return vv.IsZero() + } + + // continue with slightly more complex reflection + switch v.Kind() { //nolint:exhaustive + case reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Struct, reflect.Array: + return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) + case reflect.Invalid: + return true + default: + return false + } +} + +// IsNil checks if input is nil. +// +// For types chan, func, interface, map, pointer, or slice it returns true if its argument is nil. +// +// See [reflect.Value.IsNil]. +func IsNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Pointer, + reflect.UnsafePointer, + reflect.Map, + reflect.Slice, + reflect.Chan, + reflect.Interface, + reflect.Func: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} diff --git a/vendor/github.com/go-openapi/swag/typeutils_iface.go b/vendor/github.com/go-openapi/swag/typeutils_iface.go new file mode 100644 index 000000000000..b63813ea408e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils_iface.go @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/typeutils" + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. +// +// Deprecated: use [typeutils.IsZero] instead. +func IsZero(data any) bool { return typeutils.IsZero(data) } diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go deleted file mode 100644 index 5051401c49ff..000000000000 --- a/vendor/github.com/go-openapi/swag/util.go +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// GoNamePrefixFunc sets an optional rule to prefix go names -// which do not start with a letter. -// -// The prefix function is assumed to return a string that starts with an upper case letter. -// -// e.g. 
to help convert "123" into "{prefix}123" -// -// The default is to prefix with "X" -var GoNamePrefixFunc func(string) string - -func prefixFunc(name, in string) string { - if GoNamePrefixFunc == nil { - return "X" + in - } - - return GoNamePrefixFunc(name) + in -} - -const ( - // collectionFormatComma = "csv" - collectionFormatSpace = "ssv" - collectionFormatTab = "tsv" - collectionFormatPipe = "pipes" - collectionFormatMulti = "multi" -) - -// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute): -// -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func JoinByFormat(data []string, format string) []string { - if len(data) == 0 { - return data - } - var sep string - switch format { - case collectionFormatSpace: - sep = " " - case collectionFormatTab: - sep = "\t" - case collectionFormatPipe: - sep = "|" - case collectionFormatMulti: - return data - default: - sep = "," - } - return []string{strings.Join(data, sep)} -} - -// SplitByFormat splits a string by a known format: -// -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func SplitByFormat(data, format string) []string { - if data == "" { - return nil - } - var sep string - switch format { - case collectionFormatSpace: - sep = " " - case collectionFormatTab: - sep = "\t" - case collectionFormatPipe: - sep = "|" - case collectionFormatMulti: - return nil - default: - sep = "," - } - var result []string - for _, s := range strings.Split(data, sep) { - if ts := strings.TrimSpace(s); ts != "" { - result = append(result, ts) - } - } - return result -} - -// Removes leading whitespaces -func trim(str string) string { - return strings.TrimSpace(str) -} - -// Shortcut to strings.ToUpper() -func upper(str string) string { - return strings.ToUpper(trim(str)) -} - -// Shortcut to strings.ToLower() -func lower(str string) string { - return strings.ToLower(trim(str)) -} - -// Camelize an uppercased word -func Camelize(word string) string { - camelized := poolOfBuffers.BorrowBuffer(len(word)) - defer func() { - poolOfBuffers.RedeemBuffer(camelized) - }() - - for pos, ru := range []rune(word) { - if pos > 0 { - camelized.WriteRune(unicode.ToLower(ru)) - } else { - camelized.WriteRune(unicode.ToUpper(ru)) - } - } - return camelized.String() -} - -// ToFileName lowercases and underscores a go type name -func ToFileName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - out = append(out, lower(w)) - } - - return strings.Join(out, "_") -} - -// ToCommandName lowercases and underscores a go type name -func ToCommandName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - out = append(out, lower(w)) - } - return strings.Join(out, "-") -} - -// ToHumanNameLower represents a code name as a human series of words -func ToHumanNameLower(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - in := s.split(name) - poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(*in)) - - for _, w := range *in { - if !w.IsInitialism() { - out = append(out, lower(w.GetOriginal())) - } else { - out = append(out, trim(w.GetOriginal())) - } - } - poolOfLexems.RedeemLexems(in) - - return strings.Join(out, " ") -} - -// ToHumanNameTitle represents a code name as a human series of words with the first letters 
titleized -func ToHumanNameTitle(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - in := s.split(name) - poolOfSplitters.RedeemSplitter(s) - - out := make([]string, 0, len(*in)) - for _, w := range *in { - original := trim(w.GetOriginal()) - if !w.IsInitialism() { - out = append(out, Camelize(original)) - } else { - out = append(out, original) - } - } - poolOfLexems.RedeemLexems(in) - - return strings.Join(out, " ") -} - -// ToJSONName camelcases a name which can be underscored or pascal cased -func ToJSONName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for i, w := range in { - if i == 0 { - out = append(out, lower(w)) - continue - } - out = append(out, Camelize(trim(w))) - } - return strings.Join(out, "") -} - -// ToVarName camelcases a name which can be underscored or pascal cased -func ToVarName(name string) string { - res := ToGoName(name) - if isInitialism(res) { - return lower(res) - } - if len(res) <= 1 { - return lower(res) - } - return lower(res[:1]) + res[1:] -} - -// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes -func ToGoName(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - lexems := s.split(name) - poolOfSplitters.RedeemSplitter(s) - defer func() { - poolOfLexems.RedeemLexems(lexems) - }() - lexemes := *lexems - - if len(lexemes) == 0 { - return "" - } - - result := poolOfBuffers.BorrowBuffer(len(name)) - defer func() { - poolOfBuffers.RedeemBuffer(result) - }() - - // check if not starting with a letter, upper case - firstPart := lexemes[0].GetUnsafeGoName() - if lexemes[0].IsInitialism() { - firstPart = upper(firstPart) - } - - if c := firstPart[0]; c < utf8.RuneSelf { - // ASCII - switch { - case 'A' <= c && c <= 'Z': - result.WriteString(firstPart) - case 'a' <= c && c <= 'z': - result.WriteByte(c - 'a' + 'A') - result.WriteString(firstPart[1:]) - default: - result.WriteString(prefixFunc(name, firstPart)) - // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: - // assume this is always the case - } - } else { - // unicode - firstRune, _ := utf8.DecodeRuneInString(firstPart) - switch { - case !unicode.IsLetter(firstRune): - result.WriteString(prefixFunc(name, firstPart)) - case !unicode.IsUpper(firstRune): - result.WriteString(prefixFunc(name, firstPart)) - /* - result.WriteRune(unicode.ToUpper(firstRune)) - result.WriteString(firstPart[offset:]) - */ - default: - result.WriteString(firstPart) - } - } - - for _, lexem := range lexemes[1:] { - goName := lexem.GetUnsafeGoName() - - // to support old behavior - if lexem.IsInitialism() { - goName = upper(goName) - } - result.WriteString(goName) - } - - return result.String() -} - -// ContainsStrings searches a slice of strings for a case-sensitive match -func ContainsStrings(coll []string, item string) bool { - for _, a := range coll { - if a == item { - return true - } - } - return false -} - -// ContainsStringsCI searches a slice of strings for a case-insensitive match -func ContainsStringsCI(coll []string, item string) bool { - for _, a := range coll { - if strings.EqualFold(a, item) { - return true - } - } - return false -} - -type zeroable interface { - IsZero() bool -} - -// IsZero returns true when the value passed into the function is a zero value. -// This allows for safer checking of interface values. 
-func IsZero(data interface{}) bool { - v := reflect.ValueOf(data) - // check for nil data - switch v.Kind() { //nolint:exhaustive - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - if v.IsNil() { - return true - } - } - - // check for things that have an IsZero method instead - if vv, ok := data.(zeroable); ok { - return vv.IsZero() - } - - // continue with slightly more complex reflection - switch v.Kind() { //nolint:exhaustive - case reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Struct, reflect.Array: - return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) - case reflect.Invalid: - return true - default: - return false - } -} - -// CommandLineOptionsGroup represents a group of user-defined command line options -type CommandLineOptionsGroup struct { - ShortDescription string - LongDescription string - Options interface{} -} diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go deleted file mode 100644 index f59e0259320c..000000000000 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "encoding/json" - "errors" - "fmt" - "path/filepath" - "reflect" - "sort" - "strconv" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v3" -) - -// YAMLMatcher matches yaml -func YAMLMatcher(path string) bool { - ext := filepath.Ext(path) - return ext == ".yaml" || ext == ".yml" -} - -// YAMLToJSON converts YAML unmarshaled data into json compatible data -func YAMLToJSON(data interface{}) (json.RawMessage, error) { - jm, err := transformData(data) - if err != nil { - return nil, err - } - b, err := WriteJSON(jm) - return json.RawMessage(b), err -} - -// BytesToYAMLDoc converts a byte slice into a YAML document -func BytesToYAMLDoc(data []byte) (interface{}, error) { - var document yaml.Node // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err - } - if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, errors.New("only YAML documents that are objects are supported") - } - return &document, nil -} - -func yamlNode(root *yaml.Node) (interface{}, error) { - switch root.Kind { - case yaml.DocumentNode: - return yamlDocument(root) - case yaml.SequenceNode: - return yamlSequence(root) - case yaml.MappingNode: - return yamlMapping(root) - case yaml.ScalarNode: - return yamlScalar(root) - case yaml.AliasNode: - return yamlNode(root.Alias) - default: - return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) - } -} - -func yamlDocument(node *yaml.Node) (interface{}, error) { - if len(node.Content) != 1 { - return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) - } - return yamlNode(node.Content[0]) -} - -func yamlMapping(node *yaml.Node) (interface{}, error) { - m := make(JSONMapSlice, len(node.Content)/2) - - var j int - for i := 0; i < len(node.Content); i += 2 { - var nmi JSONMapItem - k, err := yamlStringScalarC(node.Content[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode YAML map key: %w", err) - } - nmi.Key = k - v, err := yamlNode(node.Content[i+1]) - if err != nil { - return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) - } - nmi.Value = v - m[j] = nmi - j++ - } - return m, nil -} - -func yamlSequence(node *yaml.Node) (interface{}, error) { - s := make([]interface{}, 0) - - for i := 0; i < len(node.Content); i++ { - - v, err := yamlNode(node.Content[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) - } - s = append(s, v) - } - return s, nil -} - -const ( // See https://yaml.org/type/ - yamlStringScalar = "tag:yaml.org,2002:str" - yamlIntScalar = "tag:yaml.org,2002:int" - yamlBoolScalar = "tag:yaml.org,2002:bool" - yamlFloatScalar = "tag:yaml.org,2002:float" - yamlTimestamp = "tag:yaml.org,2002:timestamp" - yamlNull = "tag:yaml.org,2002:null" -) - -func yamlScalar(node *yaml.Node) (interface{}, error) { - switch node.LongTag() { - case yamlStringScalar: - return node.Value, nil - case yamlBoolScalar: - b, err := strconv.ParseBool(node.Value) - if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) - } - return b, nil - case yamlIntScalar: - i, err := strconv.ParseInt(node.Value, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. 
Expecting integer content: %w", node.Value, err) - } - return i, nil - case yamlFloatScalar: - f, err := strconv.ParseFloat(node.Value, 64) - if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) - } - return f, nil - case yamlTimestamp: - return node.Value, nil - case yamlNull: - return nil, nil //nolint:nilnil - default: - return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) - } -} - -func yamlStringScalarC(node *yaml.Node) (string, error) { - if node.Kind != yaml.ScalarNode { - return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) - } - switch node.LongTag() { - case yamlStringScalar, yamlIntScalar, yamlFloatScalar: - return node.Value, nil - default: - return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) - } -} - -// JSONMapSlice represent a JSON object, with the order of keys maintained -type JSONMapSlice []JSONMapItem - -// MarshalJSON renders a JSONMapSlice as JSON -func (s JSONMapSlice) MarshalJSON() ([]byte, error) { - w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} - s.MarshalEasyJSON(w) - return w.BuildBytes() -} - -// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON -func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { - w.RawByte('{') - - ln := len(s) - last := ln - 1 - for i := 0; i < ln; i++ { - s[i].MarshalEasyJSON(w) - if i != last { // last item - w.RawByte(',') - } - } - - w.RawByte('}') -} - -// UnmarshalJSON makes a JSONMapSlice from JSON -func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { - l := jlexer.Lexer{Data: data} - s.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON -func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { - if in.IsNull() { - in.Skip() - return - } - - var result JSONMapSlice - in.Delim('{') - for !in.IsDelim('}') { - var mi JSONMapItem - mi.UnmarshalEasyJSON(in) - result = append(result, mi) - } - *s = result -} - -func (s JSONMapSlice) MarshalYAML() (interface{}, error) { - var n yaml.Node - n.Kind = yaml.DocumentNode - var nodes []*yaml.Node - for _, item := range s { - nn, err := json2yaml(item.Value) - if err != nil { - return nil, err - } - ns := []*yaml.Node{ - { - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: item.Key, - }, - nn, - } - nodes = append(nodes, ns...) 
- } - - n.Content = []*yaml.Node{ - { - Kind: yaml.MappingNode, - Content: nodes, - }, - } - - return yaml.Marshal(&n) -} - -func isNil(input interface{}) bool { - if input == nil { - return true - } - kind := reflect.TypeOf(input).Kind() - switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: - return reflect.ValueOf(input).IsNil() - default: - return false - } -} - -func json2yaml(item interface{}) (*yaml.Node, error) { - if isNil(item) { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Value: "null", - }, nil - } - - switch val := item.(type) { - case JSONMapSlice: - var n yaml.Node - n.Kind = yaml.MappingNode - for i := range val { - childNode, err := json2yaml(&val[i].Value) - if err != nil { - return nil, err - } - n.Content = append(n.Content, &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: val[i].Key, - }, childNode) - } - return &n, nil - case map[string]interface{}: - var n yaml.Node - n.Kind = yaml.MappingNode - keys := make([]string, 0, len(val)) - for k := range val { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := val[k] - childNode, err := json2yaml(v) - if err != nil { - return nil, err - } - n.Content = append(n.Content, &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: k, - }, childNode) - } - return &n, nil - case []interface{}: - var n yaml.Node - n.Kind = yaml.SequenceNode - for i := range val { - childNode, err := json2yaml(val[i]) - if err != nil { - return nil, err - } - n.Content = append(n.Content, childNode) - } - return &n, nil - case string: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: val, - }, nil - case float64: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlFloatScalar, - Value: strconv.FormatFloat(val, 'f', -1, 64), - }, nil - case int64: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlIntScalar, - Value: strconv.FormatInt(val, 10), - }, nil - case uint64: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlIntScalar, - Value: strconv.FormatUint(val, 10), - }, nil - case bool: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlBoolScalar, - Value: strconv.FormatBool(val), - }, nil - default: - return nil, fmt.Errorf("unhandled type: %T", val) - } -} - -// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice -type JSONMapItem struct { - Key string - Value interface{} -} - -// MarshalJSON renders a JSONMapItem as JSON -func (s JSONMapItem) MarshalJSON() ([]byte, error) { - w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} - s.MarshalEasyJSON(w) - return w.BuildBytes() -} - -// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON -func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { - w.String(s.Key) - w.RawByte(':') - w.Raw(WriteJSON(s.Value)) -} - -// UnmarshalJSON makes a JSONMapItem from JSON -func (s *JSONMapItem) UnmarshalJSON(data []byte) error { - l := jlexer.Lexer{Data: data} - s.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON -func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { - key := in.UnsafeString() - in.WantColon() - value := in.Interface() - in.WantComma() - s.Key = key - s.Value = value -} - -func transformData(input interface{}) (out interface{}, err error) { - format := func(t interface{}) (string, error) { - switch k := t.(type) { - case string: - return k, nil - case uint: - return strconv.FormatUint(uint64(k), 
10), nil - case uint8: - return strconv.FormatUint(uint64(k), 10), nil - case uint16: - return strconv.FormatUint(uint64(k), 10), nil - case uint32: - return strconv.FormatUint(uint64(k), 10), nil - case uint64: - return strconv.FormatUint(k, 10), nil - case int: - return strconv.Itoa(k), nil - case int8: - return strconv.FormatInt(int64(k), 10), nil - case int16: - return strconv.FormatInt(int64(k), 10), nil - case int32: - return strconv.FormatInt(int64(k), 10), nil - case int64: - return strconv.FormatInt(k, 10), nil - default: - return "", fmt.Errorf("unexpected map key type, got: %T", k) - } - } - - switch in := input.(type) { - case yaml.Node: - return yamlNode(&in) - case *yaml.Node: - return yamlNode(in) - case map[interface{}]interface{}: - o := make(JSONMapSlice, 0, len(in)) - for ke, va := range in { - var nmi JSONMapItem - if nmi.Key, err = format(ke); err != nil { - return nil, err - } - - v, ert := transformData(va) - if ert != nil { - return nil, ert - } - nmi.Value = v - o = append(o, nmi) - } - return o, nil - case []interface{}: - len1 := len(in) - o := make([]interface{}, len1) - for i := 0; i < len1; i++ { - o[i], err = transformData(in[i]) - if err != nil { - return nil, err - } - } - return o, nil - } - return input, nil -} - -// YAMLDoc loads a yaml document from either http or a file and converts it to json -func YAMLDoc(path string) (json.RawMessage, error) { - yamlDoc, err := YAMLData(path) - if err != nil { - return nil, err - } - - data, err := YAMLToJSON(yamlDoc) - if err != nil { - return nil, err - } - - return data, nil -} - -// YAMLData loads a yaml document from either http or a file -func YAMLData(path string) (interface{}, error) { - data, err := LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - - return BytesToYAMLDoc(data) -} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/LICENSE b/vendor/github.com/go-openapi/swag/yamlutils/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/yamlutils/doc.go b/vendor/github.com/go-openapi/swag/yamlutils/doc.go new file mode 100644 index 000000000000..7bb92a82f1b0 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/doc.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package yamlutils provides utilities to work with YAML documents. 
+// +// - [BytesToYAMLDoc] to construct a [yaml.Node] document +// - [YAMLToJSON] to convert a [yaml.Node] document to JSON bytes +// - [YAMLMapSlice] to serialize and deserialize YAML with the order of keys maintained +package yamlutils + +import ( + _ "go.yaml.in/yaml/v3" // for documentation purpose only +) diff --git a/vendor/github.com/go-openapi/swag/yamlutils/errors.go b/vendor/github.com/go-openapi/swag/yamlutils/errors.go new file mode 100644 index 000000000000..e87bc5e8beb3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/errors.go @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package yamlutils + +type yamlError string + +const ( + // ErrYAML is an error raised by YAML utilities + ErrYAML yamlError = "yaml error" +) + +func (e yamlError) Error() string { + return string(e) +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go b/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go new file mode 100644 index 000000000000..3daf68dbba0c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go @@ -0,0 +1,316 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package yamlutils + +import ( + "fmt" + "iter" + "slices" + "sort" + "strconv" + + "github.com/go-openapi/swag/conv" + "github.com/go-openapi/swag/jsonutils" + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + "github.com/go-openapi/swag/typeutils" + yaml "go.yaml.in/yaml/v3" +) + +var ( + _ yaml.Marshaler = YAMLMapSlice{} + _ yaml.Unmarshaler = &YAMLMapSlice{} +) + +// YAMLMapSlice represents a YAML object, with the order of keys maintained. +// +// It is similar to [jsonutils.JSONMapSlice] and also knows how to marshal and unmarshal YAML. +// +// It behaves like an ordered map, but keys can't be accessed in constant time. +type YAMLMapSlice []YAMLMapItem + +// YAMLMapItem represents the value of a key in a YAML object held by [YAMLMapSlice]. +// +// It is entirely equivalent to [jsonutils.JSONMapItem], with the same limitation that +// you should not Marshal or Unmarshal directly this type, outside of a [YAMLMapSlice]. +type YAMLMapItem = jsonutils.JSONMapItem + +func (s YAMLMapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +// SetOrderedItems implements [ifaces.SetOrdered]: it merges keys passed by the iterator argument +// into the [YAMLMapSlice]. +func (s *YAMLMapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + // force receiver to be a nil slice + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode: short-circuited when unmarshaling fresh data structures + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + + m = append(m, YAMLMapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, YAMLMapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders this YAML object as JSON bytes. +// +// The difference with standard JSON marshaling is that the order of keys is maintained. +func (s YAMLMapSlice) MarshalJSON() ([]byte, error) { + return jsonutils.JSONMapSlice(s).MarshalJSON() +} + +// UnmarshalJSON builds this YAML object from JSON bytes. 
+// +// The difference with standard JSON marshaling is that the order of keys is maintained. +func (s *YAMLMapSlice) UnmarshalJSON(data []byte) error { + js := jsonutils.JSONMapSlice(*s) + + if err := js.UnmarshalJSON(data); err != nil { + return err + } + + *s = YAMLMapSlice(js) + + return nil +} + +// MarshalYAML produces a YAML document as bytes +// +// The difference with standard YAML marshaling is that the order of keys is maintained. +// +// It implements [yaml.Marshaler]. +func (s YAMLMapSlice) MarshalYAML() (any, error) { + if typeutils.IsNil(s) { + return []byte("null\n"), nil + } + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) + } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +// UnmarshalYAML builds a YAMLMapSlice object from a YAML document [yaml.Node]. +// +// It implements [yaml.Unmarshaler]. +func (s *YAMLMapSlice) UnmarshalYAML(node *yaml.Node) error { + if typeutils.IsNil(*s) { + // allow to unmarshal with a simple var declaration (nil slice) + *s = YAMLMapSlice{} + } + if node == nil { + *s = nil + return nil + } + + const sensibleAllocDivider = 2 + m := slices.Grow(*s, len(node.Content)/sensibleAllocDivider) + m = m[:0] + + for i := 0; i < len(node.Content); i += 2 { + var nmi YAMLMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML) + } + nmi.Value = v + m = append(m, nmi) + } + + *s = m + + return nil +} + +func json2yaml(item any) (*yaml.Node, error) { + if typeutils.IsNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + + switch val := item.(type) { + case ifaces.Ordered: + return orderedYAML(val) + + case map[string]any: + var n yaml.Node + n.Kind = yaml.MappingNode + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + + case []any: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float32: + return floatNode(val) + case float64: + return floatNode(val) + case int: + return integerNode(val) + case int8: + return integerNode(val) + case int16: + return integerNode(val) + case int32: + return integerNode(val) + case int64: + return integerNode(val) + case uint: + return uintegerNode(val) + case uint8: + return uintegerNode(val) + case uint16: + return uintegerNode(val) + case uint32: + return uintegerNode(val) + case uint64: + return uintegerNode(val) + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: 
strconv.FormatBool(val), + }, nil + default: + return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML) + } +} + +func floatNode[T conv.Float](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: conv.FormatFloat(val), + }, nil +} + +func integerNode[T conv.Signed](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: conv.FormatInteger(val), + }, nil +} + +func uintegerNode[T conv.Unsigned](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: conv.FormatUinteger(val), + }, nil +} + +func orderedYAML[T ifaces.Ordered](val T) (*yaml.Node, error) { + var n yaml.Node + n.Kind = yaml.MappingNode + for key, value := range val.OrderedItems() { + childNode, err := json2yaml(value) + if err != nil { + return nil, err + } + + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: key, + }, childNode) + } + return &n, nil +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/yaml.go b/vendor/github.com/go-openapi/swag/yamlutils/yaml.go new file mode 100644 index 000000000000..e3aff3c2fde9 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/yaml.go @@ -0,0 +1,211 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package yamlutils + +import ( + json "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/swag/jsonutils" + yaml "go.yaml.in/yaml/v3" +) + +// YAMLToJSON converts a YAML document into JSON bytes. +// +// Note: a YAML document is the output from a [yaml.Marshaler], e.g a pointer to a [yaml.Node]. +// +// [YAMLToJSON] is typically called after [BytesToYAMLDoc]. +func YAMLToJSON(value any) (json.RawMessage, error) { + jm, err := transformData(value) + if err != nil { + return nil, err + } + + b, err := jsonutils.WriteJSON(jm) + + return json.RawMessage(b), err +} + +// BytesToYAMLDoc converts a byte slice into a YAML document. +// +// This function only supports root documents that are objects. +// +// A YAML document is a pointer to a [yaml.Node]. 
+func BytesToYAMLDoc(data []byte) (any, error) { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { + return nil, err + } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported: %w", ErrYAML) + } + return &document, nil +} + +func yamlNode(root *yaml.Node) (any, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v: %w", root.Kind, ErrYAML) + } +} + +func yamlDocument(node *yaml.Node) (any, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (any, error) { + const sensibleAllocDivider = 2 // nodes concatenate (key,value) sequences + m := make(YAMLMapSlice, len(node.Content)/sensibleAllocDivider) + + if err := m.UnmarshalYAML(node); err != nil { + return nil, err + } + + return m, nil +} + +func yamlSequence(node *yaml.Node) (any, error) { + s := make([]any, 0) + + for i := range len(node.Content) { + v, err := yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (any, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w: %w", node.Value, err, ErrYAML) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w: %w", node.Value, err, ErrYAML) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. 
Expecting float content: %w: %w", node.Value, err, ErrYAML) + } + return f, nil + case yamlTimestamp: + // YAML timestamp is marshaled as string, not time + return node.Value, nil + case yamlNull: + return nil, nil //nolint:nilnil + default: + return nil, fmt.Errorf("YAML tag %q is not supported: %w", node.LongTag(), ErrYAML) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q: %w", node.Kind, ErrYAML) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key: %w", node.LongTag(), ErrYAML) + } +} + +func format(t any) (string, error) { + switch k := t.(type) { + case string: + return k, nil + case uint: + return strconv.FormatUint(uint64(k), 10), nil + case uint8: + return strconv.FormatUint(uint64(k), 10), nil + case uint16: + return strconv.FormatUint(uint64(k), 10), nil + case uint32: + return strconv.FormatUint(uint64(k), 10), nil + case uint64: + return strconv.FormatUint(k, 10), nil + case int: + return strconv.Itoa(k), nil + case int8: + return strconv.FormatInt(int64(k), 10), nil + case int16: + return strconv.FormatInt(int64(k), 10), nil + case int32: + return strconv.FormatInt(int64(k), 10), nil + case int64: + return strconv.FormatInt(k, 10), nil + default: + return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML) + } +} + +func transformData(input any) (out any, err error) { + switch in := input.(type) { + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) + case map[any]any: + o := make(YAMLMapSlice, 0, len(in)) + for ke, va := range in { + var nmi YAMLMapItem + if nmi.Key, err = format(ke); err != nil { + return nil, err + } + + v, ert := transformData(va) + if ert != nil { + return nil, ert + } + nmi.Value = v + o = append(o, nmi) + } + return o, nil + case []any: + len1 := len(in) + o := make([]any, len1) + for i := range len1 { + o[i], err = transformData(in[i]) + if err != nil { + return nil, err + } + } + return o, nil + } + return input, nil +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils_iface.go b/vendor/github.com/go-openapi/swag/yamlutils_iface.go new file mode 100644 index 000000000000..57767efc567f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils_iface.go @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "encoding/json" + + "github.com/go-openapi/swag/yamlutils" +) + +// YAMLToJSON converts YAML unmarshaled data into json compatible data +// +// Deprecated: use [yamlutils.YAMLToJSON] instead. +func YAMLToJSON(data any) (json.RawMessage, error) { return yamlutils.YAMLToJSON(data) } + +// BytesToYAMLDoc converts a byte slice into a YAML document +// +// Deprecated: use [yamlutils.BytesToYAMLDoc] instead. 
+func BytesToYAMLDoc(data []byte) (any, error) { return yamlutils.BytesToYAMLDoc(data) } diff --git a/vendor/github.com/go-openapi/validate/.editorconfig b/vendor/github.com/go-openapi/validate/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/validate/.gitattributes b/vendor/github.com/go-openapi/validate/.gitattributes new file mode 100644 index 000000000000..49ad52766abb --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/validate/.gitignore b/vendor/github.com/go-openapi/validate/.gitignore new file mode 100644 index 000000000000..fea8b84eca99 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml new file mode 100644 index 000000000000..10c513342fce --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -0,0 +1,76 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gomoddirectives + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. 
+ # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md new file mode 100644 index 000000000000..79cf6a077ba2 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md @@ -0,0 +1,31 @@ +# Benchmark + +Validating the Kubernetes Swagger API + +## v0.22.6: 60,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op +``` + +## After refact PR: minor but noticable improvements: 25,000,000 allocs +``` +go test -bench Spec +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op +``` + +## After reduce GC pressure PR: 17,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op +``` diff --git a/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/validate/LICENSE b/vendor/github.com/go-openapi/validate/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md new file mode 100644 index 000000000000..73d87ce4f01f --- /dev/null +++ b/vendor/github.com/go-openapi/validate/README.md @@ -0,0 +1,40 @@ +# Validation helpers [![Build Status](https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/validate.svg)](https://pkg.go.dev/github.com/go-openapi/validate) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/validate)](https://goreportcard.com/report/github.com/go-openapi/validate) + +This package provides helpers to validate Swagger 2.0. specification (aka OpenAPI 2.0). + +Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. + +## What's inside? + +* A validator for Swagger specifications +* A validator for JSON schemas draft4 +* Helper functions to validate individual values (used by code generated by [go-swagger](https://github.com/go-swagger/go-swagger)). + * Required, RequiredNumber, RequiredString + * ReadOnly + * UniqueItems, MaxItems, MinItems + * Enum, EnumCase + * Pattern, MinLength, MaxLength + * Minimum, Maximum, MultipleOf + * FormatOf + +[Documentation](https://pkg.go.dev/github.com/go-openapi/validate) + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +## FAQ + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. 
+> +> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 diff --git a/vendor/github.com/go-openapi/validate/context.go b/vendor/github.com/go-openapi/validate/context.go new file mode 100644 index 000000000000..b4587dcd560f --- /dev/null +++ b/vendor/github.com/go-openapi/validate/context.go @@ -0,0 +1,59 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "context" +) + +// validateCtxKey is the key type of context key in this pkg +type validateCtxKey string + +const ( + operationTypeKey validateCtxKey = "operationTypeKey" +) + +type operationType string + +const ( + request operationType = "request" + response operationType = "response" + none operationType = "none" // not specified in ctx +) + +var operationTypeEnum = []operationType{request, response, none} + +// WithOperationRequest returns a new context with operationType request +// in context value +func WithOperationRequest(ctx context.Context) context.Context { + return withOperation(ctx, request) +} + +// WithOperationResponse returns a new context with operationType response +// in context value +func WithOperationResponse(ctx context.Context) context.Context { + return withOperation(ctx, response) +} + +func withOperation(ctx context.Context, operation operationType) context.Context { + return context.WithValue(ctx, operationTypeKey, operation) +} + +// extractOperationType extracts the operation type from ctx +// if not specified or of unknown value, return none operation type +func extractOperationType(ctx context.Context) operationType { + v := ctx.Value(operationTypeKey) + if v == nil { + return none + } + res, ok := v.(operationType) + if !ok { + return none + } + // validate the value is in operation enum + if err := Enum("", "", res, operationTypeEnum); err != nil { + return none + } + return res +} diff --git a/vendor/github.com/go-openapi/validate/debug.go b/vendor/github.com/go-openapi/validate/debug.go new file mode 100644 index 000000000000..79145a4495d2 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/debug.go @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + // Debug is true when the SWAGGER_DEBUG env var is not empty. + // It enables a more verbose logging of validators. 
+ Debug = os.Getenv("SWAGGER_DEBUG") != "" + // validateLogger is a debug logger for this package + validateLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + validateLogger = log.New(os.Stdout, "validate:", log.LstdFlags) +} + +func debugLog(msg string, args ...any) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + validateLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go new file mode 100644 index 000000000000..79a431677e45 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -0,0 +1,294 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "strings" + + "github.com/go-openapi/spec" +) + +// defaultValidator validates default values in a spec. +// According to Swagger spec, default values MUST validate their schema. +type defaultValidator struct { + SpecValidator *SpecValidator + visitedSchemas map[string]struct{} + schemaOptions *SchemaValidatorOptions +} + +// Validate validates the default values declared in the swagger spec +func (d *defaultValidator) Validate() *Result { + errs := pools.poolOfResults.BorrowResult() // will redeem when merged + + if d == nil || d.SpecValidator == nil { + return errs + } + d.resetVisited() + errs.Merge(d.validateDefaultValueValidAgainstSchema()) // error - + return errs +} + +// resetVisited resets the internal state of visited schemas +func (d *defaultValidator) resetVisited() { + if d.visitedSchemas == nil { + d.visitedSchemas = make(map[string]struct{}) + + return + } + + // TODO(go1.21): clear(ex.visitedSchemas) + for k := range d.visitedSchemas { + delete(d.visitedSchemas, k) + } +} + +func isVisited(path string, visitedSchemas map[string]struct{}) bool { + _, found := visitedSchemas[path] + if found { + return true + } + + // search for overlapping paths + var ( + parent string + suffix string + ) + const backtrackFromEnd = 2 + for i := len(path) - backtrackFromEnd; i >= 0; i-- { + r := path[i] + if r != '.' 
{ + continue + } + + parent = path[0:i] + suffix = path[i+1:] + + if strings.HasSuffix(parent, suffix) { + return true + } + } + + return false +} + +// beingVisited asserts a schema is being visited +func (d *defaultValidator) beingVisited(path string) { + d.visitedSchemas[path] = struct{}{} +} + +// isVisited tells if a path has already been visited +func (d *defaultValidator) isVisited(path string) bool { + return isVisited(path, d.visitedSchemas) +} + +func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { + // every default value that is specified must validate against the schema for that property + // headers, items, parameters, schema + + res := pools.poolOfResults.BorrowResult() // will redeem when merged + s := d.SpecValidator + + for method, pathItem := range s.expandedAnalyzer().Operations() { + for path, op := range pathItem { + // parameters + for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + if param.Default != nil && param.Required { + res.AddWarnings(requiredHasDefaultMsg(param.Name, param.In)) + } + + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + + // Check simple parameters first + // default values provided must validate against their inline definition (no explicit schema) + if param.Default != nil && param.Schema == nil { + // check param default value is valid + red := newParamValidator(¶m, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + // Recursively follows Items and Schemas + if param.Items != nil { + red := d.validateDefaultValueItemsAgainstSchema(param.Name, param.In, ¶m, param.Items) //#nosec + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + if param.Schema != nil { + // Validate default value against schema + red := d.validateDefaultValueSchemaAgainstSchema(param.Name, param.In, param.Schema) + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + } + + if op.Responses != nil { + if op.Responses.Default != nil { + // Same constraint on default Response + res.Merge(d.validateDefaultInResponse(op.Responses.Default, jsonDefault, path, 0, op.ID)) + } + // Same constraint on regular Responses + if op.Responses.StatusCodeResponses != nil { // Safeguard + for code, r := range op.Responses.StatusCodeResponses { + res.Merge(d.validateDefaultInResponse(&r, "response", path, code, op.ID)) //#nosec + } + } + } else if op.ID != "" { + // Empty op.ID means there is no meaningful operation: no need to report a specific message + res.AddErrors(noValidResponseMsg(op.ID)) + } + } + } + if s.spec.Spec().Definitions != nil { // Safeguard + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + for nm, sch := range s.spec.Spec().Definitions { + res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec + } + } + return res +} + +func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, responseType, path string, responseCode int, operationID string) 
*Result { + s := d.SpecValidator + + response, res := responseHelp.expandResponseRef(resp, path, s) + if !res.IsValid() { + return res + } + + responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) + + if response.Headers != nil { // Safeguard + for nm, h := range response.Headers { + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + + if h.Default != nil { + red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + // Headers have inline definition, like params + if h.Items != nil { + red := d.validateDefaultValueItemsAgainstSchema(nm, "header", &h, h.Items) //#nosec + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + if _, err := compileRegexp(h.Pattern); err != nil { + res.AddErrors(invalidPatternInHeaderMsg(operationID, nm, responseName, h.Pattern, err)) + } + + // Headers don't have schema + } + } + if response.Schema != nil { + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + + red := d.validateDefaultValueSchemaAgainstSchema(responseCodeAsStr, "response", response.Schema) + if red.HasErrorsOrWarnings() { + // Additional message to make sure the context of the error is not lost + res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + return res +} + +func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result { + if schema == nil || d.isVisited(path) { + // Avoids recursing if we are already done with that check + return nil + } + d.beingVisited(path) + res := pools.poolOfResults.BorrowResult() + s := d.SpecValidator + + if schema.Default != nil { + res.Merge( + newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default), + ) + } + if schema.Items != nil { + if schema.Items.Schema != nil { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".items.default", in, schema.Items.Schema)) + } + // Multiple schemas in items + if schema.Items.Schemas != nil { // Safeguard + for i, sch := range schema.Items.Schemas { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d].default", path, i), in, &sch)) //#nosec + } + } + } + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, schema.Pattern)) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) + } + for propName, prop := range schema.Properties { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec + } + for propName, prop := range schema.PatternProperties { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec + } + if 
schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) + } + if schema.AllOf != nil { + for i, aoSch := range schema.AllOf { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch)) //#nosec + } + } + return res +} + +// TODO: Temporary duplicated code. Need to refactor with examples + +func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root any, items *spec.Items) *Result { + res := pools.poolOfResults.BorrowResult() + s := d.SpecValidator + if items != nil { + if items.Default != nil { + res.Merge( + newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default), + ) + } + if items.Items != nil { + res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items)) + } + if _, err := compileRegexp(items.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, items.Pattern)) + } + } + return res +} diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go new file mode 100644 index 000000000000..a99893e1a383 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/doc.go @@ -0,0 +1,76 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +/* +Package validate provides methods to validate a swagger specification, +as well as tools to validate data against their schema. + +This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference +can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. + +# Validating a specification + +Validates a spec document (from JSON or YAML) against the JSON schema for swagger, +then checks a number of extra rules that can't be expressed in JSON schema. + +Entry points: + - Spec() + - NewSpecValidator() + - SpecValidator.Validate() + +Reported as errors: + + [x] definition can't declare a property that's already defined by one of its ancestors + [x] definition's ancestor can't be a descendant of the same model + [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be laxed by disabling StrictPathParamUniqueness. 
+ [x] each security reference should contain only unique scopes + [x] each security scope in a security definition should be unique + [x] parameters in path must be unique + [x] each path parameter must correspond to a parameter placeholder and vice versa + [x] each referenceable definition must have references + [x] each definition property listed in the required array must be defined in the properties of the model + [x] each parameter should have a unique `name` and `type` combination + [x] each operation should have only 1 parameter of type body + [x] each reference must point to a valid object + [x] every default value that is specified must validate against the schema for that property + [x] items property is required for all schemas/definitions of type `array` + [x] path parameters must be declared a required + [x] headers must not contain $ref + [x] schema and property examples provided must validate against their respective object's schema + [x] examples provided must validate their schema + +Reported as warnings: + + [x] path parameters should not contain any of [{,},\w] + [x] empty path + [x] unused definitions + [x] unsupported validation of examples on non-JSON media types + [x] examples in response without schema + [x] readOnly properties should not be required + +# Validating a schema + +The schema validation toolkit validates data against JSON-schema-draft 04 schema. + +It is tested against the full json-schema-testing-suite (https://github.com/json-schema-org/JSON-Schema-Test-Suite), +except for the optional part (bignum, ECMA regexp, ...). + +It supports the complete JSON-schema vocabulary, including keywords not supported by Swagger (e.g. additionalItems, ...) + +Entry points: + - AgainstSchema() + - ... + +# Known limitations + +With the current version of this package, the following aspects of swagger are not yet supported: + + [ ] errors and warnings are not reported with key/line number in spec + [ ] default values and examples on responses only support application/json producer type + [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values + [ ] rules for collectionFormat are not implemented + [ ] no validation rule for polymorphism support (discriminator) [not done here] + [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid + [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 +*/ +package validate diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go new file mode 100644 index 000000000000..e4ef52c6dc16 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -0,0 +1,288 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + + "github.com/go-openapi/spec" +) + +// ExampleValidator validates example values defined in a spec +type exampleValidator struct { + SpecValidator *SpecValidator + visitedSchemas map[string]struct{} + schemaOptions *SchemaValidatorOptions +} + +// Validate validates the example values declared in the swagger spec +// Example values MUST conform to their schema. 
+// +// With Swagger 2.0, examples are supported in: +// - schemas +// - individual property +// - responses +func (ex *exampleValidator) Validate() *Result { + errs := pools.poolOfResults.BorrowResult() + + if ex == nil || ex.SpecValidator == nil { + return errs + } + ex.resetVisited() + errs.Merge(ex.validateExampleValueValidAgainstSchema()) // error - + + return errs +} + +// resetVisited resets the internal state of visited schemas +func (ex *exampleValidator) resetVisited() { + if ex.visitedSchemas == nil { + ex.visitedSchemas = make(map[string]struct{}) + + return + } + + // TODO(go1.21): clear(ex.visitedSchemas) + for k := range ex.visitedSchemas { + delete(ex.visitedSchemas, k) + } +} + +// beingVisited asserts a schema is being visited +func (ex *exampleValidator) beingVisited(path string) { + ex.visitedSchemas[path] = struct{}{} +} + +// isVisited tells if a path has already been visited +func (ex *exampleValidator) isVisited(path string) bool { + return isVisited(path, ex.visitedSchemas) +} + +func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { + // every example value that is specified must validate against the schema for that property + // in: schemas, properties, object, items + // not in: headers, parameters without schema + + res := pools.poolOfResults.BorrowResult() + s := ex.SpecValidator + + for method, pathItem := range s.expandedAnalyzer().Operations() { + for path, op := range pathItem { + // parameters + for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + + // As of swagger 2.0, Examples are not supported in simple parameters + // However, it looks like it is supported by go-openapi + + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + + // Check simple parameters first + // default values provided must validate against their inline definition (no explicit schema) + if param.Example != nil && param.Schema == nil { + // check param default value is valid + red := newParamValidator(¶m, s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) + res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + // Recursively follows Items and Schemas + if param.Items != nil { + red := ex.validateExampleValueItemsAgainstSchema(param.Name, param.In, ¶m, param.Items) //#nosec + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + if param.Schema != nil { + // Validate example value against schema + red := ex.validateExampleValueSchemaAgainstSchema(param.Name, param.In, param.Schema) + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + } + + if op.Responses != nil { + if op.Responses.Default != nil { + // Same constraint on default Response + res.Merge(ex.validateExampleInResponse(op.Responses.Default, jsonDefault, path, 0, op.ID)) + } + // Same constraint on regular Responses + if op.Responses.StatusCodeResponses != nil { // Safeguard + for code, r := range op.Responses.StatusCodeResponses { + res.Merge(ex.validateExampleInResponse(&r, "response", path, code, op.ID)) //#nosec + } + } + } else if 
op.ID != "" { + // Empty op.ID means there is no meaningful operation: no need to report a specific message + res.AddErrors(noValidResponseMsg(op.ID)) + } + } + } + if s.spec.Spec().Definitions != nil { // Safeguard + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + for nm, sch := range s.spec.Spec().Definitions { + res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec + } + } + return res +} + +func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, responseType, path string, responseCode int, operationID string) *Result { + s := ex.SpecValidator + + response, res := responseHelp.expandResponseRef(resp, path, s) + if !res.IsValid() { // Safeguard + return res + } + + responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) + + if response.Headers != nil { // Safeguard + for nm, h := range response.Headers { + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + + if h.Example != nil { + red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) + res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + // Headers have inline definition, like params + if h.Items != nil { + red := ex.validateExampleValueItemsAgainstSchema(nm, "header", &h, h.Items) //#nosec + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) + res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + if _, err := compileRegexp(h.Pattern); err != nil { + res.AddErrors(invalidPatternInHeaderMsg(operationID, nm, responseName, h.Pattern, err)) + } + + // Headers don't have schema + } + } + if response.Schema != nil { + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + + red := ex.validateExampleValueSchemaAgainstSchema(responseCodeAsStr, "response", response.Schema) + if red.HasErrorsOrWarnings() { + // Additional message to make sure the context of the error is not lost + res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + if response.Examples != nil { + if response.Schema != nil { + if example, ok := response.Examples["application/json"]; ok { + res.MergeAsWarnings( + newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example), + ) + } else { + // TODO: validate other media types too + res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName)) + } + } else { + res.AddWarnings(examplesWithoutSchemaMsg(operationID, responseName)) + } + } + return res +} + +func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result { + if schema == nil || ex.isVisited(path) { + // Avoids recursing if we are already done with that check + return nil + } + ex.beingVisited(path) + s := ex.SpecValidator + res := pools.poolOfResults.BorrowResult() + + if schema.Example != nil { + res.MergeAsWarnings( + newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example), + ) + } + 
if schema.Items != nil { + if schema.Items.Schema != nil { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".items.example", in, schema.Items.Schema)) + } + // Multiple schemas in items + if schema.Items.Schemas != nil { // Safeguard + for i, sch := range schema.Items.Schemas { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d].example", path, i), in, &sch)) //#nosec + } + } + } + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, schema.Pattern)) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) + } + for propName, prop := range schema.Properties { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec + } + for propName, prop := range schema.PatternProperties { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec + } + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) + } + if schema.AllOf != nil { + for i, aoSch := range schema.AllOf { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch)) //#nosec + } + } + return res +} + +// TODO: Temporary duplicated code. Need to refactor with examples +// + +func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root any, items *spec.Items) *Result { + res := pools.poolOfResults.BorrowResult() + s := ex.SpecValidator + if items != nil { + if items.Example != nil { + res.MergeAsWarnings( + newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example), + ) + } + if items.Items != nil { + res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items)) + } + if _, err := compileRegexp(items.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, items.Pattern)) + } + } + + return res +} diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go new file mode 100644 index 000000000000..85ee63494186 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -0,0 +1,88 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type formatValidator struct { + Path string + In string + Format string + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var f *formatValidator + if opts.recycleValidators { + f = pools.poolOfFormatValidators.BorrowValidator() + } else { + f = new(formatValidator) + } + + f.Path = path + f.In = in + f.Format = format + f.KnownFormats = formats + f.Options = opts + + return f +} + +func (f *formatValidator) SetPath(path string) { + f.Path = path +} + +func (f *formatValidator) Applies(source any, kind reflect.Kind) bool { + if source == nil || 
f.KnownFormats == nil { + return false + } + + switch source := source.(type) { + case *spec.Items: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Parameter: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Schema: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Header: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + default: + return false + } +} + +func (f *formatValidator) Validate(val any) *Result { + if f.Options.recycleValidators { + defer func() { + f.redeem() + }() + } + + var result *Result + if f.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + + if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil { + result.AddErrors(err) + } + + return result +} + +func (f *formatValidator) redeem() { + pools.poolOfFormatValidators.RedeemValidator(f) +} diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go new file mode 100644 index 000000000000..49b130473a9a --- /dev/null +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -0,0 +1,322 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +// TODO: define this as package validate/internal +// This must be done while keeping CI intact with all tests and test coverage + +import ( + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" +) + +const ( + swaggerBody = "body" + swaggerExample = "example" + swaggerExamples = "examples" +) + +const ( + objectType = "object" + arrayType = "array" + stringType = "string" + integerType = "integer" + numberType = "number" + booleanType = "boolean" + fileType = "file" + nullType = "null" +) + +const ( + jsonProperties = "properties" + jsonItems = "items" + jsonType = "type" + // jsonSchema = "schema" + jsonDefault = "default" +) + +const ( + stringFormatDate = "date" + stringFormatDateTime = "date-time" + stringFormatPassword = "password" + stringFormatByte = "byte" + // stringFormatBinary = "binary" + stringFormatCreditCard = "creditcard" + stringFormatDuration = "duration" + stringFormatEmail = "email" + stringFormatHexColor = "hexcolor" + stringFormatHostname = "hostname" + stringFormatIPv4 = "ipv4" + stringFormatIPv6 = "ipv6" + stringFormatISBN = "isbn" + stringFormatISBN10 = "isbn10" + stringFormatISBN13 = "isbn13" + stringFormatMAC = "mac" + stringFormatBSONObjectID = "bsonobjectid" + stringFormatRGBColor = "rgbcolor" + stringFormatSSN = "ssn" + stringFormatURI = "uri" + stringFormatUUID = "uuid" + stringFormatUUID3 = "uuid3" + stringFormatUUID4 = "uuid4" + stringFormatUUID5 = "uuid5" + + integerFormatInt32 = "int32" + integerFormatInt64 = "int64" + integerFormatUInt32 = "uint32" + integerFormatUInt64 = "uint64" + + numberFormatFloat32 = "float32" + numberFormatFloat64 = "float64" + numberFormatFloat = "float" + numberFormatDouble = "double" +) + +// Helpers available at the package level +var ( + pathHelp *pathHelper + valueHelp *valueHelper + errorHelp *errorHelper + paramHelp *paramHelper + responseHelp *responseHelper +) + +type errorHelper struct { + // A collection of unexported helpers for error construction +} + +func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result { + // Builds a Result from standard errors.Error + var result *Result + if 
recycle { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + result.Errors = []error{err} + + return result +} + +func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result { + // Provides more context on error messages + // reported by the jsonpointer package by altering the passed Result + if err != nil { + res.AddErrors(cannotResolveRefMsg(fromPath, ref, err)) + } + return res +} + +type pathHelper struct { + // A collection of unexported helpers for path validation +} + +func (h *pathHelper) stripParametersInPath(path string) string { + // Returns a path stripped from all path parameters, with multiple or trailing slashes removed. + // + // Stripping is performed on a slash-separated basis, e.g '/a{/b}' remains a{/b} and not /a. + // - Trailing "/" make a difference, e.g. /a/ !~ /a (ex: canary/bitbucket.org/swagger.json) + // - presence or absence of a parameter makes a difference, e.g. /a/{log} !~ /a/ (ex: canary/kubernetes/swagger.json) + + // Regexp to extract parameters from path, with surrounding {}. + // NOTE: important non-greedy modifier + rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`) + strippedSegments := []string{} + + for segment := range strings.SplitSeq(path, "/") { + strippedSegments = append(strippedSegments, rexParsePathParam.ReplaceAllString(segment, "X")) + } + return strings.Join(strippedSegments, "/") +} + +func (h *pathHelper) extractPathParams(path string) (params []string) { + // Extracts all params from a path, with surrounding "{}" + rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`) + + for segment := range strings.SplitSeq(path, "/") { + for _, v := range rexParsePathParam.FindAllStringSubmatch(segment, -1) { + params = append(params, v...) + } + } + return +} + +type valueHelper struct { + // A collection of unexported helpers for value validation +} + +func (h *valueHelper) asInt64(val any) int64 { + // Number conversion function for int64, without error checking + // (implements an implicit type upgrade). + v := reflect.ValueOf(val) + switch v.Kind() { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return int64(v.Uint()) //nolint:gosec + case reflect.Float32, reflect.Float64: + return int64(v.Float()) + default: + // panic("Non numeric value in asInt64()") + return 0 + } +} + +func (h *valueHelper) asUint64(val any) uint64 { + // Number conversion function for uint64, without error checking + // (implements an implicit type upgrade). + v := reflect.ValueOf(val) + switch v.Kind() { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return uint64(v.Int()) //nolint:gosec + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return v.Uint() + case reflect.Float32, reflect.Float64: + return uint64(v.Float()) + default: + // panic("Non numeric value in asUint64()") + return 0 + } +} + +// Same for unsigned floats +func (h *valueHelper) asFloat64(val any) float64 { + // Number conversion function for float64, without error checking + // (implements an implicit type upgrade).
+ v := reflect.ValueOf(val) + switch v.Kind() { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(v.Uint()) + case reflect.Float32, reflect.Float64: + return v.Float() + default: + // panic("Non numeric value in asFloat64()") + return 0 + } +} + +type paramHelper struct { + // A collection of unexported helpers for parameters resolution +} + +func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, res *Result, s *SpecValidator) (params []spec.Parameter) { + operation, ok := s.expandedAnalyzer().OperationFor(method, path) + if ok { + // expand parameters first if necessary + resolvedParams := []spec.Parameter{} + for _, ppr := range operation.Parameters { + resolvedParam, red := h.resolveParam(path, method, operationID, &ppr, s) //#nosec + res.Merge(red) + if resolvedParam != nil { + resolvedParams = append(resolvedParams, *resolvedParam) + } + } + // remove params with invalid expansion from Slice + operation.Parameters = resolvedParams + + for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path, + func(_ spec.Parameter, err error) bool { + // since params have already been expanded, there are few causes for error + res.AddErrors(someParametersBrokenMsg(path, method, operationID)) + // original error from analyzer + res.AddErrors(err) + return true + }) { + params = append(params, ppr) + } + } + return +} + +func (h *paramHelper) resolveParam(path, method, operationID string, param *spec.Parameter, s *SpecValidator) (*spec.Parameter, *Result) { + // Ensure parameter is expanded + var err error + res := new(Result) + isRef := param.Ref.String() != "" + if s.spec.SpecFilePath() == "" { + err = spec.ExpandParameterWithRoot(param, s.spec.Spec(), nil) + } else { + err = spec.ExpandParameter(param, s.spec.SpecFilePath()) + + } + if err != nil { // Safeguard + // NOTE: we may enter here when the whole parameter is an unresolved $ref + refPath := strings.Join([]string{"\"" + path + "\"", method}, ".") + errorHelp.addPointerError(res, err, param.Ref.String(), refPath) + return nil, res + } + res.Merge(h.checkExpandedParam(param, param.Name, param.In, operationID, isRef)) + return param, res +} + +func (h *paramHelper) checkExpandedParam(pr *spec.Parameter, path, in, operation string, isRef bool) *Result { + // Secure parameter structure after $ref resolution + res := new(Result) + simpleZero := spec.SimpleSchema{} + // Try to explain why... best guess + switch { + case pr.In == swaggerBody && (pr.SimpleSchema != simpleZero && pr.Type != objectType): + if isRef { + // Most likely, a $ref with a sibling is an unwanted situation: in itself this is a warning... 
+ // but we detect it because of the following error: + // schema took over Parameter for an unexplained reason + res.AddWarnings(refShouldNotHaveSiblingsMsg(path, operation)) + } + res.AddErrors(invalidParameterDefinitionMsg(path, in, operation)) + case pr.In != swaggerBody && pr.Schema != nil: + if isRef { + res.AddWarnings(refShouldNotHaveSiblingsMsg(path, operation)) + } + res.AddErrors(invalidParameterDefinitionAsSchemaMsg(path, in, operation)) + case (pr.In == swaggerBody && pr.Schema == nil) || (pr.In != swaggerBody && pr.SimpleSchema == simpleZero): + // Other unexpected mishaps + res.AddErrors(invalidParameterDefinitionMsg(path, in, operation)) + } + return res +} + +type responseHelper struct { + // A collection of unexported helpers for response resolution +} + +func (r *responseHelper) expandResponseRef( + response *spec.Response, + path string, s *SpecValidator) (*spec.Response, *Result) { + // Ensure response is expanded + var err error + res := new(Result) + if s.spec.SpecFilePath() == "" { + // there is no physical document to resolve $ref in response + err = spec.ExpandResponseWithRoot(response, s.spec.Spec(), nil) + } else { + err = spec.ExpandResponse(response, s.spec.SpecFilePath()) + } + if err != nil { // Safeguard + // NOTE: we may enter here when the whole response is an unresolved $ref. + errorHelp.addPointerError(res, err, response.Ref.String(), path) + return nil, res + } + + return response, res +} + +func (r *responseHelper) responseMsgVariants( + responseType string, + responseCode int) (responseName, responseCodeAsStr string) { + // Path variants for messages + if responseType == jsonDefault { + responseCodeAsStr = jsonDefault + responseName = "default response" + } else { + responseCodeAsStr = strconv.Itoa(responseCode) + responseName = "response " + responseCodeAsStr + } + return +} diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go new file mode 100644 index 000000000000..cf98ed377d54 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -0,0 +1,420 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "reflect" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type objectValidator struct { + Path string + In string + MaxProperties *int64 + MinProperties *int64 + Required []string + Properties map[string]spec.Schema + AdditionalProperties *spec.SchemaOrBool + PatternProperties map[string]spec.Schema + Root any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions + splitPath []string +} + +func newObjectValidator(path, in string, + maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties, + additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties, + root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *objectValidator + if opts.recycleValidators { + v = pools.poolOfObjectValidators.BorrowValidator() + } else { + v = new(objectValidator) + } + + v.Path = path + v.In = in + v.MaxProperties = maxProperties + v.MinProperties = minProperties + v.Required = required + v.Properties = properties + v.AdditionalProperties = additionalProperties + v.PatternProperties = patternProperties + v.Root = root + v.KnownFormats = formats + 
v.Options = opts + v.splitPath = strings.Split(v.Path, ".") + + return v +} + +func (o *objectValidator) Validate(data any) *Result { + if o.Options.recycleValidators { + defer func() { + o.redeem() + }() + } + + var val map[string]any + if data != nil { + var ok bool + val, ok = data.(map[string]any) + if !ok { + return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult) + } + } + numKeys := int64(len(val)) + + if o.MinProperties != nil && numKeys < *o.MinProperties { + return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult) + } + if o.MaxProperties != nil && numKeys > *o.MaxProperties { + return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult) + } + + var res *Result + if o.Options.recycleResult { + res = pools.poolOfResults.BorrowResult() + } else { + res = new(Result) + } + + o.precheck(res, val) + + // check validity of field names + if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows { + // Case: additionalProperties: false + o.validateNoAdditionalProperties(val, res) + } else { + // Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } + o.validateAdditionalProperties(val, res) + } + + o.validatePropertiesSchema(val, res) + + // Check patternProperties + // TODO: it looks like we have done that twice in many cases + for key, value := range val { + _, regularProperty := o.Properties[key] + matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well + if regularProperty || !matched { + continue + } + + for _, pName := range patterns { + if v, ok := o.PatternProperties[pName]; ok { + r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(data.(map[string]any), key, r) + } + } + } + + return res +} + +func (o *objectValidator) SetPath(path string) { + o.Path = path + o.splitPath = strings.Split(path, ".") +} + +func (o *objectValidator) Applies(source any, kind reflect.Kind) bool { + // TODO: this should also work for structs + // there is a problem in the type validator where it will be unhappy about null values + // so that requires more testing + _, isSchema := source.(*spec.Schema) + return isSchema && (kind == reflect.Map || kind == reflect.Struct) +} + +func (o *objectValidator) isProperties() bool { + p := o.splitPath + return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties +} + +func (o *objectValidator) isDefault() bool { + p := o.splitPath + return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault +} + +func (o *objectValidator) isExample() bool { + p := o.splitPath + return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample +} + +func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]any) { + // for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly. + // with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type). 
+ if val == nil { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + return + } + + tpe, isString := t.(string) + if !isString || tpe != arrayType { + return + } + + item, itemsKeyFound := val[jsonItems] + if itemsKeyFound { + return + } + + res.AddErrors(errors.Required(jsonItems, o.Path, item)) +} + +func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]any) { + if val == nil { + return + } + + if o.isProperties() || o.isDefault() || o.isExample() { + return + } + + _, itemsKeyFound := val[jsonItems] + if !itemsKeyFound { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + // there is no type + res.AddErrors(errors.Required(jsonType, o.Path, t)) + } + + if tpe, isString := t.(string); !isString || tpe != arrayType { + res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) + } +} + +func (o *objectValidator) precheck(res *Result, val map[string]any) { + if o.Options.EnableArrayMustHaveItemsCheck { + o.checkArrayMustHaveItems(res, val) + } + if o.Options.EnableObjectArrayTypeCheck { + o.checkItemsMustBeTypeArray(res, val) + } +} + +func (o *objectValidator) validateNoAdditionalProperties(val map[string]any, res *Result) { + for k := range val { + if k == "$schema" || k == "id" { + // special properties "$schema" and "id" are ignored + continue + } + + _, regularProperty := o.Properties[k] + if regularProperty { + continue + } + + matched := false + for pk := range o.PatternProperties { + re, err := compileRegexp(pk) + if err != nil { + continue + } + if matches := re.MatchString(k); matches { + matched = true + break + } + } + if matched { + continue + } + + res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) + + // BUG(fredbi): This section should move to a part dedicated to spec validation as + // it will conflict with regular schemas where a property "headers" is defined. + + // + // Croaks a more explicit message on top of the standard one + // on some recognized cases. + // + // NOTE: edge cases with invalid type assertion are simply ignored here. + // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered + // by higher level callers (the IMPORTANT! tag will be eventually + // removed). + if k != "headers" || val[k] == nil { + continue + } + + // $ref is forbidden in header + headers, mapOk := val[k].(map[string]any) + if !mapOk { + continue + } + + for headerKey, headerBody := range headers { + if headerBody == nil { + continue + } + + headerSchema, mapOfMapOk := headerBody.(map[string]any) + if !mapOfMapOk { + continue + } + + _, found := headerSchema["$ref"] + if !found { + continue + } + + refString, stringOk := headerSchema["$ref"].(string) + if !stringOk { + continue + } + + msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") + res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) + /* + case "$ref": + if val[k] != nil { + // TODO: check context of that ref: warn about siblings, check against invalid context + } + */ + } + } +} + +func (o *objectValidator) validateAdditionalProperties(val map[string]any, res *Result) { + for key, value := range val { + _, regularProperty := o.Properties[key] + if regularProperty { + continue + } + + // Validates property against "patternProperties" if applicable + // BUG(fredbi): succeededOnce is always false + + // NOTE: how about regular properties which do not match patternProperties? 
+ matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) + if matched || succeededOnce { + continue + } + + if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil { + continue + } + + // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator + // AdditionalProperties as Schema + r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(val, key, r) + } + // Valid cases: additionalProperties: true or undefined +} + +func (o *objectValidator) validatePropertiesSchema(val map[string]any, res *Result) { + createdFromDefaults := map[string]struct{}{} + + // Property types: + // - regular Property + pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties + defer func() { + pools.poolOfSchemas.RedeemSchema(pSchema) + }() + + for pName := range o.Properties { + *pSchema = o.Properties[pName] + var rName string + if o.Path == "" { + rName = pName + } else { + rName = o.Path + "." + pName + } + + // Recursively validates each property against its schema + v, ok := val[pName] + if ok { + r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v) + res.mergeForField(val, pName, r) + + continue + } + + if pSchema.Default != nil { + // if a default value is defined, creates the property from defaults + // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. + createdFromDefaults[pName] = struct{}{} + if !o.Options.skipSchemataResult { + res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer + } + } + } + + if len(o.Required) == 0 { + return + } + + // Check required properties + for _, k := range o.Required { + v, ok := val[k] + if ok { + continue + } + _, isCreatedFromDefaults := createdFromDefaults[k] + if isCreatedFromDefaults { + continue + } + + res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v)) + } +} + +// TODO: succeededOnce is not used anywhere +func (o *objectValidator) validatePatternProperty(key string, value any, result *Result) (bool, bool, []string) { + if len(o.PatternProperties) == 0 { + return false, false, nil + } + + matched := false + succeededOnce := false + patterns := make([]string, 0, len(o.PatternProperties)) + + schema := pools.poolOfSchemas.BorrowSchema() + defer func() { + pools.poolOfSchemas.RedeemSchema(schema) + }() + + for k := range o.PatternProperties { + re, err := compileRegexp(k) + if err != nil { + continue + } + + match := re.MatchString(key) + if !match { + continue + } + + *schema = o.PatternProperties[k] + patterns = append(patterns, k) + matched = true + validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options) + + res := validator.Validate(value) + result.Merge(res) + } + + return matched, succeededOnce, patterns +} + +func (o *objectValidator) redeem() { + pools.poolOfObjectValidators.RedeemValidator(o) +} diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go new file mode 100644 index 000000000000..f5e7f7131c72 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/options.go @@ -0,0 +1,51 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import "sync" + +// Opts specifies 
validation options for a SpecValidator. +// +// NOTE: other options might be needed, for example a go-swagger specific mode. +type Opts struct { + ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid + + // StrictPathParamUniqueness enables a strict validation of paths that include + // path parameters. When true, it will enforce that for each method, the path + // is unique, regardless of path parameters such that GET:/petstore/{id} and + // GET:/petstore/{pet} are considered duplicate paths. + // + // Consider disabling if path parameters can include slashes such as + // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and + // /"shelve/*/book/*" respectively. + StrictPathParamUniqueness bool + SkipSchemataResult bool +} + +var ( + defaultOpts = Opts{ + // default is to stop validation on errors + ContinueOnErrors: false, + + // StrictPathParamUniqueness is defaulted to true. This maintains existing + // behavior. + StrictPathParamUniqueness: true, + } + + defaultOptsMutex = &sync.Mutex{} +) + +// SetContinueOnErrors sets global default behavior regarding spec validation errors reporting. +// +// For extended error reporting, you most likely want to set it to true. +// For faster validation, it's better to give up early when a spec is detected as invalid: set it to false (this is the default). +// +// Setting this mode does NOT affect the validation status. +// +// NOTE: this method affects global defaults. It is not suitable for concurrent usage. +func SetContinueOnErrors(c bool) { + defer defaultOptsMutex.Unlock() + defaultOptsMutex.Lock() + defaultOpts.ContinueOnErrors = c +} diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go new file mode 100644 index 000000000000..1e734be493be --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools.go @@ -0,0 +1,369 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +//go:build !validatedebug + +package validate + +import ( + "sync" + + "github.com/go-openapi/spec" +) + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled.
+ + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + }, + poolOfTypeValidators: typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + }, + } +} + +type ( + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. 
+ poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + } + + objectValidatorsPool struct { + *sync.Pool + } + + sliceValidatorsPool struct { + *sync.Pool + } + + itemsValidatorsPool struct { + *sync.Pool + } + + basicCommonValidatorsPool struct { + *sync.Pool + } + + headerValidatorsPool struct { + *sync.Pool + } + + paramValidatorsPool struct { + *sync.Pool + } + + basicSliceValidatorsPool struct { + *sync.Pool + } + + numberValidatorsPool struct { + *sync.Pool + } + + stringValidatorsPool struct { + *sync.Pool + } + + schemaPropsValidatorsPool struct { + *sync.Pool + } + + formatValidatorsPool struct { + *sync.Pool + } + + typeValidatorsPool struct { + *sync.Pool + } + + schemasPool struct { + *sync.Pool + } + + resultsPool struct { + *sync.Pool + } +) + +func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator { + return p.Get().(*SchemaValidator) +} + +func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.Put(s) +} + +func (p objectValidatorsPool) BorrowValidator() *objectValidator { + return p.Get().(*objectValidator) +} + +func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.Put(s) +} + +func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + return p.Get().(*schemaSliceValidator) +} + +func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.Put(s) +} + +func (p itemsValidatorsPool) BorrowValidator() *itemsValidator { + return p.Get().(*itemsValidator) +} + +func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.Put(s) +} + +func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + return p.Get().(*basicCommonValidator) +} + +func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.Put(s) +} + +func (p headerValidatorsPool) BorrowValidator() *HeaderValidator { + return p.Get().(*HeaderValidator) +} + +func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.Put(s) +} + +func (p paramValidatorsPool) BorrowValidator() *ParamValidator { + return p.Get().(*ParamValidator) +} + +func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.Put(s) +} + +func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + return p.Get().(*basicSliceValidator) +} + +func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.Put(s) +} + +func (p numberValidatorsPool) BorrowValidator() *numberValidator { + return p.Get().(*numberValidator) +} + +func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.Put(s) +} + +func (p stringValidatorsPool) BorrowValidator() *stringValidator { + return p.Get().(*stringValidator) +} + +func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.Put(s) +} + +func (p schemaPropsValidatorsPool) BorrowValidator() 
*schemaPropsValidator { + return p.Get().(*schemaPropsValidator) +} + +func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.Put(s) +} + +func (p formatValidatorsPool) BorrowValidator() *formatValidator { + return p.Get().(*formatValidator) +} + +func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { + p.Put(s) +} + +func (p typeValidatorsPool) BorrowValidator() *typeValidator { + return p.Get().(*typeValidator) +} + +func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.Put(s) +} + +func (p schemasPool) BorrowSchema() *spec.Schema { + return p.Get().(*spec.Schema) +} + +func (p schemasPool) RedeemSchema(s *spec.Schema) { + p.Put(s) +} + +func (p resultsPool) BorrowResult() *Result { + return p.Get().(*Result).cleared() +} + +func (p resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + return + } + p.Put(s) +} diff --git a/vendor/github.com/go-openapi/validate/pools_debug.go b/vendor/github.com/go-openapi/validate/pools_debug.go new file mode 100644 index 000000000000..d123ed4093fe --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools_debug.go @@ -0,0 +1,1015 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +//go:build validatedebug + +package validate + +import ( + "fmt" + "runtime" + "sync" + "testing" + + "github.com/go-openapi/spec" +) + +// This version of the pools is to be used for debugging and testing, with build tag "validatedebug". +// +// In this mode, the pools are tracked for allocation and redemption of borrowed objects, so we can +// verify a few behaviors of the validators. The debug pools panic when an invalid usage pattern is detected. + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled. 
+ + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + debugMap: make(map[*SchemaValidator]status), + allocMap: make(map[*SchemaValidator]string), + redeemMap: make(map[*SchemaValidator]string), + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + debugMap: make(map[*objectValidator]status), + allocMap: make(map[*objectValidator]string), + redeemMap: make(map[*objectValidator]string), + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + debugMap: make(map[*schemaSliceValidator]status), + allocMap: make(map[*schemaSliceValidator]string), + redeemMap: make(map[*schemaSliceValidator]string), + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + debugMap: make(map[*itemsValidator]status), + allocMap: make(map[*itemsValidator]string), + redeemMap: make(map[*itemsValidator]string), + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + debugMap: make(map[*basicCommonValidator]status), + allocMap: make(map[*basicCommonValidator]string), + redeemMap: make(map[*basicCommonValidator]string), + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + debugMap: make(map[*HeaderValidator]status), + allocMap: make(map[*HeaderValidator]string), + redeemMap: make(map[*HeaderValidator]string), + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + debugMap: make(map[*ParamValidator]status), + allocMap: make(map[*ParamValidator]string), + redeemMap: make(map[*ParamValidator]string), + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + debugMap: make(map[*basicSliceValidator]status), + allocMap: make(map[*basicSliceValidator]string), + redeemMap: make(map[*basicSliceValidator]string), + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + debugMap: make(map[*numberValidator]status), + allocMap: make(map[*numberValidator]string), + redeemMap: make(map[*numberValidator]string), + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + debugMap: make(map[*stringValidator]status), + allocMap: make(map[*stringValidator]string), + redeemMap: make(map[*stringValidator]string), + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + debugMap: make(map[*schemaPropsValidator]status), + allocMap: make(map[*schemaPropsValidator]string), + redeemMap: make(map[*schemaPropsValidator]string), + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + debugMap: make(map[*formatValidator]status), + allocMap: make(map[*formatValidator]string), + redeemMap: make(map[*formatValidator]string), + }, + poolOfTypeValidators: 
typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + debugMap: make(map[*typeValidator]status), + allocMap: make(map[*typeValidator]string), + redeemMap: make(map[*typeValidator]string), + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + debugMap: make(map[*spec.Schema]status), + allocMap: make(map[*spec.Schema]string), + redeemMap: make(map[*spec.Schema]string), + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + debugMap: make(map[*Result]status), + allocMap: make(map[*Result]string), + redeemMap: make(map[*Result]string), + }, + } +} + +const ( + statusFresh status = iota + 1 + statusRecycled + statusRedeemed +) + +func (s status) String() string { + switch s { + case statusFresh: + return "fresh" + case statusRecycled: + return "recycled" + case statusRedeemed: + return "redeemed" + default: + panic(fmt.Errorf("invalid status: %d", s)) + } +} + +type ( + // Debug + status uint8 + + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. + poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + debugMap map[*SchemaValidator]status + allocMap map[*SchemaValidator]string + redeemMap map[*SchemaValidator]string + mx sync.Mutex + } + + objectValidatorsPool struct { + *sync.Pool + debugMap map[*objectValidator]status + allocMap map[*objectValidator]string + redeemMap map[*objectValidator]string + mx sync.Mutex + } + + sliceValidatorsPool struct { + *sync.Pool + debugMap map[*schemaSliceValidator]status + allocMap map[*schemaSliceValidator]string + redeemMap map[*schemaSliceValidator]string + mx sync.Mutex + } + + itemsValidatorsPool struct { + *sync.Pool + debugMap map[*itemsValidator]status + allocMap map[*itemsValidator]string + redeemMap map[*itemsValidator]string + mx sync.Mutex + } + + basicCommonValidatorsPool struct { + *sync.Pool + debugMap map[*basicCommonValidator]status + allocMap map[*basicCommonValidator]string + redeemMap map[*basicCommonValidator]string + mx sync.Mutex + } + + headerValidatorsPool struct { + *sync.Pool + debugMap map[*HeaderValidator]status + allocMap map[*HeaderValidator]string + redeemMap map[*HeaderValidator]string + mx sync.Mutex + } + + paramValidatorsPool struct { + *sync.Pool + debugMap map[*ParamValidator]status + allocMap map[*ParamValidator]string + redeemMap map[*ParamValidator]string + mx sync.Mutex + } + + basicSliceValidatorsPool struct { + *sync.Pool + debugMap map[*basicSliceValidator]status + allocMap map[*basicSliceValidator]string + redeemMap map[*basicSliceValidator]string + mx sync.Mutex + } + + numberValidatorsPool struct { + *sync.Pool + debugMap map[*numberValidator]status + allocMap map[*numberValidator]string + redeemMap 
map[*numberValidator]string + mx sync.Mutex + } + + stringValidatorsPool struct { + *sync.Pool + debugMap map[*stringValidator]status + allocMap map[*stringValidator]string + redeemMap map[*stringValidator]string + mx sync.Mutex + } + + schemaPropsValidatorsPool struct { + *sync.Pool + debugMap map[*schemaPropsValidator]status + allocMap map[*schemaPropsValidator]string + redeemMap map[*schemaPropsValidator]string + mx sync.Mutex + } + + formatValidatorsPool struct { + *sync.Pool + debugMap map[*formatValidator]status + allocMap map[*formatValidator]string + redeemMap map[*formatValidator]string + mx sync.Mutex + } + + typeValidatorsPool struct { + *sync.Pool + debugMap map[*typeValidator]status + allocMap map[*typeValidator]string + redeemMap map[*typeValidator]string + mx sync.Mutex + } + + schemasPool struct { + *sync.Pool + debugMap map[*spec.Schema]status + allocMap map[*spec.Schema]string + redeemMap map[*spec.Schema]string + mx sync.Mutex + } + + resultsPool struct { + *sync.Pool + debugMap map[*Result]status + allocMap map[*Result]string + redeemMap map[*Result]string + mx sync.Mutex + } +) + +func (p *schemaValidatorsPool) BorrowValidator() *SchemaValidator { + s := p.Get().(*SchemaValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *objectValidatorsPool) BorrowValidator() *objectValidator { + s := p.Get().(*objectValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled object should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed object should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed object should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + s := p.Get().(*schemaSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schemaSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaSliceValidator should have been allocated from a fresh or recycled pointer") + } + 
p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *itemsValidatorsPool) BorrowValidator() *itemsValidator { + s := p.Get().(*itemsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled itemsValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed itemsValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed itemsValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + s := p.Get().(*basicCommonValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicCommonValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicCommonValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicCommonValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *headerValidatorsPool) BorrowValidator() *HeaderValidator { + s := p.Get().(*HeaderValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled HeaderValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed header should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed header should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *paramValidatorsPool) BorrowValidator() *ParamValidator { + s := p.Get().(*ParamValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed param should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed param should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + s := p.Get().(*basicSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + 
p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicSliceValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *numberValidatorsPool) BorrowValidator() *numberValidator { + s := p.Get().(*numberValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled number should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed number should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed number should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *stringValidatorsPool) BorrowValidator() *stringValidator { + s := p.Get().(*stringValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled string should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed string should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed string should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator { + s := p.Get().(*schemaPropsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaProps should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaProps should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *formatValidatorsPool) BorrowValidator() *formatValidator { + s := p.Get().(*formatValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled format should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *formatValidatorsPool) RedeemValidator(s *formatValidator) { + 
p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed format should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed format should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *typeValidatorsPool) BorrowValidator() *typeValidator { + s := p.Get().(*typeValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled type should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed type should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic(fmt.Errorf("redeemed type should have been allocated from a fresh or recycled pointer. Got status %s, already redeamed at: %s", x, p.redeemMap[s])) + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemasPool) BorrowSchema() *spec.Schema { + s := p.Get().(*spec.Schema) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled spec.Schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemasPool) RedeemSchema(s *spec.Schema) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed spec.Schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed spec.Schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *resultsPool) BorrowResult() *Result { + s := p.Get().(*Result).cleared() + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled result should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + if len(s.Errors) > 0 || len(s.Warnings) > 0 { + panic("empty result should not mutate") + } + return + } + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed Result should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed Result should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *allPools) allIsRedeemed(t testing.TB) bool { + outcome := true + for k, v := range p.poolOfSchemaValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaValidator should be redeemed. Allocated by: %s", p.poolOfSchemaValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfObjectValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("objectValidator should be redeemed. Allocated by: %s", p.poolOfObjectValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("sliceValidator should be redeemed. 
Allocated by: %s", p.poolOfSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfItemsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("itemsValidator should be redeemed. Allocated by: %s", p.poolOfItemsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicCommonValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicCommonValidator should be redeemed. Allocated by: %s", p.poolOfBasicCommonValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfHeaderValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("headerValidator should be redeemed. Allocated by: %s", p.poolOfHeaderValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfParamValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("paramValidator should be redeemed. Allocated by: %s", p.poolOfParamValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicSliceValidator should be redeemed. Allocated by: %s", p.poolOfBasicSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfNumberValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("numberValidator should be redeemed. Allocated by: %s", p.poolOfNumberValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfStringValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("stringValidator should be redeemed. Allocated by: %s", p.poolOfStringValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemaPropsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaPropsValidator should be redeemed. Allocated by: %s", p.poolOfSchemaPropsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfFormatValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("formatValidator should be redeemed. Allocated by: %s", p.poolOfFormatValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfTypeValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("typeValidator should be redeemed. Allocated by: %s", p.poolOfTypeValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemas.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemas should be redeemed. Allocated by: %s", p.poolOfSchemas.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfResults.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("result should be redeemed. 
Allocated by: %s", p.poolOfResults.allocMap[k]) + outcome = false + } + + return outcome +} + +func caller() string { + pc, _, _, _ := runtime.Caller(3) //nolint:dogsled + from, line := runtime.FuncForPC(pc).FileLine(pc) + + return fmt.Sprintf("%s:%d", from, line) +} diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go new file mode 100644 index 000000000000..69219e99823b --- /dev/null +++ b/vendor/github.com/go-openapi/validate/result.go @@ -0,0 +1,560 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + stderrors "errors" + "reflect" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" +) + +var emptyResult = &Result{MatchCount: 1} + +// Result represents a validation result set, composed of +// errors and warnings. +// +// It is used to keep track of all detected errors and warnings during +// the validation of a specification. +// +// Matchcount is used to determine +// which errors are relevant in the case of AnyOf, OneOf +// schema validation. Results from the validation branch +// with most matches get eventually selected. +// +// TODO: keep path of key originating the error +type Result struct { + Errors []error + Warnings []error + MatchCount int + + // the object data + data any + + // Schemata for the root object + rootObjectSchemata schemata + // Schemata for object fields + fieldSchemata []fieldSchemata + // Schemata for slice items + itemSchemata []itemSchemata + + cachedFieldSchemata map[FieldKey][]*spec.Schema + cachedItemSchemata map[ItemKey][]*spec.Schema + + wantsRedeemOnMerge bool +} + +// FieldKey is a pair of an object and a field, usable as a key for a map. +type FieldKey struct { + object reflect.Value // actually a map[string]any, but the latter cannot be a key + field string +} + +// ItemKey is a pair of a slice and an index, usable as a key for a map. +type ItemKey struct { + slice reflect.Value // actually a []any, but the latter cannot be a key + index int +} + +// NewFieldKey returns a pair of an object and field usable as a key of a map. +func NewFieldKey(obj map[string]any, field string) FieldKey { + return FieldKey{object: reflect.ValueOf(obj), field: field} +} + +// Object returns the underlying object of this key. +func (fk *FieldKey) Object() map[string]any { + return fk.object.Interface().(map[string]any) +} + +// Field returns the underlying field of this key. +func (fk *FieldKey) Field() string { + return fk.field +} + +// NewItemKey returns a pair of a slice and index usable as a key of a map. +func NewItemKey(slice any, i int) ItemKey { + return ItemKey{slice: reflect.ValueOf(slice), index: i} +} + +// Slice returns the underlying slice of this key. +func (ik *ItemKey) Slice() []any { + return ik.slice.Interface().([]any) +} + +// Index returns the underlying index of this key. +func (ik *ItemKey) Index() int { + return ik.index +} + +type fieldSchemata struct { + obj map[string]any + field string + schemata schemata +} + +type itemSchemata struct { + slice reflect.Value + index int + schemata schemata +} + +// Merge merges this result with the other one(s), preserving match counts etc. 
+func (r *Result) Merge(others ...*Result) *Result { + for _, other := range others { + if other == nil { + continue + } + r.mergeWithoutRootSchemata(other) + r.rootObjectSchemata.Append(other.rootObjectSchemata) + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + } + return r +} + +// Data returns the original data object used for validation. Mutating this renders +// the result invalid. +func (r *Result) Data() any { + return r.data +} + +// RootObjectSchemata returns the schemata which apply to the root object. +func (r *Result) RootObjectSchemata() []*spec.Schema { + return r.rootObjectSchemata.Slice() +} + +// FieldSchemata returns the schemata which apply to fields in objects. +func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { + if r.cachedFieldSchemata != nil { + return r.cachedFieldSchemata + } + + ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata)) + for _, fs := range r.fieldSchemata { + key := NewFieldKey(fs.obj, fs.field) + if fs.schemata.one != nil { + ret[key] = append(ret[key], fs.schemata.one) + } else if len(fs.schemata.multiple) > 0 { + ret[key] = append(ret[key], fs.schemata.multiple...) + } + } + r.cachedFieldSchemata = ret + + return ret +} + +// ItemSchemata returns the schemata which apply to items in slices. +func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema { + if r.cachedItemSchemata != nil { + return r.cachedItemSchemata + } + + ret := make(map[ItemKey][]*spec.Schema, len(r.itemSchemata)) + for _, ss := range r.itemSchemata { + key := NewItemKey(ss.slice, ss.index) + if ss.schemata.one != nil { + ret[key] = append(ret[key], ss.schemata.one) + } else if len(ss.schemata.multiple) > 0 { + ret[key] = append(ret[key], ss.schemata.multiple...) + } + } + r.cachedItemSchemata = ret + return ret +} + +// MergeAsErrors merges this result with the other one(s), preserving match counts etc. +// +// Warnings from input are merged as Errors in the returned merged Result. +func (r *Result) MergeAsErrors(others ...*Result) *Result { + for _, other := range others { + if other != nil { + r.resetCaches() + r.AddErrors(other.Errors...) + r.AddErrors(other.Warnings...) + r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + } + } + return r +} + +// MergeAsWarnings merges this result with the other one(s), preserving match counts etc. +// +// Errors from input are merged as Warnings in the returned merged Result. +func (r *Result) MergeAsWarnings(others ...*Result) *Result { + for _, other := range others { + if other != nil { + r.resetCaches() + r.AddWarnings(other.Errors...) + r.AddWarnings(other.Warnings...) + r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + } + } + return r +} + +// AddErrors adds errors to this validation result (if not already reported). +// +// Since the same check may be passed several times while exploring the +// spec structure (via $ref, ...) reported messages are kept +// unique. +func (r *Result) AddErrors(errors ...error) { + for _, e := range errors { + found := false + if e != nil { + for _, isReported := range r.Errors { + if e.Error() == isReported.Error() { + found = true + break + } + } + if !found { + r.Errors = append(r.Errors, e) + } + } + } +} + +// AddWarnings adds warnings to this validation result (if not already reported). 
+func (r *Result) AddWarnings(warnings ...error) { + for _, e := range warnings { + found := false + if e != nil { + for _, isReported := range r.Warnings { + if e.Error() == isReported.Error() { + found = true + break + } + } + if !found { + r.Warnings = append(r.Warnings, e) + } + } + } +} + +// IsValid returns true when this result is valid. +// +// Returns true on a nil *Result. +func (r *Result) IsValid() bool { + if r == nil { + return true + } + return len(r.Errors) == 0 +} + +// HasErrors returns true when this result is invalid. +// +// Returns false on a nil *Result. +func (r *Result) HasErrors() bool { + if r == nil { + return false + } + return !r.IsValid() +} + +// HasWarnings returns true when this result contains warnings. +// +// Returns false on a nil *Result. +func (r *Result) HasWarnings() bool { + if r == nil { + return false + } + return len(r.Warnings) > 0 +} + +// HasErrorsOrWarnings returns true when this result contains +// either errors or warnings. +// +// Returns false on a nil *Result. +func (r *Result) HasErrorsOrWarnings() bool { + if r == nil { + return false + } + return len(r.Errors) > 0 || len(r.Warnings) > 0 +} + +// Inc increments the match count +func (r *Result) Inc() { + r.MatchCount++ +} + +// AsError renders this result as an error interface +// +// TODO: reporting / pretty print with path ordered and indented +func (r *Result) AsError() error { + if r.IsValid() { + return nil + } + return errors.CompositeValidationError(r.Errors...) +} + +func (r *Result) resetCaches() { + r.cachedFieldSchemata = nil + r.cachedItemSchemata = nil +} + +// mergeForField merges other into r, assigning other's root schemata to the given Object and field name. +// +//nolint:unparam +func (r *Result) mergeForField(obj map[string]any, field string, other *Result) *Result { + if other == nil { + return r + } + r.mergeWithoutRootSchemata(other) + + if other.rootObjectSchemata.Len() > 0 { + if r.fieldSchemata == nil { + r.fieldSchemata = make([]fieldSchemata, len(obj)) + } + // clone other schemata, as other is about to be redeemed to the pool + r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{ + obj: obj, + field: field, + schemata: other.rootObjectSchemata.Clone(), + }) + } + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + + return r +} + +// mergeForSlice merges other into r, assigning other's root schemata to the given slice and index. +// +//nolint:unparam +func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result { + if other == nil { + return r + } + r.mergeWithoutRootSchemata(other) + + if other.rootObjectSchemata.Len() > 0 { + if r.itemSchemata == nil { + r.itemSchemata = make([]itemSchemata, slice.Len()) + } + // clone other schemata, as other is about to be redeemed to the pool + r.itemSchemata = append(r.itemSchemata, itemSchemata{ + slice: slice, + index: i, + schemata: other.rootObjectSchemata.Clone(), + }) + } + + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + + return r +} + +// addRootObjectSchemata adds the given schemata for the root object of the result. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. +func (r *Result) addRootObjectSchemata(s *spec.Schema) { + clone := *s + r.rootObjectSchemata.Append(schemata{one: &clone}) +} + +// addPropertySchemata adds the given schemata for the object and field. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. 
+func (r *Result) addPropertySchemata(obj map[string]any, fld string, schema *spec.Schema) { + if r.fieldSchemata == nil { + r.fieldSchemata = make([]fieldSchemata, 0, len(obj)) + } + clone := *schema + r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}}) +} + +/* +// addSliceSchemata adds the given schemata for the slice and index. +// The slice schemata might be reused. I.e. do not modify it after being added to a result. +func (r *Result) addSliceSchemata(slice reflect.Value, i int, schema *spec.Schema) { + if r.itemSchemata == nil { + r.itemSchemata = make([]itemSchemata, 0, slice.Len()) + } + r.itemSchemata = append(r.itemSchemata, itemSchemata{slice: slice, index: i, schemata: schemata{one: schema}}) +} +*/ + +// mergeWithoutRootSchemata merges other into r, ignoring the rootObject schemata. +func (r *Result) mergeWithoutRootSchemata(other *Result) { + r.resetCaches() + r.AddErrors(other.Errors...) + r.AddWarnings(other.Warnings...) + r.MatchCount += other.MatchCount + + if other.fieldSchemata != nil { + if r.fieldSchemata == nil { + r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata)) + } + for _, field := range other.fieldSchemata { + field.schemata = field.schemata.Clone() + r.fieldSchemata = append(r.fieldSchemata, field) + } + } + + if other.itemSchemata != nil { + if r.itemSchemata == nil { + r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata)) + } + for _, field := range other.itemSchemata { + field.schemata = field.schemata.Clone() + r.itemSchemata = append(r.itemSchemata, field) + } + } +} + +func isImportant(err error) bool { + return strings.HasPrefix(err.Error(), "IMPORTANT!") +} + +func stripImportantTag(err error) error { + return stderrors.New(strings.TrimPrefix(err.Error(), "IMPORTANT!")) //nolint:err113 +} + +func (r *Result) keepRelevantErrors() *Result { + // TODO: this one is going to disappear... + // keepRelevantErrors strips a result from standard errors and keeps + // the ones which are supposedly more accurate. + // + // The original result remains unaffected (creates a new instance of Result). + // This method is used to work around the "matchCount" filter which would otherwise + // strip our result from some accurate error reporting from lower level validators. + // + // NOTE: this implementation with a placeholder (IMPORTANT!) is neither clean nor + // very efficient. On the other hand, relying on go-openapi/errors to manipulate + // codes would require to change a lot here. So, for the moment, let's go with + // placeholders. + strippedErrors := []error{} + for _, e := range r.Errors { + if isImportant(e) { + strippedErrors = append(strippedErrors, stripImportantTag(e)) + } + } + strippedWarnings := []error{} + for _, e := range r.Warnings { + if isImportant(e) { + strippedWarnings = append(strippedWarnings, stripImportantTag(e)) + } + } + var strippedResult *Result + if r.wantsRedeemOnMerge { + strippedResult = pools.poolOfResults.BorrowResult() + } else { + strippedResult = new(Result) + } + strippedResult.Errors = strippedErrors + strippedResult.Warnings = strippedWarnings + return strippedResult +} + +func (r *Result) cleared() *Result { + // clear the Result to be reusable. Keep allocated capacity.
+ r.Errors = r.Errors[:0] + r.Warnings = r.Warnings[:0] + r.MatchCount = 0 + r.data = nil + r.rootObjectSchemata.one = nil + r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0] + r.fieldSchemata = r.fieldSchemata[:0] + r.itemSchemata = r.itemSchemata[:0] + for k := range r.cachedFieldSchemata { + delete(r.cachedFieldSchemata, k) + } + for k := range r.cachedItemSchemata { + delete(r.cachedItemSchemata, k) + } + r.wantsRedeemOnMerge = true // mark this result as eligible for redeem when merged into another + + return r +} + +// schemata is an arbitrary number of schemata. It does a distinction between zero, +// one and many schemata to avoid slice allocations. +type schemata struct { + // one is set if there is exactly one schema. In that case multiple must be nil. + one *spec.Schema + // multiple is an arbitrary number of schemas. If it is set, one must be nil. + multiple []*spec.Schema +} + +func (s *schemata) Len() int { + if s.one != nil { + return 1 + } + return len(s.multiple) +} + +func (s *schemata) Slice() []*spec.Schema { + if s == nil { + return nil + } + if s.one != nil { + return []*spec.Schema{s.one} + } + return s.multiple +} + +// Append appends the schemata in other to s. It mutates s in-place. +func (s *schemata) Append(other schemata) { + if other.one == nil && len(other.multiple) == 0 { + return + } + if s.one == nil && len(s.multiple) == 0 { + *s = other + return + } + + if s.one != nil { + if other.one != nil { + s.multiple = []*spec.Schema{s.one, other.one} + } else { + t := make([]*spec.Schema, 0, 1+len(other.multiple)) + s.multiple = append(append(t, s.one), other.multiple...) + } + s.one = nil + } else { + if other.one != nil { + s.multiple = append(s.multiple, other.one) + } else { + if cap(s.multiple) >= len(s.multiple)+len(other.multiple) { + s.multiple = append(s.multiple, other.multiple...) + } else { + t := make([]*spec.Schema, 0, len(s.multiple)+len(other.multiple)) + s.multiple = append(append(t, s.multiple...), other.multiple...) 
+ } + } + } +} + +func (s schemata) Clone() schemata { + var clone schemata + + if s.one != nil { + clone.one = new(spec.Schema) + *clone.one = *s.one + } + + if len(s.multiple) > 0 { + clone.multiple = make([]*spec.Schema, len(s.multiple)) + for idx := range len(s.multiple) { + sp := new(spec.Schema) + *sp = *s.multiple[idx] + clone.multiple[idx] = sp + } + } + + return clone +} diff --git a/vendor/github.com/go-openapi/validate/rexp.go b/vendor/github.com/go-openapi/validate/rexp.go new file mode 100644 index 000000000000..795f148d0cfc --- /dev/null +++ b/vendor/github.com/go-openapi/validate/rexp.go @@ -0,0 +1,59 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "maps" + re "regexp" + "sync" + "sync/atomic" +) + +// Cache for compiled regular expressions +var ( + cacheMutex = &sync.Mutex{} + reDict = atomic.Value{} // map[string]*re.Regexp +) + +func compileRegexp(pattern string) (*re.Regexp, error) { + if cache, ok := reDict.Load().(map[string]*re.Regexp); ok { + if r := cache[pattern]; r != nil { + return r, nil + } + } + + r, err := re.Compile(pattern) + if err != nil { + return nil, err + } + cacheRegexp(r) + return r, nil +} + +func mustCompileRegexp(pattern string) *re.Regexp { + if cache, ok := reDict.Load().(map[string]*re.Regexp); ok { + if r := cache[pattern]; r != nil { + return r + } + } + + r := re.MustCompile(pattern) + cacheRegexp(r) + return r +} + +func cacheRegexp(r *re.Regexp) { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + if cache, ok := reDict.Load().(map[string]*re.Regexp); !ok || cache[r.String()] == nil { + newCache := map[string]*re.Regexp{ + r.String(): r, + } + + maps.Copy(newCache, cache) + + reDict.Store(newCache) + } +} diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go new file mode 100644 index 000000000000..375a98765d75 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -0,0 +1,351 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "encoding/json" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/jsonutils" +) + +// SchemaValidator validates data against a JSON schema +type SchemaValidator struct { + Path string + in string + Schema *spec.Schema + validators [8]valueValidator + Root any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +// AgainstSchema validates the specified data against the provided schema, using a registry of supported formats. +// +// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example. +func AgainstSchema(schema *spec.Schema, data any, formats strfmt.Registry, options ...Option) error { + res := NewSchemaValidator(schema, nil, "", formats, + append(options, WithRecycleValidators(true), withRecycleResults(true))..., + ).Validate(data) + defer func() { + pools.poolOfResults.RedeemResult(res) + }() + + if res.HasErrors() { + return errors.CompositeValidationError(res.Errors...) + } + + return nil +} + +// NewSchemaValidator creates a new schema validator. +// +// Panics if the provided schema is invalid. 
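+// A minimal usage sketch (illustrative assumption; schema and payload are placeholders,
+// strfmt.Default is the default format registry):
+//
+//	v := NewSchemaValidator(schema, nil, "body", strfmt.Default)
+//	res := v.Validate(payload)
+//	if res.HasErrors() {
+//		// inspect res.Errors
+//	}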
+func NewSchemaValidator(schema *spec.Schema, rootSchema any, root string, formats strfmt.Registry, options ...Option) *SchemaValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newSchemaValidator(schema, rootSchema, root, formats, opts) +} + +func newSchemaValidator(schema *spec.Schema, rootSchema any, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator { + if schema == nil { + return nil + } + + if rootSchema == nil { + rootSchema = schema + } + + if schema.ID != "" || schema.Ref.String() != "" || schema.Ref.IsRoot() { + err := spec.ExpandSchema(schema, rootSchema, nil) + if err != nil { + msg := invalidSchemaProvidedMsg(err).Error() + panic(msg) + } + } + + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *SchemaValidator + if opts.recycleValidators { + s = pools.poolOfSchemaValidators.BorrowValidator() + } else { + s = new(SchemaValidator) + } + + s.Path = root + s.in = "body" + s.Schema = schema + s.Root = rootSchema + s.Options = opts + s.KnownFormats = formats + + s.validators = [8]valueValidator{ + s.typeValidator(), + s.schemaPropsValidator(), + s.stringValidator(), + s.formatValidator(), + s.numberValidator(), + s.sliceValidator(), + s.commonValidator(), + s.objectValidator(), + } + + return s +} + +// SetPath sets the path for this schema validator +func (s *SchemaValidator) SetPath(path string) { + s.Path = path +} + +// Applies returns true when this schema validator applies +func (s *SchemaValidator) Applies(source any, _ reflect.Kind) bool { + _, ok := source.(*spec.Schema) + return ok +} + +// Validate validates the data against the schema +func (s *SchemaValidator) Validate(data any) *Result { + if s == nil { + return emptyResult + } + + if s.Options.recycleValidators { + defer func() { + s.redeemChildren() + s.redeem() // one-time use validator + }() + } + + var result *Result + if s.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + result.data = data + } else { + result = &Result{data: data} + } + + if s.Schema != nil && !s.Options.skipSchemataResult { + result.addRootObjectSchemata(s.Schema) + } + + if data == nil { + // early exit with minimal validation + result.Merge(s.validators[0].Validate(data)) // type validator + result.Merge(s.validators[6].Validate(data)) // common validator + + if s.Options.recycleValidators { + s.validators[0] = nil + s.validators[6] = nil + } + + return result + } + + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + for kind == reflect.Ptr { + tpe = tpe.Elem() + kind = tpe.Kind() + } + d := data + + if kind == reflect.Struct { + // NOTE: since reflect retrieves the true nature of types + // this means that all strfmt types passed here (e.g. strfmt.Datetime, etc..) + // are converted here to strings, and structs are systematically converted + // to map[string]interface{}.
+ var dd any + if err := jsonutils.FromDynamicJSON(data, &dd); err != nil { + result.AddErrors(err) + result.Inc() + + return result + } + + d = dd + } + + // TODO: this part should be handed over to type validator + // Handle special case of json.Number data (number marshalled as string) + isnumber := s.Schema != nil && (s.Schema.Type.Contains(numberType) || s.Schema.Type.Contains(integerType)) + if num, ok := data.(json.Number); ok && isnumber { + if s.Schema.Type.Contains(integerType) { // avoid lossy conversion + in, erri := num.Int64() + if erri != nil { + result.AddErrors(invalidTypeConversionMsg(s.Path, erri)) + result.Inc() + + return result + } + d = in + } else { + nf, errf := num.Float64() + if errf != nil { + result.AddErrors(invalidTypeConversionMsg(s.Path, errf)) + result.Inc() + + return result + } + d = nf + } + + tpe = reflect.TypeOf(d) + kind = tpe.Kind() + } + + for idx, v := range s.validators { + if !v.Applies(s.Schema, kind) { + if s.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := v.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + result.Merge(v.Validate(d)) + if s.Options.recycleValidators { + s.validators[idx] = nil // prevents further (unsafe) usage + } + result.Inc() + } + result.Inc() + + return result +} + +func (s *SchemaValidator) typeValidator() valueValidator { + return newTypeValidator( + s.Path, + s.in, + s.Schema.Type, + s.Schema.Nullable, + s.Schema.Format, + s.Options, + ) +} + +func (s *SchemaValidator) commonValidator() valueValidator { + return newBasicCommonValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.Enum, + s.Options, + ) +} + +func (s *SchemaValidator) sliceValidator() valueValidator { + return newSliceValidator( + s.Path, + s.in, + s.Schema.MaxItems, + s.Schema.MinItems, + s.Schema.UniqueItems, + s.Schema.AdditionalItems, + s.Schema.Items, + s.Root, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) numberValidator() valueValidator { + return newNumberValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.MultipleOf, + s.Schema.Maximum, + s.Schema.ExclusiveMaximum, + s.Schema.Minimum, + s.Schema.ExclusiveMinimum, + "", + "", + s.Options, + ) +} + +func (s *SchemaValidator) stringValidator() valueValidator { + return newStringValidator( + s.Path, + s.in, + nil, + false, + false, + s.Schema.MaxLength, + s.Schema.MinLength, + s.Schema.Pattern, + s.Options, + ) +} + +func (s *SchemaValidator) formatValidator() valueValidator { + return newFormatValidator( + s.Path, + s.in, + s.Schema.Format, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) schemaPropsValidator() valueValidator { + sch := s.Schema + return newSchemaPropsValidator( + s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) objectValidator() valueValidator { + return newObjectValidator( + s.Path, + s.in, + s.Schema.MaxProperties, + s.Schema.MinProperties, + s.Schema.Required, + s.Schema.Properties, + s.Schema.AdditionalProperties, + s.Schema.PatternProperties, + s.Root, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) redeem() { + pools.poolOfSchemaValidators.RedeemValidator(s) +} + +func (s *SchemaValidator) redeemChildren() { + for i, validator := range 
s.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[i] = nil // free up allocated children if not in pool + } +} diff --git a/vendor/github.com/go-openapi/validate/schema_messages.go b/vendor/github.com/go-openapi/validate/schema_messages.go new file mode 100644 index 000000000000..e8c7c48ad7f1 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema_messages.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "github.com/go-openapi/errors" +) + +// Error messages related to schema validation and returned as results. +const ( + // ArrayDoesNotAllowAdditionalItemsError when an additionalItems construct is not verified by the array values provided. + // + // TODO: should move to package go-openapi/errors + ArrayDoesNotAllowAdditionalItemsError = "array doesn't allow for additional items" + + // HasDependencyError indicates that a dependencies construct was not verified + HasDependencyError = "%q has a dependency on %s" + + // InvalidSchemaProvidedError indicates that the schema provided to validate a value cannot be properly compiled + InvalidSchemaProvidedError = "Invalid schema provided to SchemaValidator: %v" + + // InvalidTypeConversionError indicates that a numerical conversion for the given type could not be carried on + InvalidTypeConversionError = "invalid type conversion in %s: %v " + + // MustValidateAtLeastOneSchemaError indicates that in a AnyOf construct, none of the schema constraints specified were verified + MustValidateAtLeastOneSchemaError = "%q must validate at least one schema (anyOf)" + + // MustValidateOnlyOneSchemaError indicates that in a OneOf construct, either none of the schema constraints specified were verified, or several were + MustValidateOnlyOneSchemaError = "%q must validate one and only one schema (oneOf). 
%s" + + // MustValidateAllSchemasError indicates that in a AllOf construct, at least one of the schema constraints specified were not verified + // + // TODO: punctuation in message + MustValidateAllSchemasError = "%q must validate all the schemas (allOf)%s" + + // MustNotValidateSchemaError indicates that in a Not construct, the schema constraint specified was verified + MustNotValidateSchemaError = "%q must not validate the schema (not)" +) + +// Warning messages related to schema validation and returned as results +const () + +func invalidSchemaProvidedMsg(err error) errors.Error { + return errors.New(InternalErrorCode, InvalidSchemaProvidedError, err) +} +func invalidTypeConversionMsg(path string, err error) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidTypeConversionError, path, err) +} +func mustValidateOnlyOneSchemaMsg(path, additionalMsg string) errors.Error { + return errors.New(errors.CompositeErrorCode, MustValidateOnlyOneSchemaError, path, additionalMsg) +} +func mustValidateAtLeastOneSchemaMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, MustValidateAtLeastOneSchemaError, path) +} +func mustValidateAllSchemasMsg(path, additionalMsg string) errors.Error { + return errors.New(errors.CompositeErrorCode, MustValidateAllSchemasError, path, additionalMsg) +} +func mustNotValidatechemaMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, MustNotValidateSchemaError, path) +} +func hasADependencyMsg(path, depkey string) errors.Error { + return errors.New(errors.CompositeErrorCode, HasDependencyError, path, depkey) +} +func arrayDoesNotAllowAdditionalItemsMsg() errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayDoesNotAllowAdditionalItemsError) +} diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go new file mode 100644 index 000000000000..d9fd21a75a12 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema_option.go @@ -0,0 +1,72 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +// SchemaValidatorOptions defines optional rules for schema validation +type SchemaValidatorOptions struct { + EnableObjectArrayTypeCheck bool + EnableArrayMustHaveItemsCheck bool + recycleValidators bool + recycleResult bool + skipSchemataResult bool +} + +// Option sets optional rules for schema validation +type Option func(*SchemaValidatorOptions) + +// EnableObjectArrayTypeCheck activates the swagger rule: an items must be in type: array +func EnableObjectArrayTypeCheck(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.EnableObjectArrayTypeCheck = enable + } +} + +// EnableArrayMustHaveItemsCheck activates the swagger rule: an array must have items defined +func EnableArrayMustHaveItemsCheck(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.EnableArrayMustHaveItemsCheck = enable + } +} + +// SwaggerSchema activates swagger schema validation rules +func SwaggerSchema(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.EnableObjectArrayTypeCheck = enable + svo.EnableArrayMustHaveItemsCheck = enable + } +} + +// WithRecycleValidators saves memory allocations and makes validators +// available for a single use of Validate() only. +// +// When a validator is recycled, called MUST not call the Validate() method twice. 
+func WithRecycleValidators(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleValidators = enable + } +} + +func withRecycleResults(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleResult = enable + } +} + +// WithSkipSchemataResult skips the deep audit payload stored in validation Result +func WithSkipSchemataResult(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.skipSchemataResult = enable + } +} + +// Options returns the current set of options +func (svo SchemaValidatorOptions) Options() []Option { + return []Option{ + EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck), + EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck), + WithRecycleValidators(svo.recycleValidators), + withRecycleResults(svo.recycleResult), + WithSkipSchemataResult(svo.skipSchemataResult), + } +} diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go new file mode 100644 index 000000000000..485f536adc3a --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -0,0 +1,345 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type schemaPropsValidator struct { + Path string + In string + AllOf []spec.Schema + OneOf []spec.Schema + AnyOf []spec.Schema + Not *spec.Schema + Dependencies spec.Dependencies + anyOfValidators []*SchemaValidator + allOfValidators []*SchemaValidator + oneOfValidators []*SchemaValidator + notValidator *SchemaValidator + Root any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func (s *schemaPropsValidator) SetPath(path string) { + s.Path = path +} + +func newSchemaPropsValidator( + path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root any, formats strfmt.Registry, + opts *SchemaValidatorOptions) *schemaPropsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + anyValidators := make([]*SchemaValidator, 0, len(anyOf)) + for i := range anyOf { + anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts)) + } + allValidators := make([]*SchemaValidator, 0, len(allOf)) + for i := range allOf { + allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts)) + } + oneValidators := make([]*SchemaValidator, 0, len(oneOf)) + for i := range oneOf { + oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts)) + } + + var notValidator *SchemaValidator + if not != nil { + notValidator = newSchemaValidator(not, root, path, formats, opts) + } + + var s *schemaPropsValidator + if opts.recycleValidators { + s = pools.poolOfSchemaPropsValidators.BorrowValidator() + } else { + s = new(schemaPropsValidator) + } + + s.Path = path + s.In = in + s.AllOf = allOf + s.OneOf = oneOf + s.AnyOf = anyOf + s.Not = not + s.Dependencies = deps + s.anyOfValidators = anyValidators + s.allOfValidators = allValidators + s.oneOfValidators = oneValidators + s.notValidator = notValidator + s.Root = root + s.KnownFormats = formats + s.Options = opts + + return s +} + +func (s *schemaPropsValidator) Applies(source any, _ reflect.Kind) bool { + _, isSchema := source.(*spec.Schema) + return isSchema +} + +func (s *schemaPropsValidator) Validate(data any) *Result { + 
var mainResult *Result + if s.Options.recycleResult { + mainResult = pools.poolOfResults.BorrowResult() + } else { + mainResult = new(Result) + } + + // Intermediary error results + + // IMPORTANT! messages from underlying validators + var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result + + if s.Options.recycleValidators { + defer func() { + s.redeemChildren() + s.redeem() + + // results are redeemed when merged + }() + } + + if len(s.anyOfValidators) > 0 { + keepResultAnyOf = pools.poolOfResults.BorrowResult() + s.validateAnyOf(data, mainResult, keepResultAnyOf) + } + + if len(s.oneOfValidators) > 0 { + keepResultOneOf = pools.poolOfResults.BorrowResult() + s.validateOneOf(data, mainResult, keepResultOneOf) + } + + if len(s.allOfValidators) > 0 { + keepResultAllOf = pools.poolOfResults.BorrowResult() + s.validateAllOf(data, mainResult, keepResultAllOf) + } + + if s.notValidator != nil { + s.validateNot(data, mainResult) + } + + if len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map { + s.validateDependencies(data, mainResult) + } + + mainResult.Inc() + + // In the end we retain best failures for schema validation + // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). + return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) +} + +func (s *schemaPropsValidator) validateAnyOf(data any, mainResult, keepResultAnyOf *Result) { + // Validates at least one in anyOf schemas + var bestFailures *Result + + for i, anyOfSchema := range s.anyOfValidators { + result := anyOfSchema.Validate(data) + if s.Options.recycleValidators { + s.anyOfValidators[i] = nil + } + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result + + if result.IsValid() { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + + _ = keepResultAnyOf.cleared() + mainResult.Merge(result) + + return + } + + // MatchCount is used to select errors from the schema with most positive checks + if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + bestFailures = result + + continue + } + + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } + } + + mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) + mainResult.Merge(bestFailures) +} + +func (s *schemaPropsValidator) validateOneOf(data any, mainResult, keepResultOneOf *Result) { + // Validates exactly one in oneOf schemas + var ( + firstSuccess, bestFailures *Result + validated int + ) + + for i, oneOfSchema := range s.oneOfValidators { + result := oneOfSchema.Validate(data) + if s.Options.recycleValidators { + s.oneOfValidators[i] = nil + } + + // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us + keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result + + if result.IsValid() { + validated++ + _ = keepResultOneOf.cleared() + + if firstSuccess == nil { + firstSuccess = result + } else if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } + + continue + } + + // MatchCount is used to select errors from the schema with most positive checks + if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + bestFailures = result + } else if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } + } + + switch validated { + case 0: + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid")) + mainResult.Merge(bestFailures) + // firstSuccess necessarily nil + case 1: + mainResult.Merge(firstSuccess) + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + default: + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated))) + mainResult.Merge(bestFailures) + if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(firstSuccess) + } + } +} + +func (s *schemaPropsValidator) validateAllOf(data any, mainResult, keepResultAllOf *Result) { + // Validates all of allOf schemas + var validated int + + for i, allOfSchema := range s.allOfValidators { + result := allOfSchema.Validate(data) + if s.Options.recycleValidators { + s.allOfValidators[i] = nil + } + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultAllOf.Merge(result.keepRelevantErrors()) + if result.IsValid() { + validated++ + } + mainResult.Merge(result) + } + + switch validated { + case 0: + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". None validated")) + case len(s.allOfValidators): + default: + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, "")) + } +} + +func (s *schemaPropsValidator) validateNot(data any, mainResult *Result) { + result := s.notValidator.Validate(data) + if s.Options.recycleValidators { + s.notValidator = nil + } + // We keep inner IMPORTANT!
errors no matter what MatchCount tells us + if result.IsValid() { + mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) + } + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } +} + +func (s *schemaPropsValidator) validateDependencies(data any, mainResult *Result) { + val := data.(map[string]any) + for key := range val { + dep, ok := s.Dependencies[key] + if !ok { + continue + } + + if dep.Schema != nil { + mainResult.Merge( + newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data), + ) + continue + } + + if len(dep.Property) > 0 { + for _, depKey := range dep.Property { + if _, ok := val[depKey]; !ok { + mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) + } + } + } + } +} + +func (s *schemaPropsValidator) redeem() { + pools.poolOfSchemaPropsValidators.RedeemValidator(s) +} + +func (s *schemaPropsValidator) redeemChildren() { + for _, v := range s.anyOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.anyOfValidators = nil + + for _, v := range s.allOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.allOfValidators = nil + + for _, v := range s.oneOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.oneOfValidators = nil + + if s.notValidator != nil { + s.notValidator.redeemChildren() + s.notValidator.redeem() + s.notValidator = nil + } +} diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go new file mode 100644 index 000000000000..4a5a20896870 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -0,0 +1,139 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type schemaSliceValidator struct { + Path string + In string + MaxItems *int64 + MinItems *int64 + UniqueItems bool + AdditionalItems *spec.SchemaOrBool + Items *spec.SchemaOrArray + Root any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newSliceValidator(path, in string, + maxItems, minItems *int64, uniqueItems bool, + additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray, + root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *schemaSliceValidator + if opts.recycleValidators { + v = pools.poolOfSliceValidators.BorrowValidator() + } else { + v = new(schemaSliceValidator) + } + + v.Path = path + v.In = in + v.MaxItems = maxItems + v.MinItems = minItems + v.UniqueItems = uniqueItems + v.AdditionalItems = additionalItems + v.Items = items + v.Root = root + v.KnownFormats = formats + v.Options = opts + + return v +} + +func (s *schemaSliceValidator) SetPath(path string) { + s.Path = path +} + +func (s *schemaSliceValidator) Applies(source any, kind reflect.Kind) bool { + _, ok := source.(*spec.Schema) + r := ok && kind == reflect.Slice + return r +} + +func (s *schemaSliceValidator) Validate(data any) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + + var result *Result + if s.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + if data == nil { + return result + } + val := reflect.ValueOf(data) + size := 
val.Len() + + if s.Items != nil && s.Items.Schema != nil { + for i := range size { + validator := newSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options) + validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i)) + value := val.Index(i) + result.mergeForSlice(val, i, validator.Validate(value.Interface())) + } + } + + itemsSize := 0 + if s.Items != nil && len(s.Items.Schemas) > 0 { + itemsSize = len(s.Items.Schemas) + for i := range itemsSize { + if size <= i { + break + } + + validator := newSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options) + result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) + } + } + if s.AdditionalItems != nil && itemsSize < size { + if s.Items != nil && len(s.Items.Schemas) > 0 && !s.AdditionalItems.Allows { + result.AddErrors(arrayDoesNotAllowAdditionalItemsMsg()) + } + if s.AdditionalItems.Schema != nil { + for i := itemsSize; i < size-itemsSize+1; i++ { + validator := newSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options) + result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) + } + } + } + + if s.MinItems != nil { + if err := MinItems(s.Path, s.In, int64(size), *s.MinItems); err != nil { + result.AddErrors(err) + } + } + if s.MaxItems != nil { + if err := MaxItems(s.Path, s.In, int64(size), *s.MaxItems); err != nil { + result.AddErrors(err) + } + } + if s.UniqueItems { + if err := UniqueItems(s.Path, s.In, val.Interface()); err != nil { + result.AddErrors(err) + } + } + result.Inc() + return result +} + +func (s *schemaSliceValidator) redeem() { + pools.poolOfSliceValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go new file mode 100644 index 000000000000..8616a861f28c --- /dev/null +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -0,0 +1,845 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "slices" + "sort" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/jsonutils" +) + +// Spec validates an OpenAPI 2.0 specification document. +// +// Returns an error flattening in a single standard error, all validation messages. +// +// - TODO: $ref should not have siblings +// - TODO: make sure documentation reflects all checks and warnings +// - TODO: check on discriminators +// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) +// - TODO: full list of unresolved refs +// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples +// - TODO: option to determine if we validate for go-swagger or in a more general context +// - TODO: check on required properties to support anyOf, allOf, oneOf +// +// NOTE: SecurityScopes are maps: no need to check uniqueness +func Spec(doc *loads.Document, formats strfmt.Registry) error { + errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc) + if errs.HasErrors() { + return errors.CompositeValidationError(errs.Errors...) 
+ } + return nil +} + +// SpecValidator validates a swagger 2.0 spec +type SpecValidator struct { + schema *spec.Schema // swagger 2.0 schema + spec *loads.Document + analyzer *analysis.Spec + expanded *loads.Document + KnownFormats strfmt.Registry + Options Opts // validation options + schemaOptions *SchemaValidatorOptions +} + +// NewSpecValidator creates a new swagger spec validator instance +func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator { + // schema options that apply to all called validators + schemaOptions := new(SchemaValidatorOptions) + for _, o := range []Option{ + SwaggerSchema(true), + WithRecycleValidators(true), + // withRecycleResults(true), + } { + o(schemaOptions) + } + + return &SpecValidator{ + schema: schema, + KnownFormats: formats, + Options: defaultOpts, + schemaOptions: schemaOptions, + } +} + +// Validate validates the swagger spec +func (s *SpecValidator) Validate(data any) (*Result, *Result) { + s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult + var sd *loads.Document + errs, warnings := new(Result), new(Result) + + if v, ok := data.(*loads.Document); ok { + sd = v + } + if sd == nil { + errs.AddErrors(invalidDocumentMsg()) + return errs, warnings // no point in continuing + } + s.spec = sd + s.analyzer = analysis.New(sd.Spec()) + + // Raw spec unmarshalling errors + var obj any + if err := json.Unmarshal(sd.Raw(), &obj); err != nil { + // NOTE: under normal conditions, the *load.Document has been already unmarshalled + // So this one is just a paranoid check on the behavior of the spec package + panic(InvalidDocumentError) + } + + defer func() { + // errs holds all errors and warnings, + // warnings only warnings + errs.MergeAsWarnings(warnings) + warnings.AddErrors(errs.Warnings...) 
+ }() + + // Swagger schema validator + schv := newSchemaValidator(s.schema, nil, "", s.KnownFormats, s.schemaOptions) + errs.Merge(schv.Validate(obj)) // error - + // There may be a point in continuing to try and determine more accurate errors + if !s.Options.ContinueOnErrors && errs.HasErrors() { + return errs, warnings // no point in continuing + } + + errs.Merge(s.validateReferencesValid()) // error - + // There may be a point in continuing to try and determine more accurate errors + if !s.Options.ContinueOnErrors && errs.HasErrors() { + return errs, warnings // no point in continuing + } + + errs.Merge(s.validateDuplicateOperationIDs()) + errs.Merge(s.validateDuplicatePropertyNames()) // error - + errs.Merge(s.validateParameters()) // error - + errs.Merge(s.validateItems()) // error - + + // Properties in required definition MUST validate their schema + // Properties SHOULD NOT be declared as both required and readOnly (warning) + errs.Merge(s.validateRequiredDefinitions()) // error and warning + + // There may be a point in continuing to try and determine more accurate errors + if !s.Options.ContinueOnErrors && errs.HasErrors() { + return errs, warnings // no point in continuing + } + + // Values provided as default MUST validate their schema + df := &defaultValidator{SpecValidator: s, schemaOptions: s.schemaOptions} + errs.Merge(df.Validate()) + + // Values provided as examples MUST validate their schema + // Value provided as examples in a response without schema generate a warning + // Known limitations: examples in responses for mime type not application/json are ignored (warning) + ex := &exampleValidator{SpecValidator: s, schemaOptions: s.schemaOptions} + errs.Merge(ex.Validate()) + + errs.Merge(s.validateNonEmptyPathParamNames()) + + // errs.Merge(s.validateRefNoSibling()) // warning only + errs.Merge(s.validateReferenced()) // warning only + + return errs, warnings +} + +// SetContinueOnErrors sets the ContinueOnErrors option for this validator. 
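+// Illustrative sketch (assumption; doc is a *loads.Document):
+//
+//	sv := NewSpecValidator(doc.Schema(), strfmt.Default)
+//	sv.SetContinueOnErrors(true)
+//	errs, warns := sv.Validate(doc)
+//	_ = warns // errs aggregates errors and warnings; warns holds warnings only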
+func (s *SpecValidator) SetContinueOnErrors(c bool) { + s.Options.ContinueOnErrors = c +} + +func (s *SpecValidator) validateNonEmptyPathParamNames() *Result { + res := pools.poolOfResults.BorrowResult() + if s.spec.Spec().Paths == nil { + // There is no Paths object: error + res.AddErrors(noValidPathMsg()) + + return res + } + + if s.spec.Spec().Paths.Paths == nil { + // Paths may be empty: warning + res.AddWarnings(noValidPathMsg()) + + return res + } + + for k := range s.spec.Spec().Paths.Paths { + if strings.Contains(k, "{}") { + res.AddErrors(emptyPathParameterMsg(k)) + } + } + + return res +} + +func (s *SpecValidator) validateDuplicateOperationIDs() *Result { + // OperationID, if specified, must be unique across the board + var analyzer *analysis.Spec + if s.expanded != nil { + // $ref are valid: we can analyze operations on an expanded spec + analyzer = analysis.New(s.expanded.Spec()) + } else { + // fallback on possible incomplete picture because of previous errors + analyzer = s.analyzer + } + res := pools.poolOfResults.BorrowResult() + known := make(map[string]int) + for _, v := range analyzer.OperationIDs() { + if v != "" { + known[v]++ + } + } + for k, v := range known { + if v > 1 { + res.AddErrors(nonUniqueOperationIDMsg(k, v)) + } + } + return res +} + +type dupProp struct { + Name string + Definition string +} + +func (s *SpecValidator) validateDuplicatePropertyNames() *Result { + // definition can't declare a property that's already defined by one of its ancestors + res := pools.poolOfResults.BorrowResult() + for k, sch := range s.spec.Spec().Definitions { + if len(sch.AllOf) == 0 { + continue + } + + knownanc := map[string]struct{}{ + "#/definitions/" + k: {}, + } + + ancs, rec := s.validateCircularAncestry(k, sch, knownanc) + if rec != nil && (rec.HasErrors() || !rec.HasWarnings()) { + res.Merge(rec) + } + if len(ancs) > 0 { + res.AddErrors(circularAncestryDefinitionMsg(k, ancs)) + return res + } + + knowns := make(map[string]struct{}) + dups, rep := s.validateSchemaPropertyNames(k, sch, knowns) + if rep != nil && (rep.HasErrors() || rep.HasWarnings()) { + res.Merge(rep) + } + if len(dups) > 0 { + var pns []string + for _, v := range dups { + pns = append(pns, v.Definition+"."+v.Name) + } + res.AddErrors(duplicatePropertiesMsg(k, pns)) + } + + } + return res +} + +func (s *SpecValidator) resolveRef(ref *spec.Ref) (*spec.Schema, error) { + if s.spec.SpecFilePath() != "" { + return spec.ResolveRefWithBase(s.spec.Spec(), ref, &spec.ExpandOptions{RelativeBase: s.spec.SpecFilePath()}) + } + // NOTE: it looks like with the new spec resolver, this code is now unreachable + return spec.ResolveRef(s.spec.Spec(), ref) +} + +func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, knowns map[string]struct{}) ([]dupProp, *Result) { + var dups []dupProp + + schn := nm + schc := &sch + res := pools.poolOfResults.BorrowResult() + + for schc.Ref.String() != "" { + // gather property names + reso, err := s.resolveRef(&schc.Ref) + if err != nil { + errorHelp.addPointerError(res, err, schc.Ref.String(), nm) + return dups, res + } + schc = reso + schn = sch.Ref.String() + } + + if len(schc.AllOf) > 0 { + for _, chld := range schc.AllOf { + dup, rep := s.validateSchemaPropertyNames(schn, chld, knowns) + if rep != nil && (rep.HasErrors() || rep.HasWarnings()) { + res.Merge(rep) + } + dups = append(dups, dup...)
+ } + return dups, res + } + + for k := range schc.Properties { + _, ok := knowns[k] + if ok { + dups = append(dups, dupProp{Name: k, Definition: schn}) + } else { + knowns[k] = struct{}{} + } + } + + return dups, res +} + +func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) { + res := pools.poolOfResults.BorrowResult() + + if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. We should not be able to actually get there + return nil, res + } + var ancs []string + + schn := nm + schc := &sch + + for schc.Ref.String() != "" { + reso, err := s.resolveRef(&schc.Ref) + if err != nil { + errorHelp.addPointerError(res, err, schc.Ref.String(), nm) + return ancs, res + } + schc = reso + schn = sch.Ref.String() + } + + if schn != nm && schn != "" { + if _, ok := knowns[schn]; ok { + ancs = append(ancs, schn) + } + knowns[schn] = struct{}{} + + if len(ancs) > 0 { + return ancs, res + } + } + + if len(schc.AllOf) > 0 { + for _, chld := range schc.AllOf { + if chld.Ref.String() != "" || len(chld.AllOf) > 0 { + anc, rec := s.validateCircularAncestry(schn, chld, knowns) + if rec != nil && (rec.HasErrors() || !rec.HasWarnings()) { + res.Merge(rec) + } + ancs = append(ancs, anc...) + if len(ancs) > 0 { + return ancs, res + } + } + } + } + return ancs, res +} + +func (s *SpecValidator) validateItems() *Result { + // validate parameter, items, schema and response objects for presence of item if type is array + res := pools.poolOfResults.BorrowResult() + + for method, pi := range s.analyzer.Operations() { + for path, op := range pi { + for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + + if param.TypeName() == arrayType && param.ItemsTypeName() == "" { + res.AddErrors(arrayInParamRequiresItemsMsg(param.Name, op.ID)) + continue + } + if param.In != swaggerBody { + if param.Items != nil { + items := param.Items + for items.TypeName() == arrayType { + if items.ItemsTypeName() == "" { + res.AddErrors(arrayInParamRequiresItemsMsg(param.Name, op.ID)) + break + } + items = items.Items + } + } + } else { + // In: body + if param.Schema != nil { + res.Merge(s.validateSchemaItems(*param.Schema, fmt.Sprintf("body param %q", param.Name), op.ID)) + } + } + } + + var responses []spec.Response + if op.Responses != nil { + if op.Responses.Default != nil { + responses = append(responses, *op.Responses.Default) + } + if op.Responses.StatusCodeResponses != nil { + for _, v := range op.Responses.StatusCodeResponses { + responses = append(responses, v) + } + } + } + + for _, resp := range responses { + // Response headers with array + for hn, hv := range resp.Headers { + if hv.TypeName() == arrayType && hv.ItemsTypeName() == "" { + res.AddErrors(arrayInHeaderRequiresItemsMsg(hn, op.ID)) + } + } + if resp.Schema != nil { + res.Merge(s.validateSchemaItems(*resp.Schema, "response body", op.ID)) + } + } + } + } + return res +} + +// Verifies constraints on array type +func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { + res := pools.poolOfResults.BorrowResult() + if !schema.Type.Contains(arrayType) { + return res + } + + if schema.Items == nil || schema.Items.Len() == 0 { + res.AddErrors(arrayRequiresItemsMsg(prefix, opID)) + return res + } + + if schema.Items.Schema != nil { + schema = *schema.Items.Schema + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidItemsPatternMsg(prefix, opID, schema.Pattern)) + } + + 
res.Merge(s.validateSchemaItems(schema, prefix, opID)) + } + return res +} + +func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result { + // Each defined operation path parameters must correspond to a named element in the API's path pattern. + // (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.) + res := pools.poolOfResults.BorrowResult() + for _, l := range fromPath { + var matched bool + for _, r := range fromOperation { + if l == "{"+r+"}" { + matched = true + break + } + } + if !matched { + res.AddErrors(noParameterInPathMsg(l)) + } + } + + for _, p := range fromOperation { + var matched bool + if slices.Contains(fromPath, "{"+p+"}") { + matched = true + } + if !matched { + res.AddErrors(pathParamNotInPathMsg(path, p)) + } + } + + return res +} + +func (s *SpecValidator) validateReferenced() *Result { + var res Result + res.MergeAsWarnings(s.validateReferencedParameters()) + res.MergeAsWarnings(s.validateReferencedResponses()) + res.MergeAsWarnings(s.validateReferencedDefinitions()) + return &res +} + +func (s *SpecValidator) validateReferencedParameters() *Result { + // Each referenceable definition should have references. + params := s.spec.Spec().Parameters + if len(params) == 0 { + return nil + } + + expected := make(map[string]struct{}) + for k := range params { + expected["#/parameters/"+jsonpointer.Escape(k)] = struct{}{} + } + for _, k := range s.analyzer.AllParameterReferences() { + delete(expected, k) + } + + if len(expected) == 0 { + return nil + } + result := pools.poolOfResults.BorrowResult() + for k := range expected { + result.AddWarnings(unusedParamMsg(k)) + } + return result +} + +func (s *SpecValidator) validateReferencedResponses() *Result { + // Each referenceable definition should have references. + responses := s.spec.Spec().Responses + if len(responses) == 0 { + return nil + } + + expected := make(map[string]struct{}) + for k := range responses { + expected["#/responses/"+jsonpointer.Escape(k)] = struct{}{} + } + for _, k := range s.analyzer.AllResponseReferences() { + delete(expected, k) + } + + if len(expected) == 0 { + return nil + } + result := pools.poolOfResults.BorrowResult() + for k := range expected { + result.AddWarnings(unusedResponseMsg(k)) + } + return result +} + +func (s *SpecValidator) validateReferencedDefinitions() *Result { + // Each referenceable definition must have references. 
+ defs := s.spec.Spec().Definitions + if len(defs) == 0 { + return nil + } + + expected := make(map[string]struct{}) + for k := range defs { + expected["#/definitions/"+jsonpointer.Escape(k)] = struct{}{} + } + for _, k := range s.analyzer.AllDefinitionReferences() { + delete(expected, k) + } + + if len(expected) == 0 { + return nil + } + + result := new(Result) + for k := range expected { + result.AddWarnings(unusedDefinitionMsg(k)) + } + return result +} + +func (s *SpecValidator) validateRequiredDefinitions() *Result { + // Each property listed in the required array must be defined in the properties of the model + res := pools.poolOfResults.BorrowResult() + +DEFINITIONS: + for d, schema := range s.spec.Spec().Definitions { + if schema.Required != nil { // Safeguard + for _, pn := range schema.Required { + red := s.validateRequiredProperties(pn, d, &schema) //#nosec + res.Merge(red) + if !red.IsValid() && !s.Options.ContinueOnErrors { + break DEFINITIONS // there is an error, let's stop that bleeding + } + } + } + } + return res +} + +func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result { + // Takes care of recursive property definitions, which may be nested in additionalProperties schemas + res := pools.poolOfResults.BorrowResult() + propertyMatch := false + patternMatch := false + additionalPropertiesMatch := false + isReadOnly := false + + // Regular properties + if _, ok := v.Properties[path]; ok { + propertyMatch = true + isReadOnly = v.Properties[path].ReadOnly + } + + // NOTE: patternProperties are not supported in swagger. Even though, we continue validation here + // We check all defined patterns: if one regexp is invalid, croaks an error + for pp, pv := range v.PatternProperties { + re, err := compileRegexp(pp) + if err != nil { + res.AddErrors(invalidPatternMsg(pp, in)) + } else if re.MatchString(path) { + patternMatch = true + if !propertyMatch { + isReadOnly = pv.ReadOnly + } + } + } + + if !propertyMatch && !patternMatch { + if v.AdditionalProperties != nil { + if v.AdditionalProperties.Allows && v.AdditionalProperties.Schema == nil { + additionalPropertiesMatch = true + } else if v.AdditionalProperties.Schema != nil { + // additionalProperties as schema are upported in swagger + // recursively validates additionalProperties schema + // TODO : anyOf, allOf, oneOf like in schemaPropsValidator + red := s.validateRequiredProperties(path, in, v.AdditionalProperties.Schema) + if red.IsValid() { + additionalPropertiesMatch = true + if !propertyMatch && !patternMatch { + isReadOnly = v.AdditionalProperties.Schema.ReadOnly + } + } + res.Merge(red) + } + } + } + + if !propertyMatch && !patternMatch && !additionalPropertiesMatch { + res.AddErrors(requiredButNotDefinedMsg(path, in)) + } + + if isReadOnly { + res.AddWarnings(readOnlyAndRequiredMsg(in, path)) + } + return res +} + +func (s *SpecValidator) validateParameters() *Result { + // - for each method, path is unique, regardless of path parameters + // e.g. GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are + // considered duplicate paths, if StrictPathParamUniqueness is enabled. 
+ // - each parameter should have a unique `name` and `type` combination
+ // - each operation should have only 1 parameter of type body
+ // - there must be at most 1 parameter in body
+ // - parameters with pattern property must specify valid patterns
+ // - $ref in parameters must resolve
+ // - path param must be required
+ res := pools.poolOfResults.BorrowResult()
+ rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`)
+ for method, pi := range s.expandedAnalyzer().Operations() {
+ methodPaths := make(map[string]map[string]string)
+ for path, op := range pi {
+ if s.Options.StrictPathParamUniqueness {
+ pathToAdd := pathHelp.stripParametersInPath(path)
+
+ // Warn on garbled path after param stripping
+ if rexGarbledPathSegment.MatchString(pathToAdd) {
+ res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd))
+ }
+
+ // Check uniqueness of stripped paths
+ if _, found := methodPaths[method][pathToAdd]; found {
+
+ // Sort names for stable, testable output
+ if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 {
+ res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd]))
+ } else {
+ res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path))
+ }
+ } else {
+ if _, found := methodPaths[method]; !found {
+ methodPaths[method] = map[string]string{}
+ }
+ methodPaths[method][pathToAdd] = path // Original non-stripped path
+
+ }
+ }
+
+ var bodyParams []string
+ var paramNames []string
+ var hasForm, hasBody bool
+
+ // Check parameter name uniqueness for the operation
+ // TODO: should be done after param expansion
+ res.Merge(s.checkUniqueParams(path, method, op))
+
+ // pick the root schema from the swagger specification which describes a parameter
+ origSchema, ok := s.schema.Definitions["parameter"]
+ if !ok {
+ panic("unexpected swagger schema: missing #/definitions/parameter")
+ }
+ // clone it once to avoid expanding a global schema (e.g. 
swagger spec) + paramSchema, err := deepCloneSchema(origSchema) + if err != nil { + panic(fmt.Errorf("can't clone schema: %w", err)) + } + + for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + // An expanded parameter must validate the Parameter schema (an unexpanded $ref always passes high-level schema validation) + schv := newSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions) + var obj any + if err := jsonutils.FromDynamicJSON(pr, &obj); err != nil { + res.AddErrors(err) + + return res + } + + res.Merge(schv.Validate(obj)) + + // Validate pattern regexp for parameters with a Pattern property + if _, err := compileRegexp(pr.Pattern); err != nil { + res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern)) + } + + // There must be at most one parameter in body: list them all + if pr.In == swaggerBody { + bodyParams = append(bodyParams, fmt.Sprintf("%q", pr.Name)) + hasBody = true + } + + if pr.In == "path" { + paramNames = append(paramNames, pr.Name) + // Path declared in path must have the required: true property + if !pr.Required { + res.AddErrors(pathParamRequiredMsg(op.ID, pr.Name)) + } + } + + if pr.In == "formData" { + hasForm = true + } + + if pr.Type != numberType && pr.Type != integerType && + (pr.Maximum != nil || pr.Minimum != nil || pr.MultipleOf != nil) { + // A non-numeric parameter has validation keywords for numeric instances (number and integer) + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + + if pr.Type != stringType && + // A non-string parameter has validation keywords for strings + (pr.MaxLength != nil || pr.MinLength != nil || pr.Pattern != "") { + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + + if pr.Type != arrayType && + // A non-array parameter has validation keywords for arrays + (pr.MaxItems != nil || pr.MinItems != nil || pr.UniqueItems) { + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + } + + // In:formData and In:body are mutually exclusive + if hasBody && hasForm { + res.AddErrors(bothFormDataAndBodyMsg(op.ID)) + } + // There must be at most one body param + // Accurately report situations when more than 1 body param is declared (possibly unnamed) + if len(bodyParams) > 1 { + sort.Strings(bodyParams) + res.AddErrors(multipleBodyParamMsg(op.ID, bodyParams)) + } + + // Check uniqueness of parameters in path + paramsInPath := pathHelp.extractPathParams(path) + for i, p := range paramsInPath { + for j, q := range paramsInPath { + if p == q && i > j { + res.AddErrors(pathParamNotUniqueMsg(path, p, q)) + break + } + } + } + + // Warns about possible malformed params in path + rexGarbledParam := mustCompileRegexp(`{.*[{}\s]+.*}`) + for _, p := range paramsInPath { + if rexGarbledParam.MatchString(p) { + res.AddWarnings(pathParamGarbledMsg(path, p)) + } + } + + // Match params from path vs params from params section + res.Merge(s.validatePathParamPresence(path, paramsInPath, paramNames)) + } + } + return res +} + +func (s *SpecValidator) validateReferencesValid() *Result { + // each reference must point to a valid object + res := pools.poolOfResults.BorrowResult() + for _, r := range s.analyzer.AllRefs() { + if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI + res.AddErrors(invalidRefMsg(r.String())) + } + } + if !res.HasErrors() { + // NOTE: with default settings, loads.Document.Expanded() + // stops 
on first error. Anyhow, the expand option to continue + // on errors fails to report errors at all. + exp, err := s.spec.Expanded() + if err != nil { + res.AddErrors(unresolvedReferencesMsg(err)) + } + s.expanded = exp + } + return res +} + +func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operation) *Result { + // Check for duplicate parameters declaration in param section. + // Each parameter should have a unique `name` and `type` combination + // NOTE: this could be factorized in analysis (when constructing the params map) + // However, there are some issues with such a factorization: + // - analysis does not seem to fully expand params + // - param keys may be altered by x-go-name + res := pools.poolOfResults.BorrowResult() + pnames := make(map[string]struct{}) + + if op.Parameters != nil { // Safeguard + for _, ppr := range op.Parameters { + var ok bool + pr, red := paramHelp.resolveParam(path, method, op.ID, &ppr, s) //#nosec + res.Merge(red) + + if pr != nil && pr.Name != "" { // params with empty name does no participate the check + key := fmt.Sprintf("%s#%s", pr.In, pr.Name) + + if _, ok = pnames[key]; ok { + res.AddErrors(duplicateParamNameMsg(pr.In, pr.Name, op.ID)) + } + pnames[key] = struct{}{} + } + } + } + return res +} + +// expandedAnalyzer returns expanded.Analyzer when it is available. +// otherwise just analyzer. +func (s *SpecValidator) expandedAnalyzer() *analysis.Spec { + if s.expanded != nil && s.expanded.Analyzer != nil { + return s.expanded.Analyzer + } + return s.analyzer +} + +func deepCloneSchema(src spec.Schema) (spec.Schema, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return spec.Schema{}, err + } + + var dst spec.Schema + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return spec.Schema{}, err + } + + return dst, nil +} diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go new file mode 100644 index 000000000000..9b079af647a7 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -0,0 +1,355 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "net/http" + + "github.com/go-openapi/errors" +) + +// Error messages related to spec validation and returned as results. +const ( + // ArrayRequiresItemsError ... + ArrayRequiresItemsError = "%s for %q is a collection without an element type (array requires items definition)" + + // ArrayInParamRequiresItemsError ... + ArrayInParamRequiresItemsError = "param %q for %q is a collection without an element type (array requires item definition)" + + // ArrayInHeaderRequiresItemsError ... + ArrayInHeaderRequiresItemsError = "header %q for %q is a collection without an element type (array requires items definition)" + + // BothFormDataAndBodyError indicates that an operation specifies both a body and a formData parameter, which is forbidden + BothFormDataAndBodyError = "operation %q has both formData and body parameters. Only one such In: type may be used for a given operation" + + // CannotResolveReferenceError when a $ref could not be resolved + CannotResolveReferenceError = "could not resolve reference in %s to $ref %s: %v" + + // CircularAncestryDefinitionError ... 
+ CircularAncestryDefinitionError = "definition %q has circular ancestry: %v" + + // DefaultValueDoesNotValidateError results from an invalid default value provided + DefaultValueDoesNotValidateError = "default value for %s in %s does not validate its schema" + + // DefaultValueItemsDoesNotValidateError results from an invalid default value provided for Items + DefaultValueItemsDoesNotValidateError = "default value for %s.items in %s does not validate its schema" + + // DefaultValueHeaderDoesNotValidateError results from an invalid default value provided in header + DefaultValueHeaderDoesNotValidateError = "in operation %q, default value in header %s for %s does not validate its schema" + + // DefaultValueHeaderItemsDoesNotValidateError results from an invalid default value provided in header.items + DefaultValueHeaderItemsDoesNotValidateError = "in operation %q, default value in header.items %s for %s does not validate its schema" + + // DefaultValueInDoesNotValidateError ... + DefaultValueInDoesNotValidateError = "in operation %q, default value in %s does not validate its schema" + + // DuplicateParamNameError ... + DuplicateParamNameError = "duplicate parameter name %q for %q in operation %q" + + // DuplicatePropertiesError ... + DuplicatePropertiesError = "definition %q contains duplicate properties: %v" + + // ExampleValueDoesNotValidateError results from an invalid example value provided + ExampleValueDoesNotValidateError = "example value for %s in %s does not validate its schema" + + // ExampleValueItemsDoesNotValidateError results from an invalid example value provided for Items + ExampleValueItemsDoesNotValidateError = "example value for %s.items in %s does not validate its schema" + + // ExampleValueHeaderDoesNotValidateError results from an invalid example value provided in header + ExampleValueHeaderDoesNotValidateError = "in operation %q, example value in header %s for %s does not validate its schema" + + // ExampleValueHeaderItemsDoesNotValidateError results from an invalid example value provided in header.items + ExampleValueHeaderItemsDoesNotValidateError = "in operation %q, example value in header.items %s for %s does not validate its schema" + + // ExampleValueInDoesNotValidateError ... + ExampleValueInDoesNotValidateError = "in operation %q, example value in %s does not validate its schema" + + // EmptyPathParameterError means that a path parameter was found empty (e.g. "{}") + EmptyPathParameterError = "%q contains an empty path parameter" + + // InvalidDocumentError states that spec validation only processes spec.Document objects + InvalidDocumentError = "spec validator can only validate spec.Document objects" + + // InvalidItemsPatternError indicates an Items definition with invalid pattern + InvalidItemsPatternError = "%s for %q has invalid items pattern: %q" + + // InvalidParameterDefinitionError indicates an error detected on a parameter definition + InvalidParameterDefinitionError = "invalid definition for parameter %s in %s in operation %q" + + // InvalidParameterDefinitionAsSchemaError indicates an error detected on a parameter definition, which was mistaken with a schema definition. + // Most likely, this situation is encountered whenever a $ref has been added as a sibling of the parameter definition. + InvalidParameterDefinitionAsSchemaError = "invalid definition as Schema for parameter %s in %s in operation %q" + + // InvalidPatternError ... 
+ InvalidPatternError = "pattern %q is invalid in %s"
+
+ // InvalidPatternInError indicates an invalid pattern in a schema or items definition
+ InvalidPatternInError = "%s in %s has invalid pattern: %q"
+
+ // InvalidPatternInHeaderError indicates a header definition with an invalid pattern
+ InvalidPatternInHeaderError = "in operation %q, header %s for %s has invalid pattern %q: %v"
+
+ // InvalidPatternInParamError ...
+ InvalidPatternInParamError = "operation %q has invalid pattern in param %q: %q"
+
+ // InvalidReferenceError indicates that a $ref property could not be resolved
+ InvalidReferenceError = "invalid ref %q"
+
+ // InvalidResponseDefinitionAsSchemaError indicates an error detected on a response definition, which was mistaken for a schema definition.
+ // Most likely, this situation is encountered whenever a $ref has been added as a sibling of the response definition.
+ InvalidResponseDefinitionAsSchemaError = "invalid definition as Schema for response %s in %s"
+
+ // MultipleBodyParamError indicates that an operation specifies multiple parameters with in: body
+ MultipleBodyParamError = "operation %q has more than 1 body param: %v"
+
+ // NonUniqueOperationIDError indicates that the same operationId has been specified several times
+ NonUniqueOperationIDError = "%q is defined %d times"
+
+ // NoParameterInPathError indicates that a path was found without any parameter
+ NoParameterInPathError = "path param %q has no parameter definition"
+
+ // NoValidPathErrorOrWarning indicates that no single path could be validated. If Paths is empty, this message is only a warning.
+ NoValidPathErrorOrWarning = "spec has no valid path defined"
+
+ // NoValidResponseError indicates that no valid response description could be found for an operation
+ NoValidResponseError = "operation %q has no valid response"
+
+ // PathOverlapError ...
+ PathOverlapError = "path %s overlaps with %s"
+
+ // PathParamNotInPathError indicates that a parameter specified with in: path was not found in the path specification
+ PathParamNotInPathError = "path param %q is not present in path %q"
+
+ // PathParamNotUniqueError ...
+ PathParamNotUniqueError = "params in path %q must be unique: %q conflicts with %q"
+
+ // PathParamRequiredError ...
+ PathParamRequiredError = "in operation %q, path param %q must be declared as required"
+
+ // RefNotAllowedInHeaderError indicates a $ref was found in a header definition, which is not allowed by Swagger
+ RefNotAllowedInHeaderError = "IMPORTANT! in %q: $ref are not allowed in headers. In context for header %q%s"
+
+ // RequiredButNotDefinedError ...
+ RequiredButNotDefinedError = "%q is present in required but not defined as property in definition %q"
+
+ // SomeParametersBrokenError indicates that some parameters could not be resolved, which might result in partial checks to be carried on
+ SomeParametersBrokenError = "some parameters definitions are broken in %q.%s. Cannot carry on full checks on parameters for operation %s"
+
+ // UnresolvedReferencesError indicates that at least one $ref could not be resolved
+ UnresolvedReferencesError = "some references could not be resolved in spec. 
First found: %v" +) + +// Warning messages related to spec validation and returned as results +const ( + // ExamplesWithoutSchemaWarning indicates that examples are provided for a response,but not schema to validate the example against + ExamplesWithoutSchemaWarning = "Examples provided without schema in operation %q, %s" + + // ExamplesMimeNotSupportedWarning indicates that examples are provided with a mime type different than application/json, which + // the validator dos not support yetl + ExamplesMimeNotSupportedWarning = "No validation attempt for examples for media types other than application/json, in operation %q, %s" + + // PathParamGarbledWarning ... + PathParamGarbledWarning = "in path %q, param %q contains {,} or white space. Albeit not stricly illegal, this is probably no what you want" + + // ParamValidationTypeMismatch indicates that parameter has validation which does not match its type + ParamValidationTypeMismatch = "validation keywords of parameter %q in path %q don't match its type %s" + + // PathStrippedParamGarbledWarning ... + PathStrippedParamGarbledWarning = "path stripped from path parameters %s contains {,} or white space. This is probably no what you want." + + // ReadOnlyAndRequiredWarning ... + ReadOnlyAndRequiredWarning = "Required property %s in %q should not be marked as both required and readOnly" + + // RefShouldNotHaveSiblingsWarning indicates that a $ref was found with a sibling definition. This results in the $ref taking over its siblings, + // which is most likely not wanted. + RefShouldNotHaveSiblingsWarning = "$ref property should have no sibling in %q.%s" + + // RequiredHasDefaultWarning indicates that a required parameter property should not have a default + RequiredHasDefaultWarning = "%s in %s has a default value and is required as parameter" + + // UnusedDefinitionWarning ... + UnusedDefinitionWarning = "definition %q is not used anywhere" + + // UnusedParamWarning ... + UnusedParamWarning = "parameter %q is not used anywhere" + + // UnusedResponseWarning ... + UnusedResponseWarning = "response %q is not used anywhere" + + InvalidObject = "expected an object in %q.%s" +) + +// Additional error codes +const ( + // InternalErrorCode reports an internal technical error + InternalErrorCode = http.StatusInternalServerError + // NotFoundErrorCode indicates that a resource (e.g. 
a $ref) could not be found + NotFoundErrorCode = http.StatusNotFound +) + +func invalidDocumentMsg() errors.Error { + return errors.New(InternalErrorCode, InvalidDocumentError) +} +func invalidRefMsg(path string) errors.Error { + return errors.New(NotFoundErrorCode, InvalidReferenceError, path) +} +func unresolvedReferencesMsg(err error) errors.Error { + return errors.New(errors.CompositeErrorCode, UnresolvedReferencesError, err) +} +func noValidPathMsg() errors.Error { + return errors.New(errors.CompositeErrorCode, NoValidPathErrorOrWarning) +} +func emptyPathParameterMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, EmptyPathParameterError, path) +} +func nonUniqueOperationIDMsg(path string, i int) errors.Error { + return errors.New(errors.CompositeErrorCode, NonUniqueOperationIDError, path, i) +} +func circularAncestryDefinitionMsg(path string, args any) errors.Error { + return errors.New(errors.CompositeErrorCode, CircularAncestryDefinitionError, path, args) +} +func duplicatePropertiesMsg(path string, args any) errors.Error { + return errors.New(errors.CompositeErrorCode, DuplicatePropertiesError, path, args) +} +func pathParamNotInPathMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamNotInPathError, param, path) +} +func arrayRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayRequiresItemsError, path, operation) +} +func arrayInParamRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayInParamRequiresItemsError, path, operation) +} +func arrayInHeaderRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayInHeaderRequiresItemsError, path, operation) +} +func invalidItemsPatternMsg(path, operation, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidItemsPatternError, path, operation, pattern) +} +func invalidPatternMsg(pattern, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternError, pattern, path) +} +func requiredButNotDefinedMsg(path, definition string) errors.Error { + return errors.New(errors.CompositeErrorCode, RequiredButNotDefinedError, path, definition) +} +func pathParamGarbledMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamGarbledWarning, path, param) +} +func pathStrippedParamGarbledMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathStrippedParamGarbledWarning, path) +} +func pathOverlapMsg(path, arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathOverlapError, path, arg) +} +func invalidPatternInParamMsg(operation, param, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInParamError, operation, param, pattern) +} +func pathParamRequiredMsg(operation, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamRequiredError, operation, param) +} +func bothFormDataAndBodyMsg(operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, BothFormDataAndBodyError, operation) +} +func multipleBodyParamMsg(operation string, args any) errors.Error { + return errors.New(errors.CompositeErrorCode, MultipleBodyParamError, operation, args) +} +func pathParamNotUniqueMsg(path, param, arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamNotUniqueError, path, param, 
arg) +} +func duplicateParamNameMsg(path, param, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, DuplicateParamNameError, param, path, operation) +} +func unusedParamMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedParamWarning, arg) +} +func unusedDefinitionMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedDefinitionWarning, arg) +} +func unusedResponseMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedResponseWarning, arg) +} +func readOnlyAndRequiredMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, ReadOnlyAndRequiredWarning, param, path) +} +func noParameterInPathMsg(param string) errors.Error { + return errors.New(errors.CompositeErrorCode, NoParameterInPathError, param) +} +func requiredHasDefaultMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, RequiredHasDefaultWarning, param, path) +} +func defaultValueDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueDoesNotValidateError, param, path) +} +func defaultValueItemsDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueItemsDoesNotValidateError, param, path) +} +func noValidResponseMsg(operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, NoValidResponseError, operation) +} +func defaultValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueHeaderDoesNotValidateError, operation, header, path) +} +func defaultValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueHeaderItemsDoesNotValidateError, operation, header, path) +} +func invalidPatternInHeaderMsg(operation, header, path, pattern string, args any) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInHeaderError, operation, header, path, pattern, args) +} +func invalidPatternInMsg(path, in, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInError, path, in, pattern) +} +func defaultValueInDoesNotValidateMsg(operation, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueInDoesNotValidateError, operation, path) +} +func exampleValueDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueDoesNotValidateError, param, path) +} +func exampleValueItemsDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueItemsDoesNotValidateError, param, path) +} +func exampleValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueHeaderDoesNotValidateError, operation, header, path) +} +func exampleValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueHeaderItemsDoesNotValidateError, operation, header, path) +} +func exampleValueInDoesNotValidateMsg(operation, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueInDoesNotValidateError, operation, path) +} +func examplesWithoutSchemaMsg(operation, response string) errors.Error { + return errors.New(errors.CompositeErrorCode, 
ExamplesWithoutSchemaWarning, operation, response) +} +func examplesMimeNotSupportedMsg(operation, response string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExamplesMimeNotSupportedWarning, operation, response) +} +func refNotAllowedInHeaderMsg(path, header, ref string) errors.Error { + return errors.New(errors.CompositeErrorCode, RefNotAllowedInHeaderError, path, header, ref) +} +func cannotResolveRefMsg(path, ref string, err error) errors.Error { + return errors.New(errors.CompositeErrorCode, CannotResolveReferenceError, path, ref, err) +} +func invalidParameterDefinitionMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionError, path, method, operationID) +} +func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionAsSchemaError, path, method, operationID) +} +func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { + return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ) +} +func invalidObjectMsg(path, in string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidObject, path, in) +} + +// disabled +// +// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { +// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) +// } +func someParametersBrokenMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) +} +func refShouldNotHaveSiblingsMsg(path, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, RefShouldNotHaveSiblingsWarning, operationID, path) +} diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go new file mode 100644 index 000000000000..9b9ab8d917dd --- /dev/null +++ b/vendor/github.com/go-openapi/validate/type.go @@ -0,0 +1,203 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "reflect" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + "github.com/go-openapi/swag/fileutils" +) + +type typeValidator struct { + Path string + In string + Type spec.StringOrArray + Nullable bool + Format string + Options *SchemaValidatorOptions +} + +func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, format string, opts *SchemaValidatorOptions) *typeValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var t *typeValidator + if opts.recycleValidators { + t = pools.poolOfTypeValidators.BorrowValidator() + } else { + t = new(typeValidator) + } + + t.Path = path + t.In = in + t.Type = typ + t.Nullable = nullable + t.Format = format + t.Options = opts + + return t +} + +func (t *typeValidator) SetPath(path string) { + t.Path = path +} + +func (t *typeValidator) Applies(source any, _ reflect.Kind) bool { + // typeValidator applies to Schema, Parameter and Header objects + switch source.(type) { + case *spec.Schema: + case *spec.Parameter: + case *spec.Header: + default: + return false + } + + return (len(t.Type) > 0 || t.Format != "") +} + +func (t *typeValidator) Validate(data any) *Result { + if t.Options.recycleValidators { + defer func() { 
+ t.redeem() + }() + } + + if data == nil { + // nil or zero value for the passed structure require Type: null + if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult) + } + + return emptyResult + } + + // check if the type matches, should be used in every validator chain as first item + val := reflect.Indirect(reflect.ValueOf(data)) + kind := val.Kind() + + // infer schema type (JSON) and format from passed data type + schType, format := t.schemaInfoForType(data) + + // check numerical types + // TODO: check unsigned ints + // TODO: check json.Number (see schema.go) + isLowerInt := t.Format == integerFormatInt64 && format == integerFormatInt32 + isLowerFloat := t.Format == numberFormatFloat64 && format == numberFormatFloat32 + isFloatInt := schType == numberType && conv.IsFloat64AJSONInteger(val.Float()) && t.Type.Contains(integerType) + isIntFloat := schType == integerType && t.Type.Contains(numberType) + + if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !t.Type.Contains(schType) && format != t.Format && !isFloatInt && !isIntFloat && !isLowerInt && !isLowerFloat { + // TODO: test case + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult) + } + + if !t.Type.Contains(numberType) && !t.Type.Contains(integerType) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) { + return emptyResult + } + + if !t.Type.Contains(schType) && !isFloatInt && !isIntFloat { + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType), t.Options.recycleResult) + } + + return emptyResult +} + +func (t *typeValidator) schemaInfoForType(data any) (string, string) { + // internal type to JSON type with swagger 2.0 format (with go-openapi/strfmt extensions), + // see https://github.com/go-openapi/strfmt/blob/master/README.md + // TODO: this switch really is some sort of reverse lookup for formats. It should be provided by strfmt. 
+ switch data.(type) { + case []byte, strfmt.Base64, *strfmt.Base64: + return stringType, stringFormatByte + case strfmt.CreditCard, *strfmt.CreditCard: + return stringType, stringFormatCreditCard + case strfmt.Date, *strfmt.Date: + return stringType, stringFormatDate + case strfmt.DateTime, *strfmt.DateTime: + return stringType, stringFormatDateTime + case strfmt.Duration, *strfmt.Duration: + return stringType, stringFormatDuration + case fileutils.File, *fileutils.File: + return fileType, "" + case strfmt.Email, *strfmt.Email: + return stringType, stringFormatEmail + case strfmt.HexColor, *strfmt.HexColor: + return stringType, stringFormatHexColor + case strfmt.Hostname, *strfmt.Hostname: + return stringType, stringFormatHostname + case strfmt.IPv4, *strfmt.IPv4: + return stringType, stringFormatIPv4 + case strfmt.IPv6, *strfmt.IPv6: + return stringType, stringFormatIPv6 + case strfmt.ISBN, *strfmt.ISBN: + return stringType, stringFormatISBN + case strfmt.ISBN10, *strfmt.ISBN10: + return stringType, stringFormatISBN10 + case strfmt.ISBN13, *strfmt.ISBN13: + return stringType, stringFormatISBN13 + case strfmt.MAC, *strfmt.MAC: + return stringType, stringFormatMAC + case strfmt.ObjectId, *strfmt.ObjectId: + return stringType, stringFormatBSONObjectID + case strfmt.Password, *strfmt.Password: + return stringType, stringFormatPassword + case strfmt.RGBColor, *strfmt.RGBColor: + return stringType, stringFormatRGBColor + case strfmt.SSN, *strfmt.SSN: + return stringType, stringFormatSSN + case strfmt.URI, *strfmt.URI: + return stringType, stringFormatURI + case strfmt.UUID, *strfmt.UUID: + return stringType, stringFormatUUID + case strfmt.UUID3, *strfmt.UUID3: + return stringType, stringFormatUUID3 + case strfmt.UUID4, *strfmt.UUID4: + return stringType, stringFormatUUID4 + case strfmt.UUID5, *strfmt.UUID5: + return stringType, stringFormatUUID5 + // TODO: missing binary (io.ReadCloser) + // TODO: missing json.Number + default: + val := reflect.ValueOf(data) + tpe := val.Type() + switch tpe.Kind() { //nolint:exhaustive + case reflect.Bool: + return booleanType, "" + case reflect.String: + return stringType, "" + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: + // NOTE: that is the spec. With go-openapi, is that not uint32 for unsigned integers? + return integerType, integerFormatInt32 + case reflect.Int, reflect.Int64, reflect.Uint, reflect.Uint64: + return integerType, integerFormatInt64 + case reflect.Float32: + // NOTE: is that not numberFormatFloat? + return numberType, numberFormatFloat32 + case reflect.Float64: + // NOTE: is that not "double"? + return numberType, numberFormatFloat64 + // NOTE: go arrays (reflect.Array) are not supported (fixed length) + case reflect.Slice: + return arrayType, "" + case reflect.Map, reflect.Struct: + return objectType, "" + case reflect.Interface: + // What to do here? 
+ panic("dunno what to do here") + case reflect.Ptr: + return t.schemaInfoForType(reflect.Indirect(val).Interface()) + } + } + return "", "" +} + +func (t *typeValidator) redeem() { + pools.poolOfTypeValidators.RedeemValidator(t) +} diff --git a/vendor/github.com/go-openapi/validate/update-fixtures.sh b/vendor/github.com/go-openapi/validate/update-fixtures.sh new file mode 100644 index 000000000000..21b06e2b09a1 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/update-fixtures.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -eu -o pipefail +dir=$(git rev-parse --show-toplevel) +scratch=$(mktemp -d -t tmp.XXXXXXXXXX) + +function finish { + rm -rf "$scratch" +} +trap finish EXIT SIGHUP SIGINT SIGTERM + +cd "$scratch" +git clone https://github.com/json-schema-org/JSON-Schema-Test-Suite Suite +cp -r Suite/tests/draft4/* "$dir/fixtures/jsonschema_suite" +cp -a Suite/remotes "$dir/fixtures/jsonschema_suite" diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go new file mode 100644 index 000000000000..289a847fc7b0 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -0,0 +1,1040 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// An EntityValidator is an interface for things that can validate entities +type EntityValidator interface { + Validate(any) *Result +} + +type valueValidator interface { + SetPath(path string) + Applies(any, reflect.Kind) bool + Validate(any) *Result +} + +type itemsValidator struct { + items *spec.Items + root any + path string + in string + validators [6]valueValidator + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newItemsValidator(path, in string, items *spec.Items, root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *itemsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var iv *itemsValidator + if opts.recycleValidators { + iv = pools.poolOfItemsValidators.BorrowValidator() + } else { + iv = new(itemsValidator) + } + + iv.path = path + iv.in = in + iv.items = items + iv.root = root + iv.KnownFormats = formats + iv.Options = opts + iv.validators = [6]valueValidator{ + iv.typeValidator(), + iv.stringValidator(), + iv.formatValidator(), + iv.numberValidator(), + iv.sliceValidator(), + iv.commonValidator(), + } + return iv +} + +func (i *itemsValidator) Validate(index int, data any) *Result { + if i.Options.recycleValidators { + defer func() { + i.redeemChildren() + i.redeem() + }() + } + + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + var result *Result + if i.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + + path := fmt.Sprintf("%s.%d", i.path, index) + + for idx, validator := range i.validators { + if !validator.Applies(i.root, kind) { + if i.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + validator.SetPath(path) + err := validator.Validate(data) + if i.Options.recycleValidators { + i.validators[idx] = 
nil // prevents further (unsafe) usage + } + if err != nil { + result.Inc() + if err.HasErrors() { + result.Merge(err) + + break + } + + result.Merge(err) + } + } + + return result +} + +func (i *itemsValidator) typeValidator() valueValidator { + return newTypeValidator( + i.path, + i.in, + spec.StringOrArray([]string{i.items.Type}), + i.items.Nullable, + i.items.Format, + i.Options, + ) +} + +func (i *itemsValidator) commonValidator() valueValidator { + return newBasicCommonValidator( + "", + i.in, + i.items.Default, + i.items.Enum, + i.Options, + ) +} + +func (i *itemsValidator) sliceValidator() valueValidator { + return newBasicSliceValidator( + "", + i.in, + i.items.Default, + i.items.MaxItems, + i.items.MinItems, + i.items.UniqueItems, + i.items.Items, + i.root, + i.KnownFormats, + i.Options, + ) +} + +func (i *itemsValidator) numberValidator() valueValidator { + return newNumberValidator( + "", + i.in, + i.items.Default, + i.items.MultipleOf, + i.items.Maximum, + i.items.ExclusiveMaximum, + i.items.Minimum, + i.items.ExclusiveMinimum, + i.items.Type, + i.items.Format, + i.Options, + ) +} + +func (i *itemsValidator) stringValidator() valueValidator { + return newStringValidator( + "", + i.in, + i.items.Default, + false, // Required + false, // AllowEmpty + i.items.MaxLength, + i.items.MinLength, + i.items.Pattern, + i.Options, + ) +} + +func (i *itemsValidator) formatValidator() valueValidator { + return newFormatValidator( + "", + i.in, + i.items.Format, + i.KnownFormats, + i.Options, + ) +} + +func (i *itemsValidator) redeem() { + pools.poolOfItemsValidators.RedeemValidator(i) +} + +func (i *itemsValidator) redeemChildren() { + for idx, validator := range i.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // free up allocated children if not in pool + } +} + +type basicCommonValidator struct { + Path string + In string + Default any + Enum []any + Options *SchemaValidatorOptions +} + +func newBasicCommonValidator(path, in string, def any, enum []any, opts *SchemaValidatorOptions) *basicCommonValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var b *basicCommonValidator + if opts.recycleValidators { + b = pools.poolOfBasicCommonValidators.BorrowValidator() + } else { + b = new(basicCommonValidator) + } + + b.Path = path + b.In = in + b.Default = def + b.Enum = enum + b.Options = opts + + return b +} + +func (b *basicCommonValidator) SetPath(path string) { + b.Path = path +} + +func (b *basicCommonValidator) Applies(source any, _ reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Header: + return true + default: + return false + } +} + +func (b *basicCommonValidator) Validate(data any) (res *Result) { + if b.Options.recycleValidators { + defer func() { + b.redeem() + }() + } + + if len(b.Enum) == 0 { + return nil + } + + for _, enumValue := range b.Enum { + actualType := reflect.TypeOf(enumValue) + if actualType == nil { // Safeguard + continue + } + + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && + expectedValue.Type().ConvertibleTo(actualType) && + reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil + } + } + + return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum), b.Options.recycleResult) +} + +func (b 
*basicCommonValidator) redeem() { + pools.poolOfBasicCommonValidators.RedeemValidator(b) +} + +// A HeaderValidator has very limited subset of validations to apply +type HeaderValidator struct { + name string + header *spec.Header + validators [6]valueValidator + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +// NewHeaderValidator creates a new header validator object +func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newHeaderValidator(name, header, formats, opts) +} + +func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, opts *SchemaValidatorOptions) *HeaderValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *HeaderValidator + if opts.recycleValidators { + p = pools.poolOfHeaderValidators.BorrowValidator() + } else { + p = new(HeaderValidator) + } + + p.name = name + p.header = header + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + name, + "header", + spec.StringOrArray([]string{header.Type}), + header.Nullable, + header.Format, + p.Options, + ), + p.stringValidator(), + p.formatValidator(), + p.numberValidator(), + p.sliceValidator(), + p.commonValidator(), + } + + return p +} + +// Validate the value of the header against its schema +func (p *HeaderValidator) Validate(data any) *Result { + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + + for idx, validator := range p.validators { + if !validator.Applies(p.header, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + if err.HasErrors() { + result.Merge(err) + break + } + result.Merge(err) + } + } + + return result +} + +func (p *HeaderValidator) commonValidator() valueValidator { + return newBasicCommonValidator( + p.name, + "response", + p.header.Default, + p.header.Enum, + p.Options, + ) +} + +func (p *HeaderValidator) sliceValidator() valueValidator { + return newBasicSliceValidator( + p.name, + "response", + p.header.Default, + p.header.MaxItems, + p.header.MinItems, + p.header.UniqueItems, + p.header.Items, + p.header, + p.KnownFormats, + p.Options, + ) +} + +func (p *HeaderValidator) numberValidator() valueValidator { + return newNumberValidator( + p.name, + "response", + p.header.Default, + p.header.MultipleOf, + p.header.Maximum, + p.header.ExclusiveMaximum, + p.header.Minimum, + p.header.ExclusiveMinimum, + p.header.Type, + p.header.Format, + p.Options, + ) +} + +func (p *HeaderValidator) stringValidator() valueValidator { + return newStringValidator( + p.name, + "response", + p.header.Default, + true, + false, + p.header.MaxLength, + p.header.MinLength, + 
p.header.Pattern, + p.Options, + ) +} + +func (p *HeaderValidator) formatValidator() valueValidator { + return newFormatValidator( + p.name, + "response", + p.header.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *HeaderValidator) redeem() { + pools.poolOfHeaderValidators.RedeemValidator(p) +} + +func (p *HeaderValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool + } +} + +// A ParamValidator has very limited subset of validations to apply +type ParamValidator struct { + param *spec.Parameter + validators [6]valueValidator + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +// NewParamValidator creates a new param validator object +func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newParamValidator(param, formats, opts) +} + +func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *SchemaValidatorOptions) *ParamValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *ParamValidator + if opts.recycleValidators { + p = pools.poolOfParamValidators.BorrowValidator() + } else { + p = new(ParamValidator) + } + + p.param = param + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + param.Name, + param.In, + spec.StringOrArray([]string{param.Type}), + param.Nullable, + param.Format, + p.Options, + ), + p.stringValidator(), + p.formatValidator(), + p.numberValidator(), + p.sliceValidator(), + p.commonValidator(), + } + + return p +} + +// Validate the data against the description of the parameter +func (p *ParamValidator) Validate(data any) *Result { + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + + // TODO: validate type + for idx, validator := range p.validators { + if !validator.Applies(p.param, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + if err.HasErrors() { + result.Merge(err) + break + } + result.Merge(err) + } + } + + return result +} + +func (p *ParamValidator) commonValidator() valueValidator { + return newBasicCommonValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Enum, + p.Options, + ) +} + +func (p *ParamValidator) sliceValidator() valueValidator { + return newBasicSliceValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MaxItems, + p.param.MinItems, + 
p.param.UniqueItems, + p.param.Items, + p.param, + p.KnownFormats, + p.Options, + ) +} + +func (p *ParamValidator) numberValidator() valueValidator { + return newNumberValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MultipleOf, + p.param.Maximum, + p.param.ExclusiveMaximum, + p.param.Minimum, + p.param.ExclusiveMinimum, + p.param.Type, + p.param.Format, + p.Options, + ) +} + +func (p *ParamValidator) stringValidator() valueValidator { + return newStringValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Required, + p.param.AllowEmptyValue, + p.param.MaxLength, + p.param.MinLength, + p.param.Pattern, + p.Options, + ) +} + +func (p *ParamValidator) formatValidator() valueValidator { + return newFormatValidator( + p.param.Name, + p.param.In, + p.param.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *ParamValidator) redeem() { + pools.poolOfParamValidators.RedeemValidator(p) +} + +func (p *ParamValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool + } +} + +type basicSliceValidator struct { + Path string + In string + Default any + MaxItems *int64 + MinItems *int64 + UniqueItems bool + Items *spec.Items + Source any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newBasicSliceValidator( + path, in string, + def any, maxItems, minItems *int64, uniqueItems bool, items *spec.Items, + source any, formats strfmt.Registry, + opts *SchemaValidatorOptions) *basicSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *basicSliceValidator + if opts.recycleValidators { + s = pools.poolOfBasicSliceValidators.BorrowValidator() + } else { + s = new(basicSliceValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.MaxItems = maxItems + s.MinItems = minItems + s.UniqueItems = uniqueItems + s.Items = items + s.Source = source + s.KnownFormats = formats + s.Options = opts + + return s +} + +func (s *basicSliceValidator) SetPath(path string) { + s.Path = path +} + +func (s *basicSliceValidator) Applies(source any, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Items, *spec.Header: + return kind == reflect.Slice + default: + return false + } +} + +func (s *basicSliceValidator) Validate(data any) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + val := reflect.ValueOf(data) + + size := int64(val.Len()) + if s.MinItems != nil { + if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.MaxItems != nil { + if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.UniqueItems { + if err := UniqueItems(s.Path, s.In, data); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.Items == nil { + return nil + } + + for i := range int(size) { + itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options) + ele := val.Index(i) + if err := itemsValidator.Validate(i, ele.Interface()); err != nil { + if err.HasErrors() { + return err + } + if err.wantsRedeemOnMerge { + 
pools.poolOfResults.RedeemResult(err) + } + } + } + + return nil +} + +func (s *basicSliceValidator) redeem() { + pools.poolOfBasicSliceValidators.RedeemValidator(s) +} + +type numberValidator struct { + Path string + In string + Default any + MultipleOf *float64 + Maximum *float64 + ExclusiveMaximum bool + Minimum *float64 + ExclusiveMinimum bool + // Allows for more accurate behavior regarding integers + Type string + Format string + Options *SchemaValidatorOptions +} + +func newNumberValidator( + path, in string, def any, + multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool, + typ, format string, + opts *SchemaValidatorOptions) *numberValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var n *numberValidator + if opts.recycleValidators { + n = pools.poolOfNumberValidators.BorrowValidator() + } else { + n = new(numberValidator) + } + + n.Path = path + n.In = in + n.Default = def + n.MultipleOf = multipleOf + n.Maximum = maximum + n.ExclusiveMaximum = exclusiveMaximum + n.Minimum = minimum + n.ExclusiveMinimum = exclusiveMinimum + n.Type = typ + n.Format = format + n.Options = opts + + return n +} + +func (n *numberValidator) SetPath(path string) { + n.Path = path +} + +func (n *numberValidator) Applies(source any, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: + isInt := kind >= reflect.Int && kind <= reflect.Uint64 + isFloat := kind == reflect.Float32 || kind == reflect.Float64 + return isInt || isFloat + default: + return false + } +} + +// Validate provides a validator for generic JSON numbers, +// +// By default, numbers are internally represented as float64. +// Formats float, or float32 may alter this behavior by mapping to float32. +// A special validation process is followed for integers, with optional "format": +// this is an attempt to provide a validation with native types. +// +// NOTE: since the constraint specified (boundary, multipleOf) is unmarshalled +// as float64, loss of information remains possible (e.g. on very large integers). +// +// Since this value directly comes from the unmarshalling, it is not possible +// at this stage of processing to check further and guarantee the correctness of such values. +// +// Normally, the JSON Number.MAX_SAFE_INTEGER (resp. Number.MIN_SAFE_INTEGER) +// would check we do not get such a loss. +// +// If this is the case, replace AddErrors() by AddWarnings() and IsValid() by !HasWarnings(). +// +// TODO: consider replacing boundary check errors by simple warnings. +// +// TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) +func (n *numberValidator) Validate(val any) *Result { + if n.Options.recycleValidators { + defer func() { + n.redeem() + }() + } + + var res, resMultiple, resMinimum, resMaximum *Result + if n.Options.recycleResult { + res = pools.poolOfResults.BorrowResult() + } else { + res = new(Result) + } + + // Used only to attempt to validate constraint on value, + // even though value or constraint specified do not match type and format + data := valueHelp.asFloat64(val) + + // Is the provided value within the range of the specified numeric type and format? + res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path)) + + if n.MultipleOf != nil { + resMultiple = pools.poolOfResults.BorrowResult() + + // Is the constraint specifier within the range of the specific numeric type and format? 
+ resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path)) + if resMultiple.IsValid() { + // Constraint validated with compatible types + if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil { + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil { + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } + } + + if n.Maximum != nil { + resMaximum = pools.poolOfResults.BorrowResult() + + // Is the constraint specifier within the range of the specific numeric type and format? + resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path)) + if resMaximum.IsValid() { + // Constraint validated with compatible types + if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil { + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil { + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } + } + + if n.Minimum != nil { + resMinimum = pools.poolOfResults.BorrowResult() + + // Is the constraint specifier within the range of the specific numeric type and format? + resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path)) + if resMinimum.IsValid() { + // Constraint validated with compatible types + if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil { + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil { + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } + } + res.Merge(resMultiple, resMinimum, resMaximum) + res.Inc() + + return res +} + +func (n *numberValidator) redeem() { + pools.poolOfNumberValidators.RedeemValidator(n) +} + +type stringValidator struct { + Path string + In string + Default any + Required bool + AllowEmptyValue bool + MaxLength *int64 + MinLength *int64 + Pattern string + Options *SchemaValidatorOptions +} + +func newStringValidator( + path, in string, + def any, required, allowEmpty bool, maxLength, minLength *int64, pattern string, + opts *SchemaValidatorOptions) *stringValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *stringValidator + if opts.recycleValidators { + s = pools.poolOfStringValidators.BorrowValidator() + } else { + s = new(stringValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.Required = required + s.AllowEmptyValue = allowEmpty + s.MaxLength = maxLength + s.MinLength = minLength + s.Pattern = pattern + s.Options = opts + + return s +} + +func (s *stringValidator) SetPath(path string) { + s.Path = path +} + +func (s *stringValidator) Applies(source any, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: + return kind == reflect.String + default: + return false + } +} + +func (s *stringValidator) Validate(val any) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + + data, ok := val.(string) + if !ok { + return 
errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val), s.Options.recycleResult) + } + + if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") { + if err := RequiredString(s.Path, s.In, data); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.MaxLength != nil { + if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.MinLength != nil { + if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.Pattern != "" { + if err := Pattern(s.Path, s.In, data, s.Pattern); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + return nil +} + +func (s *stringValidator) redeem() { + pools.poolOfStringValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go new file mode 100644 index 000000000000..e7dd5c8d3ab5 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/values.go @@ -0,0 +1,448 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "context" + "fmt" + "reflect" + "strings" + "unicode/utf8" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" +) + +type valueError string + +func (e valueError) Error() string { + return string(e) +} + +// ErrValue indicates that a value validation occurred +const ErrValue valueError = "value validation error" + +// Enum validates if the data is a member of the enum +func Enum(path, in string, data any, enum any) *errors.Validation { + return EnumCase(path, in, data, enum, true) +} + +// EnumCase validates if the data is a member of the enum and may respect case-sensitivity for strings +func EnumCase(path, in string, data any, enum any, caseSensitive bool) *errors.Validation { + val := reflect.ValueOf(enum) + if val.Kind() != reflect.Slice { + return nil + } + + dataString := convertEnumCaseStringKind(data, caseSensitive) + values := make([]any, 0, val.Len()) + for i := range val.Len() { + ele := val.Index(i) + enumValue := ele.Interface() + if data != nil { + if reflect.DeepEqual(data, enumValue) { + return nil + } + enumString := convertEnumCaseStringKind(enumValue, caseSensitive) + if dataString != nil && enumString != nil && strings.EqualFold(*dataString, *enumString) { + return nil + } + actualType := reflect.TypeOf(enumValue) + if actualType == nil { // Safeguard. 
Frankly, I don't know how we may get a nil + continue + } + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil + } + } + } + values = append(values, enumValue) + } + return errors.EnumFail(path, in, data, values) +} + +// convertEnumCaseStringKind converts interface if it is kind of string and case insensitivity is set +func convertEnumCaseStringKind(value any, caseSensitive bool) *string { + if caseSensitive { + return nil + } + + val := reflect.ValueOf(value) + if val.Kind() != reflect.String { + return nil + } + + str := fmt.Sprintf("%v", value) + return &str +} + +// MinItems validates that there are at least n items in a slice +func MinItems(path, in string, size, minimum int64) *errors.Validation { + if size < minimum { + return errors.TooFewItems(path, in, minimum, size) + } + return nil +} + +// MaxItems validates that there are at most n items in a slice +func MaxItems(path, in string, size, maximum int64) *errors.Validation { + if size > maximum { + return errors.TooManyItems(path, in, maximum, size) + } + return nil +} + +// UniqueItems validates that the provided slice has unique elements +func UniqueItems(path, in string, data any) *errors.Validation { + val := reflect.ValueOf(data) + if val.Kind() != reflect.Slice { + return nil + } + unique := make([]any, 0, val.Len()) + for i := range val.Len() { + v := val.Index(i).Interface() + for _, u := range unique { + if reflect.DeepEqual(v, u) { + return errors.DuplicateItems(path, in) + } + } + unique = append(unique, v) + } + return nil +} + +// MinLength validates a string for minimum length +func MinLength(path, in, data string, minLength int64) *errors.Validation { + strLen := int64(utf8.RuneCountInString(data)) + if strLen < minLength { + return errors.TooShort(path, in, minLength, data) + } + return nil +} + +// MaxLength validates a string for maximum length +func MaxLength(path, in, data string, maxLength int64) *errors.Validation { + strLen := int64(utf8.RuneCountInString(data)) + if strLen > maxLength { + return errors.TooLong(path, in, maxLength, data) + } + return nil +} + +// ReadOnly validates an interface for readonly +func ReadOnly(ctx context.Context, path, in string, data any) *errors.Validation { + + // read only is only validated when operationType is request + if op := extractOperationType(ctx); op != request { + return nil + } + + // data must be of zero value of its type + val := reflect.ValueOf(data) + if val.IsValid() { + if reflect.DeepEqual(reflect.Zero(val.Type()).Interface(), val.Interface()) { + return nil + } + } else { + return nil + } + + return errors.ReadOnly(path, in, data) +} + +// Required validates an interface for requiredness +func Required(path, in string, data any) *errors.Validation { + val := reflect.ValueOf(data) + if val.IsValid() { + if reflect.DeepEqual(reflect.Zero(val.Type()).Interface(), val.Interface()) { + return errors.Required(path, in, data) + } + return nil + } + return errors.Required(path, in, data) +} + +// RequiredString validates a string for requiredness +func RequiredString(path, in, data string) *errors.Validation { + if data == "" { + return errors.Required(path, in, data) + } + return nil +} + +// RequiredNumber validates a number for requiredness +func RequiredNumber(path, in string, data float64) *errors.Validation { + if data == 0 { + return 
errors.Required(path, in, data) + } + return nil +} + +// Pattern validates a string against a regular expression +func Pattern(path, in, data, pattern string) *errors.Validation { + re, err := compileRegexp(pattern) + if err != nil { + return errors.FailedPattern(path, in, fmt.Sprintf("%s, but pattern is invalid: %s", pattern, err.Error()), data) + } + if !re.MatchString(data) { + return errors.FailedPattern(path, in, pattern, data) + } + return nil +} + +// MaximumInt validates if a number is smaller than a given maximum +func MaximumInt(path, in string, data, maximum int64, exclusive bool) *errors.Validation { + if (!exclusive && data > maximum) || (exclusive && data >= maximum) { + return errors.ExceedsMaximumInt(path, in, maximum, exclusive, data) + } + return nil +} + +// MaximumUint validates if a number is smaller than a given maximum +func MaximumUint(path, in string, data, maximum uint64, exclusive bool) *errors.Validation { + if (!exclusive && data > maximum) || (exclusive && data >= maximum) { + return errors.ExceedsMaximumUint(path, in, maximum, exclusive, data) + } + return nil +} + +// Maximum validates if a number is smaller than a given maximum +func Maximum(path, in string, data, maximum float64, exclusive bool) *errors.Validation { + if (!exclusive && data > maximum) || (exclusive && data >= maximum) { + return errors.ExceedsMaximum(path, in, maximum, exclusive, data) + } + return nil +} + +// Minimum validates if a number is smaller than a given minimum +func Minimum(path, in string, data, minimum float64, exclusive bool) *errors.Validation { + if (!exclusive && data < minimum) || (exclusive && data <= minimum) { + return errors.ExceedsMinimum(path, in, minimum, exclusive, data) + } + return nil +} + +// MinimumInt validates if a number is smaller than a given minimum +func MinimumInt(path, in string, data, minimum int64, exclusive bool) *errors.Validation { + if (!exclusive && data < minimum) || (exclusive && data <= minimum) { + return errors.ExceedsMinimumInt(path, in, minimum, exclusive, data) + } + return nil +} + +// MinimumUint validates if a number is smaller than a given minimum +func MinimumUint(path, in string, data, minimum uint64, exclusive bool) *errors.Validation { + if (!exclusive && data < minimum) || (exclusive && data <= minimum) { + return errors.ExceedsMinimumUint(path, in, minimum, exclusive, data) + } + return nil +} + +// MultipleOf validates if the provided number is a multiple of the factor +func MultipleOf(path, in string, data, factor float64) *errors.Validation { + // multipleOf factor must be positive + if factor <= 0 { + return errors.MultipleOfMustBePositive(path, in, factor) + } + var mult float64 + if factor < 1 { + mult = 1 / factor * data + } else { + mult = data / factor + } + if !conv.IsFloat64AJSONInteger(mult) { + return errors.NotMultipleOf(path, in, factor, data) + } + return nil +} + +// MultipleOfInt validates if the provided integer is a multiple of the factor +func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation { + // multipleOf factor must be positive + if factor <= 0 { + return errors.MultipleOfMustBePositive(path, in, factor) + } + mult := data / factor + if mult*factor != data { + return errors.NotMultipleOf(path, in, factor, data) + } + return nil +} + +// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor +func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation { + // multipleOf factor must be positive + if factor == 0 { + return 
errors.MultipleOfMustBePositive(path, in, factor) + } + mult := data / factor + if mult*factor != data { + return errors.NotMultipleOf(path, in, factor, data) + } + return nil +} + +// FormatOf validates if a string matches a format in the format registry +func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.Validation { + if registry == nil { + registry = strfmt.Default + } + if ok := registry.ContainsName(format); !ok { + return errors.InvalidTypeName(format) + } + if ok := registry.Validates(format, data); !ok { + return errors.InvalidType(path, in, format, data) + } + return nil +} + +// MaximumNativeType provides native type constraint validation as a facade +// to various numeric types versions of Maximum constraint check. +// +// Assumes that any possible loss conversion during conversion has been +// checked beforehand. +// +// NOTE: currently, the max value is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MaximumNativeType(path, in string, val any, maximum float64, exclusive bool) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MaximumInt(path, in, value, int64(maximum), exclusive) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + if maximum < 0 { + return errors.ExceedsMaximum(path, in, maximum, exclusive, val) + } + return MaximumUint(path, in, value, uint64(maximum), exclusive) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return Maximum(path, in, value, maximum, exclusive) + } +} + +// MinimumNativeType provides native type constraint validation as a facade +// to various numeric types versions of Minimum constraint check. +// +// Assumes that any possible loss conversion during conversion has been +// checked beforehand. +// +// NOTE: currently, the min value is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MinimumNativeType(path, in string, val any, minimum float64, exclusive bool) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MinimumInt(path, in, value, int64(minimum), exclusive) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + if minimum < 0 { + return nil + } + return MinimumUint(path, in, value, uint64(minimum), exclusive) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return Minimum(path, in, value, minimum, exclusive) + } +} + +// MultipleOfNativeType provides native type constraint validation as a facade +// to various numeric types version of MultipleOf constraint check. +// +// Assumes that any possible loss conversion during conversion has been +// checked beforehand. 
+// +// NOTE: currently, the multipleOf factor is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MultipleOfNativeType(path, in string, val any, multipleOf float64) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MultipleOfInt(path, in, value, int64(multipleOf)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + return MultipleOfUint(path, in, value, uint64(multipleOf)) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return MultipleOf(path, in, value, multipleOf) + } +} + +// IsValueValidAgainstRange checks that a numeric value is compatible with +// the range defined by Type and Format, that is, may be converted without loss. +// +// NOTE: this check is about type capacity and not formal verification such as: 1.0 != 1L +func IsValueValidAgainstRange(val any, typeName, format, prefix, path string) error { + kind := reflect.ValueOf(val).Type().Kind() + + // What is the string representation of val + var stringRep string + switch kind { //nolint:exhaustive + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + stringRep = conv.FormatUinteger(valueHelp.asUint64(val)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stringRep = conv.FormatInteger(valueHelp.asInt64(val)) + case reflect.Float32, reflect.Float64: + stringRep = conv.FormatFloat(valueHelp.asFloat64(val)) + default: + return fmt.Errorf("%s value number range checking called with invalid (non numeric) val type in %s: %w", prefix, path, ErrValue) + } + + var errVal error + + switch typeName { + case integerType: + switch format { + case integerFormatInt32: + _, errVal = conv.ConvertInt32(stringRep) + case integerFormatUInt32: + _, errVal = conv.ConvertUint32(stringRep) + case integerFormatUInt64: + _, errVal = conv.ConvertUint64(stringRep) + case integerFormatInt64: + fallthrough + default: + _, errVal = conv.ConvertInt64(stringRep) + } + case numberType: + fallthrough + default: + switch format { + case numberFormatFloat, numberFormatFloat32: + _, errVal = conv.ConvertFloat32(stringRep) + case numberFormatDouble, numberFormatFloat64: + fallthrough + default: + // No check can be performed here since + // no number beyond float64 is supported + } + } + if errVal != nil { // We don't report the actual errVal from strconv + if format != "" { + errVal = fmt.Errorf("%s value must be of type %s with format %s in %s: %w", prefix, typeName, format, path, ErrValue) + } else { + errVal = fmt.Errorf("%s value must be of type %s (default format) in %s: %w", prefix, typeName, path, ErrValue) + } + } + return errVal +} diff --git a/vendor/github.com/google/certificate-transparency-go/.gitignore b/vendor/github.com/google/certificate-transparency-go/.gitignore new file mode 100644 index 000000000000..8c13cd1c9d3d --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/.gitignore @@ -0,0 +1,28 @@ +*.iml +*.swo +*.swp +*.tfstate +*.tfstate.backup +*~ +/.idea +/certcheck +/chainfix +/coverage.txt +/createtree +/crlcheck +/ctclient +/ct_server +/ct_hammer +/data +/dumpscts +/findlog +/goshawk 
+/gosmin +/gossip_server +/preloader +/scanlog +/sctcheck +/sctscan +/trillian_log_server +/trillian_log_signer +/trillian.json diff --git a/vendor/github.com/google/certificate-transparency-go/.golangci.yaml b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml new file mode 100644 index 000000000000..e9b683b2bd18 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml @@ -0,0 +1,39 @@ +version: "2" +linters: + settings: + depguard: + rules: + main: + deny: + - pkg: ^golang.org/x/net/context$ + - pkg: github.com/gogo/protobuf/proto + - pkg: encoding/asn1 + - pkg: crypto/x509 + gocyclo: + min-complexity: 25 + exclusions: + generated: lax + rules: + - linters: + - staticcheck + text: 'SA1019: grpc.Dial is deprecated: use NewClient instead' + - linters: + - staticcheck + text: 'SA1019: grpc.DialContext is deprecated: use NewClient instead' + - linters: + - staticcheck + text: 'SA1019: grpc.WithBlock is deprecated: this DialOption is not supported by NewClient' + paths: + - (^|/)x509($|/) + - (^|/)x509util($|/) + - (^|/)asn1($|/) + - third_party$ + - builtin$ + - examples$ +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/google/certificate-transparency-go/AUTHORS b/vendor/github.com/google/certificate-transparency-go/AUTHORS new file mode 100644 index 000000000000..ad514665ef63 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/AUTHORS @@ -0,0 +1,29 @@ +# This is the official list of benchmark authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +# +# Please keep the list sorted. + +Alex Cohn +Ed Maste +Elisha Silas +Fiaz Hossain +Google LLC +Internet Security Research Group +Jeff Trawick +Katriel Cohn-Gordon +Laël Cellier +Mark Schloesser +NORDUnet A/S +Nicholas Galbreath +Oliver Weidner +PrimeKey Solutions AB +Ruslan Kovalov +Sectigo Limited +Venafi, Inc. +Vladimir Rutsky +Ximin Luo diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md new file mode 100644 index 000000000000..0206cfe12482 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md @@ -0,0 +1,1311 @@ +# CERTIFICATE-TRANSPARENCY-GO Changelog + +## HEAD + +## v1.3.2 + +### Misc + +* [migrillian] remove etcd support in #1699 +* Bump golangci-lint from 1.55.1 to 1.61.0 (developers should update to this version). +* Update ctclient tool to support SCT extensions field by @liweitianux in https://github.com/google/certificate-transparency-go/pull/1645 +* Bump go to 1.23 +* [ct_hammer] support HTTPS and Bearer token for Authentication. +* [preloader] support Bearer token Authentication for non temporal logs. +* [preloader] support end indexes +* [CTFE] Short cache max-age when get-entries returns fewer entries than requested by @robstradling in https://github.com/google/certificate-transparency-go/pull/1707 +* [CTFE] Disalllow mismatching signature algorithm identifiers in #702. 
+* [jsonclient] surface HTTP Do and Read errors #1695 by @FiloSottile + +### CTFE Storage Saving: Extra Data Issuance Chain Deduplication + +* Suppress unnecessary duplicate key errors in the IssuanceChainStorage PostgreSQL implementation by @robstradling in https://github.com/google/certificate-transparency-go/pull/1678 +* Only store IssuanceChain if not cached by @robstradling in https://github.com/google/certificate-transparency-go/pull/1679 + +### CTFE Rate Limiting Of Non-Fresh Submissions + +To protect a log from being flooded with requests for "old" certificates, optional rate limiting for "non-fresh submissions" can be configured by providing the following flags: + +- `non_fresh_submission_age` +- `non_fresh_submission_burst` +- `non_fresh_submission_limit` + +This can help to ensure that the log maintains its ability to (1) accept "fresh" submissions and (2) distribute all log entries to monitors. + +* [CTFE] Configurable mechanism to rate-limit non-fresh submissions by @robstradling in https://github.com/google/certificate-transparency-go/pull/1698 + +### Dependency updates + +* Bump the docker-deps group across 5 directories with 3 updates (#1705) +* Bump google.golang.org/grpc from 1.72.1 to 1.72.2 in the all-deps group (#1704) +* Bump github.com/go-jose/go-jose/v4 in the go_modules group (#1700) +* Bump the all-deps group with 7 updates (#1701) +* Bump the all-deps group with 7 updates (#1693) +* Bump the docker-deps group across 4 directories with 1 update (#1694) +* Bump github/codeql-action from 3.28.13 to 3.28.16 in the all-deps group (#1692) +* Bump the all-deps group across 1 directory with 7 updates (#1688) +* Bump distroless/base-debian12 (#1686) +* Bump golangci/golangci-lint-action from 6.5.1 to 7.0.0 in the all-deps group (#1685) +* Bump the all-deps group with 4 updates (#1681) +* Bump the all-deps group with 6 updates (#1683) +* Bump the docker-deps group across 4 directories with 2 updates (#1682) +* Bump github.com/golang-jwt/jwt/v4 in the go_modules group (#1680) +* Bump golangci/golangci-lint-action in the all-deps group (#1676) +* Bump the all-deps group with 2 updates (#1677) +* Bump github/codeql-action from 3.28.10 to 3.28.11 in the all-deps group (#1670) +* Bump the all-deps group with 8 updates (#1672) +* Bump the docker-deps group across 4 directories with 1 update (#1671) +* Bump the docker-deps group across 4 directories with 1 update (#1668) +* Bump the all-deps group with 4 updates (#1666) +* Bump golangci-lint from 1.55.1 to 1.61.0 (#1667) +* Bump the all-deps group with 3 updates (#1665) +* Bump github.com/spf13/cobra from 1.8.1 to 1.9.1 in the all-deps group (#1660) +* Bump the docker-deps group across 5 directories with 2 updates (#1661) +* Bump golangci/golangci-lint-action in the all-deps group (#1662) +* Bump the docker-deps group across 4 directories with 1 update (#1656) +* Bump the all-deps group with 2 updates (#1654) +* Bump the all-deps group with 4 updates (#1657) +* Bump github/codeql-action from 3.28.5 to 3.28.8 in the all-deps group (#1652) +* Bump github.com/spf13/pflag from 1.0.5 to 1.0.6 in the all-deps group (#1651) +* Bump the all-deps group with 2 updates (#1649) +* Bump the all-deps group with 5 updates (#1650) +* Bump the docker-deps group across 5 directories with 3 updates (#1648) +* Bump google.golang.org/protobuf in the all-deps group (#1647) +* Bump golangci/golangci-lint-action in the all-deps group (#1646) + +## v1.3.1 + +* Add AllLogListSignatureURL by @AlexLaroche in 
https://github.com/google/certificate-transparency-go/pull/1634 +* Add TiledLogs to log list JSON by @mcpherrinm in https://github.com/google/certificate-transparency-go/pull/1635 +* chore: relax go directive to permit 1.22.x by @dnwe in https://github.com/google/certificate-transparency-go/pull/1640 + +### Dependency Update + +* Bump github.com/fullstorydev/grpcurl from 1.9.1 to 1.9.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1627 +* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1628 +* Bump the docker-deps group across 5 directories with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1630 +* Bump github/codeql-action from 3.27.5 to 3.27.6 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1629 +* Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in the go_modules group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1631 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1633 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1632 +* Bump the docker-deps group across 4 directories with 1 update by @dependabot in https://github.com/google/certificate-transparency-go/pull/1638 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1637 +* Bump the all-deps group across 1 directory with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1641 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1643 +* Bump google.golang.org/grpc from 1.69.2 to 1.69.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1642 + +## v1.3.0 + +### CTFE Storage Saving: Extra Data Issuance Chain Deduplication + +This feature now supports PostgreSQL, in addition to the support for MySQL/MariaDB that was added in [v1.2.0](#v1.2.0). + +Log operators can choose to enable this feature for new PostgreSQL-based CT logs by adding new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/postgresql/schema.sql). The other available options are documented in the [v1.2.0](#v1.2.0) changelog entry. + +This change is tested in Cloud Build tests using the `postgres:17` Docker image as of the time of writing. 
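(Editorial note, not part of the vendored changelog.) The storage-saving scheme described above amounts to writing each issuance chain at most once, keyed by a hash, so that repeated submissions of the same chain become no-ops. A minimal Go sketch of that idea against PostgreSQL is shown below; the table name, column names, and connection string are invented for illustration only — the real schema lives in trillian/ctfe/storage/postgresql/schema.sql and the real implementation is in the PRs listed after this note.

```go
// Hypothetical sketch of an idempotent issuance-chain insert; not the real
// IssuanceChainStorage implementation. Table and column names are invented.
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/lib/pq" // assumed PostgreSQL driver for illustration
)

// storeChain writes a chain keyed by its hash. ON CONFLICT DO NOTHING makes
// re-inserting an already-known chain a no-op, so duplicate submissions do
// not surface duplicate-key errors.
func storeChain(ctx context.Context, db *sql.DB, hash, chain []byte) error {
	_, err := db.ExecContext(ctx,
		`INSERT INTO issuance_chain (hash, chain) VALUES ($1, $2)
		 ON CONFLICT (hash) DO NOTHING`,
		hash, chain)
	return err
}

func main() {
	db, err := sql.Open("postgres", "postgres://ctfe:ctfe@localhost/ctfe?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := storeChain(context.Background(), db, []byte{0x01}, []byte("chain bytes")); err != nil {
		log.Fatal(err)
	}
}
```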
+ +* Add IssuanceChainStorage PostgreSQL implementation by @robstradling in https://github.com/google/certificate-transparency-go/pull/1618 + +### Misc + +* [Dependabot] Update all docker images in one PR by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1614 +* Explicitly include version tag by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1617 +* Add empty cloudbuild_postgresql.yaml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1623 + +### Dependency update + +* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1609 +* Bump golang from 1.23.2-bookworm to 1.23.3-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1611 +* Bump github/codeql-action from 3.27.0 to 3.27.1 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1610 +* Bump golang from 1.23.2-bookworm to 1.23.3-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1612 +* Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 in the go_modules group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1613 +* Bump the docker-deps group across 3 directories with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1616 +* Bump github/codeql-action from 3.27.1 to 3.27.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1615 +* Bump the docker-deps group across 4 directories with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1622 +* Bump github/codeql-action from 3.27.2 to 3.27.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1620 +* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1621 +* Bump github.com/google/trillian from 1.6.1 to 1.7.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1624 +* Bump github/codeql-action from 3.27.4 to 3.27.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1625 + +## v1.2.2 + +* Recommended Go version for development: 1.22 + * Using a different version can lead to presubmits failing due to unexpected diffs. + +### Add TLS Support + +Add TLS support for Trillian: By using `--trillian_tls_ca_cert_file` flag, users can provide a CA certificate, that is used to establish a secure communication with Trillian log server. + +Add TLS support for ct_server: By using `--tls_certificate` and `--tls_key` flags, users can provide a service certificate and key, that enables the server to handle HTTPS requests. 
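(Editorial illustration, not ct_server code.) Flags of this kind typically wire up two separate pieces of TLS state: a CA pool used to verify the connection to the backend, and a certificate/key pair used to serve HTTPS. The self-contained Go sketch below shows that general wiring; the flag names mirror the ones mentioned above, but the program, handler, and port are hypothetical.

```go
// Generic illustration of what such TLS flags usually configure.
// This is an editorial sketch, not the ct_server implementation.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"flag"
	"log"
	"net/http"
	"os"
)

func main() {
	caFile := flag.String("trillian_tls_ca_cert_file", "", "CA certificate used to verify the backend connection")
	certFile := flag.String("tls_certificate", "", "server certificate for HTTPS")
	keyFile := flag.String("tls_key", "", "server key for HTTPS")
	flag.Parse()

	// Backend side: trust the given CA when dialing the (hypothetical) log backend.
	caPEM, err := os.ReadFile(*caFile)
	if err != nil {
		log.Fatalf("read CA cert: %v", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("no certificates found in CA file")
	}
	backendTLS := &tls.Config{RootCAs: pool}
	_ = backendTLS // would be handed to whatever client dials the backend

	// Server side: serve HTTPS with the provided certificate and key.
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServeTLS(":6965", *certFile, *keyFile, mux))
}
```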
+ +* Add TLS support for CTLog server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1523 +* Add TLS support for migrillian by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1525 +* fix TLS configuration for ct_server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1542 +* Add Trillian TLS support for ct_server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1551 + +### HTTP Idle Connection Timeout Flag + +A new flag `http_idle_timeout` is added to set the HTTP server's idle timeout value in the ct_server binary. This controls the maximum amount of time to wait for the next request when keep-alives are enabled. + +* add flag for HTTP idle connection timeout value by @bobcallaway in https://github.com/google/certificate-transparency-go/pull/1597 + +### Misc + +* Refactor issuance chain service by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1512 +* Use the version in the go.mod file for vuln checks by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1528 + +### Fixes + +* Fix failed tests on 32-bit OS by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1540 + +### Dependency update + +* Bump go.etcd.io/etcd/v3 from 3.5.13 to 3.5.14 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1500 +* Bump github/codeql-action from 3.25.6 to 3.25.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1501 +* Bump golang.org/x/net from 0.25.0 to 0.26.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1503 +* Group dependabot updates as much as possible by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1506 +* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1507 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1511 +* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1510 +* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1509 +* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1508 +* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1516 +* Bump golang from `aec4784` to `9678844` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1518 +* Bump alpine from 3.19 to 3.20 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1492 +* Bump golang from `aec4784` to `9678844` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1517 +* Bump golang from `aec4784` to `9678844` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1513 +* Bump the all-deps 
group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1515 +* Bump golang from `aec4784` to `9678844` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1514 +* Bump alpine from `77726ef` to `b89d9c9` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1519 +* Bump k8s.io/klog/v2 from 2.130.0 to 2.130.1 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1521 +* Bump alpine from `77726ef` to `b89d9c9` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1520 +* Bump github/codeql-action from 3.25.10 to 3.25.11 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1526 +* Bump version of go used by the vuln checker by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1527 +* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1530 +* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1531 +* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1532 +* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1533 +* Bump actions/upload-artifact from 4.3.3 to 4.3.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1534 +* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1535 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1536 +* Bump github/codeql-action from 3.25.12 to 3.25.13 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1538 +* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1537 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1543 +* Bump golang from `6c27802` to `af9b40f` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1544 +* Bump golang from `6c27802` to `af9b40f` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1548 +* Bump golang from `6c27802` to `af9b40f` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1547 +* Bump alpine from `b89d9c9` to `0a4eaa0` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1546 +* Bump the all-deps group in /internal/witness/cmd/feeder with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1545 +* Bump the all-deps group with 2 updates by 
@dependabot in https://github.com/google/certificate-transparency-go/pull/1549 +* Bump golang.org/x/time from 0.5.0 to 0.6.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1550 +* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1552 +* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1553 +* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1554 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1555 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1556 +* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1557 +* Bump github.com/prometheus/client_golang from 1.19.1 to 1.20.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1559 +* Bump github/codeql-action from 3.26.0 to 3.26.3 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1561 +* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1558 +* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1563 +* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1560 +* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1562 +* Bump go version to 1.22.6 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1564 +* Bump github.com/prometheus/client_golang from 1.20.0 to 1.20.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1565 +* Bump github/codeql-action from 3.26.3 to 3.26.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1566 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1568 +* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1569 +* Bump go from 1.22.6 to 1.22.7 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1574 +* Bump alpine from `0a4eaa0` to `beefdbd` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1571 +* Bump the all-deps group across 1 directory with 5 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1577 +* Bump golang from 1.23.0-bookworm to 1.23.1-bookworm in /internal/witness/cmd/witness in the 
all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1575 +* Bump golang from 1.23.0-bookworm to 1.23.1-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1576 +* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1572 +* Bump the all-deps group in /internal/witness/cmd/feeder with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1573 +* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1578 +* Bump github/codeql-action from 3.26.6 to 3.26.7 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1579 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1580 +* Bump github/codeql-action from 3.26.7 to 3.26.8 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1581 +* Bump distroless/base-debian12 from `c925d12` to `88e0a2a` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1582 +* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1585 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1583 +* Bump golang from `1a5326b` to `dba79eb` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1584 +* Bump golang from `1a5326b` to `dba79eb` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1587 +* Bump golang from `1a5326b` to `dba79eb` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1586 +* Bump the all-deps group with 5 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1588 +* Bump the all-deps group with 6 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1589 +* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1593 +* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1592 +* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1591 +* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1590 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1595 +* Bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1598 +* Bump golang from `18d2f94` to `2341ddf` in /integration in the 
all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1602 +* Bump golang from `18d2f94` to `2341ddf` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1599 +* Bump golang from `18d2f94` to `2341ddf` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1600 +* Bump golang from `18d2f94` to `2341ddf` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1601 +* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1603 +* Bump distroless/base-debian12 from `6ae5fe6` to `8fe31fb` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1604 + +## v1.2.1 + +### Fixes + +* Fix Go potential bugs and maintainability by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1496 + +### Dependency update + +* Bump google.golang.org/grpc from 1.63.2 to 1.64.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1482 + +## v1.2.0 + +### CTFE Storage Saving: Extra Data Issuance Chain Deduplication + +To reduce CT/Trillian database storage by deduplication of the entire issuance chain (intermediate certificate(s) and root certificate) that is currently stored in the Trillian merkle tree leaf ExtraData field. Storage cost should be reduced by at least 33% for new CT logs with this feature enabled. Currently only MySQL/MariaDB is supported to store the issuance chain in the CTFE database. + +Existing logs are not affected by this change. + +Log operators can choose to opt-in this change for new CT logs by adding new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/mysql/schema.sql). See [example](trillian/examples/deployment/docker/ctfe/ct_server.cfg). + +- `ctfe_storage_connection_string` +- `extra_data_issuance_chain_storage_backend` + +An optional LRU cache can be enabled by providing the following flags. + +- `cache_type` +- `cache_size` +- `cache_ttl` + +This change is tested in Cloud Build tests using the `mysql:8.4` Docker image as of the time of writing. 
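(Editorial aside, not part of the vendored changelog.) The deduplication plus optional LRU cache described above follows a familiar pattern: hash the issuance chain, consult a cache, and only write to the backing store on a miss, so the leaf only needs to reference the hash rather than carry the full chain. The toy, in-memory Go sketch below illustrates that pattern; every identifier is hypothetical, and the maps stand in for both the configurable cache and the MySQL-backed table.

```go
// Illustrative only: a toy, in-memory sketch of issuance-chain deduplication.
// The real CTFE feature stores chains in MySQL/MariaDB and uses an LRU cache;
// every identifier here is hypothetical.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sync"
)

type chainStore struct {
	mu     sync.Mutex
	cache  map[string]bool   // stands in for the optional LRU cache
	stored map[string][]byte // stands in for the database table
}

func newChainStore() *chainStore {
	return &chainStore{cache: map[string]bool{}, stored: map[string][]byte{}}
}

// putChain stores the chain once and returns the hash that a leaf would
// reference instead of carrying the full chain bytes in its ExtraData.
func (s *chainStore) putChain(chain []byte) string {
	sum := sha256.Sum256(chain)
	key := hex.EncodeToString(sum[:])

	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cache[key] {
		return key // already seen; nothing written
	}
	if _, ok := s.stored[key]; !ok {
		s.stored[key] = chain
	}
	s.cache[key] = true
	return key
}

func main() {
	s := newChainStore()
	a := s.putChain([]byte("intermediate+root chain"))
	b := s.putChain([]byte("intermediate+root chain")) // duplicate submission
	fmt.Println(a == b, len(s.stored))                 // true 1
}
```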
+ +* Add issuance chain storage interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1430 +* Add issuance chain cache interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1431 +* Add CTFE extra data storage saving configs to config.proto by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1432 +* Add new types `PrecertChainEntryHash` and `CertificateChainHash` for TLS marshal/unmarshal in storage saving by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1435 +* Add IssuanceChainCache LRU implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1454 +* Add issuance chain service by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1452 +* Add CTFE extra data storage saving configs validation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1456 +* Add IssuanceChainStorage MySQL implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1462 +* Fix errcheck lint in mysql test by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1464 +* CTFE Extra Data Issuance Chain Deduplication by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1477 +* Fix incorrect deployment doc and server config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1494 + +### Submission proxy: Root compatibility checking + +* Adds the ability for a CT client to disable root compatibile checking by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1258 + +### Fixes + +* Return 429 Too Many Requests for gRPC error code `ResourceExhausted` from Trillian by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1401 +* Safeguard against redirects on PUT request by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1418 +* Fix CT client upload to be safe against no-op POSTs by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1424 + +### Misc + +* Prefix errors.New variables with the word "Err" by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1399 +* Remove lint exceptions and fix remaining issues by @silaselisha in https://github.com/google/certificate-transparency-go/pull/1438 +* Fix invalid Go toolchain version by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1471 +* Regenerate proto files by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1489 + +### Dependency update + +* Bump distroless/base-debian12 from `5eae9ef` to `28a7f1f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1388 +* Bump github/codeql-action from 3.24.6 to 3.24.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1389 +* Bump actions/checkout from 4.1.1 to 4.1.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1390 +* Bump golang from `6699d28` to `7f9c058` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1391 +* Bump golang from `6699d28` to `7f9c058` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1392 +* Bump golang from `6699d28` to `7a392a2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1393 +* Bump 
golang from `6699d28` to `7a392a2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1394 +* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1395 +* Bump golang from `7f9c058` to `d996c64` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1396 +* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1397 +* Bump golang from `7f9c058` to `d996c64` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1398 +* Bump github/codeql-action from 3.24.7 to 3.24.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1400 +* Bump github/codeql-action from 3.24.8 to 3.24.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1402 +* Bump go.etcd.io/etcd/v3 from 3.5.12 to 3.5.13 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1405 +* Bump distroless/base-debian12 from `28a7f1f` to `611d30d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1406 +* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1407 +* Bump golang.org/x/net from 0.22.0 to 0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1408 +* update govulncheck go version from 1.21.8 to 1.21.9 by @phbnf in https://github.com/google/certificate-transparency-go/pull/1412 +* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1409 +* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1410 +* Bump golang.org/x/crypto from 0.21.0 to 0.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1414 +* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1411 +* Bump github/codeql-action from 3.24.9 to 3.24.10 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1415 +* Bump golang.org/x/net from 0.23.0 to 0.24.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1416 +* Bump google.golang.org/grpc from 1.62.1 to 1.63.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1417 +* Bump github.com/fullstorydev/grpcurl from 1.8.9 to 1.9.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1419 +* Bump golang from `48b942a` to `3451eec` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1421 +* Bump golang from `48b942a` to `3451eec` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1423 +* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1420 +* Bump golang from `3451eec` to `b03f3ba` in /integration by @dependabot in 
https://github.com/google/certificate-transparency-go/pull/1426 +* Bump golang from `3451eec` to `b03f3ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1425 +* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1422 +* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1427 +* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1428 +* Bump github/codeql-action from 3.24.10 to 3.25.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1433 +* Bump github/codeql-action from 3.25.0 to 3.25.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1434 +* Bump actions/upload-artifact from 4.3.1 to 4.3.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1436 +* Bump actions/checkout from 4.1.2 to 4.1.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1437 +* Bump actions/upload-artifact from 4.3.2 to 4.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1440 +* Bump github/codeql-action from 3.25.1 to 3.25.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1441 +* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1444 +* Bump golang from `b03f3ba` to `d0902ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1443 +* Bump github.com/rs/cors from 1.10.1 to 1.11.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1442 +* Bump golang from `b03f3ba` to `d0902ba` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1447 +* Bump actions/checkout from 4.1.3 to 4.1.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1446 +* Bump github/codeql-action from 3.25.2 to 3.25.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1449 +* Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1448 +* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1445 +* Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1451 +* Bump distroless/base-debian12 from `611d30d` to `d8d01e2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1450 +* Bump google.golang.org/protobuf from 1.33.1-0.20240408130810-98873a205002 to 1.34.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1453 +* Bump actions/setup-go from 5.0.0 to 5.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1455 +* Bump golang.org/x/net from 0.24.0 to 0.25.0 and golang.org/x/crypto from v0.22.0 to v0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1457 +* Bump google.golang.org/protobuf from 1.34.0 to 1.34.1 by @dependabot in 
https://github.com/google/certificate-transparency-go/pull/1458 +* Bump distroless/base-debian12 from `d8d01e2` to `786007f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1461 +* Bump golangci/golangci-lint-action from 5.1.0 to 5.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1460 +* Bump `go-version-input` to 1.21.10 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1472 +* Bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1473 +* Bump actions/checkout from 4.1.4 to 4.1.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1469 +* Bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1465 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1466 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1463 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1470 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1467 +* Bump github/codeql-action from 3.25.3 to 3.25.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1474 +* Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1475 +* Bump ossf/scorecard-action from 2.3.1 to 2.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1476 +* Bump github/codeql-action from 3.25.4 to 3.25.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1478 +* Bump golang from `6d71b7c` to `ef27a3c` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1480 +* Bump golang from `6d71b7c` to `ef27a3c` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1481 +* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1479 +* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1483 +* Bump golang from `ef27a3c` to `5c56bd4` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1484 +* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1485 +* Bump golang from `ef27a3c` to `5c56bd4` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1486 +* Bump actions/checkout from 4.1.5 to 4.1.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1487 +* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1488 +* Bump 
github/codeql-action from 3.25.5 to 3.25.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1490 +* Bump alpine from `c5b1261` to `58d02b4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1491 +* Bump alpine from `58d02b4` to `77726ef` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1493 + +## v1.1.8 + +* Recommended Go version for development: 1.21 + * Using a different version can lead to presubmits failing due to unexpected diffs. + +### Add support for AIX + +* crypto/x509: add AIX operating system by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1277 + +### Monitoring + +* Distribution metric to monitor the start of get-entries requests by @phbnf in https://github.com/google/certificate-transparency-go/pull/1364 + +### Fixes + +* Use the appropriate HTTP response code for backend timeouts by @robstradling in https://github.com/google/certificate-transparency-go/pull/1313 + +### Misc + +* Move golangci-lint from Cloud Build to GitHub Action by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1230 +* Set golangci-lint GH action timeout to 5m by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1231 +* Added Slack channel details by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1246 +* Improve fuzzing by @AdamKorcz in https://github.com/google/certificate-transparency-go/pull/1345 + +### Dependency update + +* Bump golang from `20f9ab5` to `5ee1296` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1216 +* Bump golang from `20f9ab5` to `5ee1296` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1217 +* Bump golang from `20f9ab5` to `5ee1296` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1218 +* Bump k8s.io/klog/v2 from 2.100.1 to 2.110.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1219 +* Bump golang from `20f9ab5` to `5ee1296` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1220 +* Bump golang from `5ee1296` to `5bafbbb` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1221 +* Bump golang from `5ee1296` to `5bafbbb` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1222 +* Bump golang from `5ee1296` to `5bafbbb` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1223 +* Bump golang from `5ee1296` to `5bafbbb` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1224 +* Update the minimal image to gcr.io/distroless/base-debian12 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1148 +* Bump jq from 1.6 to 1.7 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1225 +* Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1226 +* Bump golang.org/x/time from 0.3.0 to 0.4.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1227 +* Bump github.com/mattn/go-sqlite3 from 1.14.17 to 1.14.18 by @dependabot in 
https://github.com/google/certificate-transparency-go/pull/1228 +* Bump github.com/gorilla/mux from 1.8.0 to 1.8.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1229 +* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1232 +* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1233 +* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1234 +* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1235 +* Bump go-version-input from 1.20.10 to 1.20.11 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1238 +* Bump golang.org/x/net from 0.17.0 to 0.18.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1236 +* Bump github/codeql-action from 2.22.5 to 2.22.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1240 +* Bump github/codeql-action from 2.22.6 to 2.22.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1241 +* Bump golang from `85aacbe` to `dadce81` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1243 +* Bump golang from `85aacbe` to `dadce81` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1242 +* Bump golang from `85aacbe` to `dadce81` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1244 +* Bump golang from `85aacbe` to `dadce81` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1245 +* Bump golang from `dadce81` to `52362e2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1247 +* Bump golang from `dadce81` to `52362e2` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1248 +* Bump golang from `dadce81` to `52362e2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1249 +* Bump golang from `dadce81` to `52362e2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1250 +* Bump github/codeql-action from 2.22.7 to 2.22.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1251 +* Bump golang.org/x/net from 0.18.0 to 0.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1252 +* Bump golang.org/x/time from 0.4.0 to 0.5.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1254 +* Bump alpine from `eece025` to `34871e7` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1256 +* Bump alpine from `eece025` to `34871e7` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1257 +* Bump go-version-input from 1.20.11 to 1.20.12 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1264 +* Bump 
actions/setup-go from 4.1.0 to 5.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1261 +* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1259 +* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1263 +* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1262 +* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1260 +* Bump go.etcd.io/etcd/v3 from 3.5.10 to 3.5.11 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1266 +* Bump github/codeql-action from 2.22.8 to 2.22.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1269 +* Bump alpine from `34871e7` to `51b6726` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1270 +* Bump alpine from 3.18 to 3.19 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1271 +* Bump golang from `a6b787c` to `2d3b13c` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1272 +* Bump golang from `a6b787c` to `2d3b13c` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1273 +* Bump golang from `a6b787c` to `2d3b13c` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1274 +* Bump golang from `a6b787c` to `2d3b13c` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1275 +* Bump github/codeql-action from 2.22.9 to 2.22.10 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1278 +* Bump google.golang.org/grpc from 1.59.0 to 1.60.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1279 +* Bump github/codeql-action from 2.22.10 to 3.22.11 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1280 +* Bump distroless/base-debian12 from `1dfdb5e` to `8a0bb63` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1281 +* Bump github.com/google/trillian from 1.5.3 to 1.5.4-0.20240110091238-00ca9abe023d by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1297 +* Bump actions/upload-artifact from 3.1.3 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1282 +* Bump github/codeql-action from 3.22.11 to 3.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1295 +* Bump github.com/mattn/go-sqlite3 from 1.14.18 to 1.14.19 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1283 +* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1300 +* Bump distroless/base-debian12 from `8a0bb63` to `0a93daa` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1284 +* Bump golang from 1.21.5-bookworm to 
1.21.6-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1299 +* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1298 +* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1301 +* Bump golang from `688ad7f` to `1e8ea75` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1306 +* Bump golang from `688ad7f` to `1e8ea75` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1305 +* Use trillian release instead of pinned commit by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1304 +* Bump actions/upload-artifact from 4.0.0 to 4.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1310 +* Bump golang from `1e8ea75` to `cbee5d2` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1312 +* Bump golang from `688ad7f` to `cbee5d2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1308 +* Bump golang from `1e8ea75` to `cbee5d2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1311 +* Bump golang.org/x/net from 0.19.0 to 0.20.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1302 +* Bump golang from `b651ed8` to `cbee5d2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1309 +* Bump golang from `cbee5d2` to `c4b696f` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1314 +* Bump golang from `cbee5d2` to `c4b696f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1315 +* Bump github/codeql-action from 3.23.0 to 3.23.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1317 +* Bump golang from `cbee5d2` to `c4b696f` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1316 +* Bump golang from `cbee5d2` to `c4b696f` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1318 +* Bump k8s.io/klog/v2 from 2.120.0 to 2.120.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1319 +* Bump actions/upload-artifact from 4.1.0 to 4.2.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1320 +* Bump actions/upload-artifact from 4.2.0 to 4.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1321 +* Bump golang from `c4b696f` to `d8c365d` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1326 +* Bump golang from `c4b696f` to `d8c365d` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1323 +* Bump google.golang.org/grpc from 1.60.1 to 1.61.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1324 +* Bump golang from `c4b696f` to `d8c365d` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1322 +* 
Bump golang from `c4b696f` to `d8c365d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1325 +* Bump github.com/mattn/go-sqlite3 from 1.14.19 to 1.14.20 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1327 +* Bump github/codeql-action from 3.23.1 to 3.23.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1328 +* Bump alpine from `51b6726` to `c5b1261` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1330 +* Bump alpine from `51b6726` to `c5b1261` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1329 +* Bump go.etcd.io/etcd/v3 from 3.5.11 to 3.5.12 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1332 +* Bump github.com/mattn/go-sqlite3 from 1.14.20 to 1.14.21 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1333 +* Bump golang from `d8c365d` to `69bfed3` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1335 +* Bump golang from `d8c365d` to `69bfed3` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1338 +* Bump golang from `d8c365d` to `69bfed3` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1337 +* Bump golang from `d8c365d` to `69bfed3` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1336 +* Bump golang from `69bfed3` to `3efef61` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1339 +* Bump github.com/mattn/go-sqlite3 from 1.14.21 to 1.14.22 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1344 +* Bump golang from `69bfed3` to `3efef61` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1341 +* Bump golang from `69bfed3` to `3efef61` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1343 +* Bump distroless/base-debian12 from `0a93daa` to `f47fa3d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1340 +* Bump golang from `69bfed3` to `3efef61` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1342 +* Bump github/codeql-action from 3.23.2 to 3.24.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1346 +* Bump actions/upload-artifact from 4.3.0 to 4.3.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1347 +* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1350 +* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1348 +* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1349 +* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /internal/witness/cmd/feeder by @dependabot in 
https://github.com/google/certificate-transparency-go/pull/1351 +* Bump golang.org/x/crypto from 0.18.0 to 0.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1353 +* Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1354 +* Bump golang.org/x/net from 0.20.0 to 0.21.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1352 +* Bump distroless/base-debian12 from `f47fa3d` to `2102ce1` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1355 +* Bump github/codeql-action from 3.24.0 to 3.24.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1357 +* Bump golang from `874c267` to `5a3e169` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1356 +* Bump golang from `874c267` to `5a3e169` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1358 +* Bump golang from `874c267` to `5a3e169` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1359 +* Bump golang from `874c267` to `5a3e169` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1360 +* Bump github/codeql-action from 3.24.1 to 3.24.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1366 +* Bump golang from `5a3e169` to `925fe3f` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1363 +* Bump google.golang.org/grpc from 1.61.0 to 1.61.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1362 +* Bump golang from `5a3e169` to `925fe3f` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1365 +* Bump golang from `5a3e169` to `925fe3f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1361 +* Bump golang from `5a3e169` to `925fe3f` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1367 +* Bump golang/govulncheck-action from 1.0.1 to 1.0.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1368 +* Bump github/codeql-action from 3.24.3 to 3.24.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1371 +* Bump google.golang.org/grpc from 1.61.1 to 1.62.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1369 +* Bump distroless/base-debian12 from `2102ce1` to `5eae9ef` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1372 +* Bump distroless/base-debian12 from `5eae9ef` to `f9b0e86` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1375 +* Bump golang.org/x/crypto from 0.19.0 to 0.20.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1374 +* Bump github.com/prometheus/client_golang from 1.18.0 to 1.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1373 +* Bump github/codeql-action from 3.24.5 to 3.24.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1377 +* Bump distroless/base-debian12 from `f9b0e86` to 
`5eae9ef` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1376 +* Bump golang.org/x/net from 0.21.0 to 0.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1378 +* Bump Go from 1.20 to 1.21 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1386 +* Bump google.golang.org/grpc from 1.62.0 to 1.62.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1380 +* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1382 +* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1385 +* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1384 +* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1383 + +## v1.1.7 + +* Recommended Go version for development: 1.20 + * This is the version used by the Cloud Build presubmits. Using a different version can lead to presubmits failing due to unexpected diffs. + +* Bump golangci-lint from 1.51.1 to 1.55.1 (developers should update to this version). + +### Add support for WASI port + +* Add build tags for wasip1 GOOS by @flavio in https://github.com/google/certificate-transparency-go/pull/1089 + +### Add support for IBM Z operating system z/OS + +* Add build tags for zOS by @onlywork1984 in https://github.com/google/certificate-transparency-go/pull/1088 + +### Log List + +* Add support for "is_all_logs" field in loglist3 by @phbnf in https://github.com/google/certificate-transparency-go/pull/1095 + +### Documentation + +* Improve Dockerized Test Deployment documentation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1179 + +### Misc + +* Escape forward slashes in certificate Subject names when used as user quota id strings by @robstradling in https://github.com/google/certificate-transparency-go/pull/1059 +* Search whole chain looking for issuer match by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1112 +* Use proper check per @AGWA instead of buggy check introduced in #1112 by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1114 +* Build the ctfe/ct_server binary without depending on glibc by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1119 +* Migrate CTFE Ingress manifest to support GKE version 1.23 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1086 +* Remove Dependabot ignore configuration by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1097 +* Add "github-actions" and "docker" Dependabot config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1101 +* Add top level permission in CodeQL workflow by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1102 +* Pin Docker image dependencies by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1110 +* Remove GO111MODULE from Dockerfile and Cloud Build yaml files by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1113 +* Add docker Dependabot config by @roger2hk in 
https://github.com/google/certificate-transparency-go/pull/1126 +* Export is_mirror = 0.0 for non mirror instead of nothing by @phbnf in https://github.com/google/certificate-transparency-go/pull/1133 +* Add govulncheck GitHub action by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1145 +* Spelling by @jsoref in https://github.com/google/certificate-transparency-go/pull/1144 + +### Dependency update + +* Bump Go from 1.19 to 1.20 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1146 +* Bump golangci-lint from 1.51.1 to 1.55.1 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1214 +* Bump go.etcd.io/etcd/v3 from 3.5.8 to 3.5.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1083 +* Bump golang.org/x/crypto from 0.8.0 to 0.9.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/108 +* Bump github.com/mattn/go-sqlite3 from 1.14.16 to 1.14.17 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1092 +* Bump golang.org/x/net from 0.10.0 to 0.11.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1094 +* Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1098 +* Bump google.golang.org/protobuf from 1.30.0 to 1.31.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1099 +* Bump golang.org/x/net from 0.11.0 to 0.12.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1108 +* Bump actions/checkout from 3.1.0 to 3.5.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1103 +* Bump github/codeql-action from 2.1.27 to 2.20.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1104 +* Bump ossf/scorecard-action from 2.0.6 to 2.2.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1105 +* Bump actions/upload-artifact from 3.1.0 to 3.1.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1106 +* Bump github/codeql-action from 2.20.3 to 2.20.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1115 +* Bump github/codeql-action from 2.20.4 to 2.21.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1117 +* Bump golang.org/x/net from 0.12.0 to 0.14.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1124 +* Bump github/codeql-action from 2.21.0 to 2.21.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1121 +* Bump github/codeql-action from 2.21.2 to 2.21.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1125 +* Bump golang from `fd9306e` to `eb3f9ac` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1127 +* Bump alpine from 3.8 to 3.18 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1129 +* Bump golang from `fd9306e` to `eb3f9ac` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1128 +* Bump alpine from `82d1e9d` to `7144f7b` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1130 +* Bump golang from `fd9306e` to `eb3f9ac` in /internal/witness/cmd/witness by @dependabot in 
https://github.com/google/certificate-transparency-go/pull/1131 +* Bump golang from 1.19-alpine to 1.21-alpine in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1132 +* Bump actions/checkout from 3.5.3 to 3.6.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1134 +* Bump github/codeql-action from 2.21.4 to 2.21.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1135 +* Bump distroless/base from `73deaaf` to `46c5b9b` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1136 +* Bump actions/checkout from 3.6.0 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1137 +* Bump golang.org/x/net from 0.14.0 to 0.15.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1139 +* Bump github.com/rs/cors from 1.9.0 to 1.10.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1140 +* Bump actions/upload-artifact from 3.1.2 to 3.1.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1141 +* Bump golang from `445f340` to `96634e5` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1142 +* Bump github/codeql-action from 2.21.5 to 2.21.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1149 +* Bump Docker golang base images to 1.21.1 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1147 +* Bump github/codeql-action from 2.21.6 to 2.21.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1150 +* Bump github/codeql-action from 2.21.7 to 2.21.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1152 +* Bump golang from `d3114db` to `a0b3bc4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1155 +* Bump golang from `d3114db` to `a0b3bc4` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1157 +* Bump golang from `d3114db` to `a0b3bc4` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1156 +* Bump golang from `d3114db` to `a0b3bc4` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1158 +* Bump golang from `e06b3a4` to `114b9cc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1159 +* Bump golang from `a0b3bc4` to `114b9cc` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1160 +* Bump golang from `a0b3bc4` to `114b9cc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1161 +* Bump actions/checkout from 4.0.0 to 4.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1162 +* Bump golang from `114b9cc` to `9c7ea4a` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1163 +* Bump golang from `114b9cc` to `9c7ea4a` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1166 +* Bump golang from `114b9cc` to `9c7ea4a` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1165 +* Bump 
golang from `114b9cc` to `9c7ea4a` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1164 +* Bump github/codeql-action from 2.21.8 to 2.21.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1169 +* Bump golang from `9c7ea4a` to `61f84bc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1168 +* Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1172 +* Bump golang from `9c7ea4a` to `61f84bc` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1170 +* Bump github.com/rs/cors from 1.10.0 to 1.10.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1176 +* Bump alpine from `7144f7b` to `eece025` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1174 +* Bump alpine from `7144f7b` to `eece025` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1175 +* Bump golang from `9c7ea4a` to `61f84bc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1171 +* Bump golang from `9c7ea4a` to `61f84bc` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1173 +* Bump distroless/base from `46c5b9b` to `a35b652` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1177 +* Bump golang.org/x/crypto from 0.13.0 to 0.14.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1178 +* Bump github/codeql-action from 2.21.9 to 2.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1180 +* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1181 +* Bump golang.org/x/net from 0.15.0 to 0.16.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1184 +* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1182 +* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1185 +* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1183 +* Bump github/codeql-action from 2.22.0 to 2.22.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1186 +* Bump distroless/base from `a35b652` to `b31a6e0` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1188 +* Bump ossf/scorecard-action from 2.2.0 to 2.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1187 +* Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1189 +* Bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1190 +* Bump go-version-input from 1.20.8 to 1.20.10 in govulncheck by @roger2hk in 
https://github.com/google/certificate-transparency-go/pull/1195 +* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1193 +* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1191 +* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1194 +* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1192 +* Bump golang from `a94b089` to `8f9a1ec` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1196 +* Bump github/codeql-action from 2.22.1 to 2.22.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1197 +* Bump golang from `a94b089` to `5cc7ddc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1200 +* Bump golang from `a94b089` to `5cc7ddc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1199 +* Bump github/codeql-action from 2.22.2 to 2.22.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1202 +* Bump golang from `5cc7ddc` to `20f9ab5` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1203 +* Bump golang from `a94b089` to `20f9ab5` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1198 +* Bump golang from `8f9a1ec` to `20f9ab5` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1201 +* Bump actions/checkout from 4.1.0 to 4.1.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1204 +* Bump github/codeql-action from 2.22.3 to 2.22.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1206 +* Bump ossf/scorecard-action from 2.3.0 to 2.3.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1207 +* Bump github/codeql-action from 2.22.4 to 2.22.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1209 +* Bump multiple Go module dependencies by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1213 + +## v1.1.6 + +### Dependency update + +* Bump Trillian to v1.5.2 +* Bump Prometheus to v0.43.1 + +## v1.1.5 + +### Public/Private Key Consistency + + * #1044: If a public key has been configured for a log, check that it is consistent with the private key. + * #1046: Ensure that no two logs in the CTFE configuration use the same private key. + +### Cleanup + + * Remove v2 log list package files. + +### Misc + + * Updated golangci-lint to v1.51.1 (developers should update to this version). + * Bump Go version from 1.17 to 1.19. + +## v1.1.4 + +[Published 2022-10-21](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.4) + +### Cleanup + + * Remove log list v1 package and its dependencies. + +### Migrillian + + * #960: Skip consistency check when root is size zero. 
+ +### Misc + + * Update Trillian to [0a389c4](https://github.com/google/trillian/commit/0a389c4bb8d97fb3be8f55d7e5b428cf4304986f) + * Migrate loglist dependency from v1 to v3 in ctclient cmd. + * Migrate loglist dependency from v1 to v3 in ctutil/loginfo.go + * Migrate loglist dependency from v1 to v3 in ctutil/sctscan.go + * Migrate loglist dependency from v1 to v3 in trillian/integration/ct_hammer/main.go + * Downgrade 429 errors to verbosity 2 + +## v1.1.3 + +[Published 2022-05-14](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.3) + +### Integration + + * Breaking change to API for `integration.HammerCTLog`: + * Added `ctx` as first argument, and terminate loop if it becomes cancelled + +### JSONClient + + * PostAndParseWithRetry now does backoff-and-retry upon receiving HTTP 429. + +### Cleanup + + * `WithBalancerName` is deprecated and removed, using the recommended way. + * `ctfe.PEMCertPool` type has been moved to `x509util.PEMCertPool` to reduce + dependencies (#903). + +### Misc + + * updated golangci-lint to v1.46.1 (developers should update to this version) + * update `google.golang.org/grpc` to v1.46.0 + * `ctclient` tool now uses Cobra for better CLI experience (#901). + * #800: Remove dependency from `ratelimit`. + * #927: Add read-only mode to CTFE config. + +## v1.1.2 + +[Published 2021-09-21](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.2) + +### CTFE + + * Removed the `-by_range` flag. + +### Updated dependencies + + * Trillian from v1.3.11 to v1.4.0 + * protobuf to v2 + +## v1.1.1 + +[Published 2020-10-06](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.1) + +### Tools + +#### CT Hammer + +Added a flag (--strict_sth_consistency_size) which when set to true enforces the current behaviour of only request consistency proofs between tree sizes for which the hammer has seen valid STHs. +When setting this flag to false, if no two usable STHs are available the hammer will attempt to request a consistency proof between the latest STH it's seen and a random smaller (but > 0) tree size. + + +### CTFE + +#### Caching + +The CTFE now includes a Cache-Control header in responses containing purely +immutable data, e.g. those for get-entries and get-proof-by-hash. This allows +clients and proxies to cache these responses for up to 24 hours. + +#### EKU Filtering + +> :warning: **It is not yet recommended to enable this option in a production CT Log!** + +CTFE now supports filtering logging submissions by leaf certificate EKU. +This is enabled by adding an extKeyUsage list to a log's stanza in the +config file. 
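+
+As a hedged illustration of the check this enables (this is not the CTFE
+implementation; the helper names below are invented, the standard library's
+x509 constants are used for brevity in place of this repository's fork, and
+the valid config strings are listed in the table that follows), a leaf might
+be tested against the configured list roughly like this:
+
+```go
+// Sketch only: map config strings to EKU constants and accept a leaf if it
+// carries at least one EKU from the configured allow-list.
+package main
+
+import (
+	"crypto/x509"
+	"fmt"
+)
+
+var ekuByName = map[string]x509.ExtKeyUsage{
+	"Any":        x509.ExtKeyUsageAny,
+	"ServerAuth": x509.ExtKeyUsageServerAuth,
+	"ClientAuth": x509.ExtKeyUsageClientAuth,
+	// ... remaining entries follow the table below.
+}
+
+// leafAllowed returns true if filtering is disabled (empty list or "Any") or
+// if the leaf carries at least one EKU from the configured list.
+func leafAllowed(leaf *x509.Certificate, configured []string) bool {
+	if len(configured) == 0 {
+		return true
+	}
+	allowed := make(map[x509.ExtKeyUsage]bool)
+	for _, name := range configured {
+		eku, ok := ekuByName[name]
+		if !ok {
+			continue // unknown string; a real config would reject this up front
+		}
+		if eku == x509.ExtKeyUsageAny {
+			return true
+		}
+		allowed[eku] = true
+	}
+	for _, eku := range leaf.ExtKeyUsage {
+		if allowed[eku] {
+			return true
+		}
+	}
+	return false
+}
+
+func main() {
+	leaf := &x509.Certificate{ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}}
+	fmt.Println(leafAllowed(leaf, []string{"ServerAuth", "ClientAuth"})) // true
+}
+```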
+ +The format is a list of strings corresponding to the supported golang x509 EKUs: + |Config string | Extended Key Usage | + |----------------------------|----------------------------------------| + |`Any` | ExtKeyUsageAny | + |`ServerAuth` | ExtKeyUsageServerAuth | + |`ClientAuth` | ExtKeyUsageClientAuth | + |`CodeSigning` | ExtKeyUsageCodeSigning | + |`EmailProtection` | ExtKeyUsageEmailProtection | + |`IPSECEndSystem` | ExtKeyUsageIPSECEndSystem | + |`IPSECTunnel` | ExtKeyUsageIPSECTunnel | + |`IPSECUser` | ExtKeyUsageIPSECUser | + |`TimeStamping` | ExtKeyUsageTimeStamping | + |`OCSPSigning` | ExtKeyUsageOCSPSigning | + |`MicrosoftServerGatedCrypto`| ExtKeyUsageMicrosoftServerGatedCrypto | + |`NetscapeServerGatedCrypto` | ExtKeyUsageNetscapeServerGatedCrypto | + +When an extKeyUsage list is specified, the CT Log will reject logging +submissions for leaf certificates that do not contain an EKU present in this +list. + +When enabled, EKU filtering is only performed at the leaf level (i.e. there is +no 'nested' EKU filtering performed). + +If no list is specified, or the list contains an `Any` entry, no EKU +filtering will be performed. + +#### GetEntries +Calls to `get-entries` which are at (or above) the maximum permitted number of +entries whose `start` parameter does not fall on a multiple of the maximum +permitted number of entries, will have their responses truncated such that +subsequent requests will align with this boundary. +This is intended to coerce callers of `get-entries` into all using the same +`start` and `end` parameters and thereby increase the cacheability of +these requests. + +e.g.: + +
+```
+Old behaviour:
+             1         2         3
+             0         0         0
+Entries>-----|---------|---------|----...
+Client A -------|---------|----------|...
+Client B --|--------|---------|-------...
+           ^        ^         ^
+           `--------`---------`---- requests
+
+With coercion (max batch = 10 entries):
+             1         2         3
+             0         0         0
+Entries>-----|---------|---------|----...
+Client A ----X---------|---------|...
+Client B --|-X---------|---------|-------...
+             ^
+             `-- Requests truncated
+```
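+
+As a rough sketch of the range-alignment arithmetic the diagram illustrates
+(an illustration only, not the CTFE code; `maxBatch` stands in for the
+maximum permitted number of entries):
+
+```go
+package main
+
+import "fmt"
+
+// alignEnd truncates an inclusive get-entries range so that the client's
+// follow-up request will start on a multiple of maxBatch.
+func alignEnd(start, end, maxBatch int64) int64 {
+	if end-start+1 < maxBatch {
+		return end // requests below the maximum are left alone
+	}
+	if start%maxBatch == 0 {
+		return start + maxBatch - 1 // already aligned: serve one full batch
+	}
+	// Misaligned: truncate at the next batch boundary so the remainder
+	// request starts on a multiple of maxBatch.
+	return (start/maxBatch+1)*maxBatch - 1
+}
+
+func main() {
+	fmt.Println(alignEnd(7, 56, 10))  // 9: next request starts at 10
+	fmt.Println(alignEnd(20, 56, 10)) // 29: already aligned, one full batch
+}
+```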
+ +This behaviour can be disabled by setting the `--align_getentries` +flag to false. + +#### Flags + +The `ct_server` binary changed the default of these flags: + +- `by_range` - Now defaults to `true` + +The `ct_server` binary added the following flags: +- `align_getentries` - See GetEntries section above for details + +Added `backend` flag to `migrillian`, which now replaces the deprecated +"backend" feature of Migrillian configs. + +#### FixedBackendResolver Replaced + +This was previously used in situations where a comma separated list of +backends was provided in the `rpcBackend` flag rather than a single value. + +It has been replaced by equivalent functionality using a newer gRPC API. +However this support was only intended for use in integration tests. In +production we recommend the use of etcd or a gRPC load balancer. + +### LogList + +Log list tools updated to use the correct v2 URL (from v2_beta previously). + +### Libraries + +#### x509 fork + +Merged upstream Go 1.13 and Go 1.14 changes (with the exception +of https://github.com/golang/go/commit/14521198679e, to allow +old certs using a malformed root still to be logged). + +#### asn1 fork + +Merged upstream Go 1.14 changes. + +#### ctutil + +Added VerifySCTWithVerifier() to verify SCTs using a given ct.SignatureVerifier. + +### Configuration Files + +Configuration files that previously had to be text-encoded Protobuf messages can +now alternatively be binary-encoded instead. + +### JSONClient + +- `PostAndParseWithRetry` error logging now includes log URI in messages. + +### Minimal Gossip Example + +All the code for this, except for the x509ext package, has been moved over +to the [trillian-examples](https://github.com/google/trillian-examples) repository. + +This keeps the code together and removes a circular dependency between the +two repositories. The package layout and structure remains the same so +updating should just mean changing any relevant import paths. + +### Dependencies + +A circular dependency on the [monologue](https://github.com/google/monologue) repository has been removed. + +A circular dependency on the [trillian-examples](https://github.com/google/trillian-examples) repository has been removed. + +The version of trillian in use has been updated to 1.3.11. This has required +various other dependency updates including gRPC and protobuf. This code now +uses the v2 proto API. The Travis tests now expect the 3.11.4 version of +protoc. + +The version of etcd in use has been switched to the one from `go.etcd.io`. + +Most of the above changes are to align versions more closely with the ones +used in the trillian repository. + +## v1.1.0 + +Published 2019-11-14 15:00:00 +0000 UTC + +### CTFE + +The `reject_expired` and `reject_unexpired` configuration fields for the CTFE +have been changed so that their behaviour reflects their name: + +- `reject_expired` only rejects expired certificates (i.e. it now allows + not-yet-valid certificates). +- `reject_unexpired` only allows expired certificates (i.e. it now rejects + not-yet-valid certificates). + +A `reject_extensions` configuration field for the CTFE was added, this allows +submissions to be rejected if they contain an extension with any of the +specified OIDs. + +A `frozen_sth` configuration field for the CTFE was added. This STH will be +served permanently. It must be signed by the log's private key. + +A `/healthz` URL has been added which responds with HTTP 200 OK and the string +"ok" when the server is up. 
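+
+To make the `reject_expired` / `reject_unexpired` semantics described above
+concrete, here is a minimal sketch (not the CTFE code; it assumes the check
+compares the leaf's NotAfter against the current time):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// rejectSubmission applies the documented semantics: reject_expired refuses
+// certificates whose validity period has ended, while reject_unexpired
+// refuses anything that has not yet expired (currently valid or not-yet-valid).
+func rejectSubmission(now, notAfter time.Time, rejectExpired, rejectUnexpired bool) bool {
+	expired := now.After(notAfter)
+	if rejectExpired && expired {
+		return true
+	}
+	if rejectUnexpired && !expired {
+		return true
+	}
+	return false
+}
+
+func main() {
+	now := time.Now()
+	fmt.Println(rejectSubmission(now, now.Add(-time.Hour), true, false)) // true: expired cert, log rejects expired
+	fmt.Println(rejectSubmission(now, now.Add(time.Hour), false, true))  // true: unexpired cert, log only accepts expired
+}
+```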
+ +#### Flags + +The `ct_server` binary has these new flags: + +- `mask_internal_errors` - Removes error strings from HTTP 500 responses + (Internal Server Error) + +Removed default values for `--metrics_endpoint` and `--log_rpc_server` flags. +This makes it easier to get the documented "unset" behaviour. + +#### Metrics + +The CTFE exports these new metrics: + +- `is_mirror` - set to 1 for mirror logs (copies of logs hosted elsewhere) +- `frozen_sth_timestamp` - time of the frozen Signed Tree Head in milliseconds + since the epoch + +#### Kubernetes + +Updated prometheus-to-sd to v0.5.2. + +A dedicated node pool is no longer required by the Kubernetes manifests. + +### Log Lists + +A new package has been created for parsing, searching and creating JSON log +lists compatible with the +[v2 schema](http://www.gstatic.com/ct/log_list/v2_beta/log_list_schema.json): +`github.com/google/certificate-transparency-go/loglist2`. + +### Docker Images + +Our Docker images have been updated to use Go 1.11 and +[Distroless base images](https://github.com/GoogleContainerTools/distroless). + +The CTFE Docker image now sets `ENTRYPOINT`. + +### Utilities / Libraries + +#### jsonclient + +The `jsonclient` package now copes with empty HTTP responses. The user-agent +header it sends can now be specified. + +#### x509 and asn1 forks + +Merged upstream changes from Go 1.12 into the `asn1` and `x509` packages. + +Added a "lax" tag to `asn1` that applies recursively and makes some checks more +relaxed: + +- parsePrintableString() copes with invalid PrintableString contents, e.g. use + of tagPrintableString when the string data is really ISO8859-1. +- checkInteger() allows integers that are not minimally encoded (and so are + not correct DER). +- OIDs are allowed to be empty. + +The following `x509` functions will now return `x509.NonFatalErrors` if ASN.1 +parsing fails in strict mode but succeeds in lax mode. Previously, they only +attempted strict mode parsing. + +- `x509.ParseTBSCertificate()` +- `x509.ParseCertificate()` +- `x509.ParseCertificates()` + +The `x509` package will now treat a negative RSA modulus as a non-fatal error. + +The `x509` package now supports RSASES-OAEP and Ed25519 keys. + +#### ctclient + +The `ctclient` tool now defaults to using +[all_logs_list.json](https://www.gstatic.com/ct/log_list/all_logs_list.json) +instead of [log_list.json](https://www.gstatic.com/ct/log_list/log_list.json). +This can be overridden using the `--log_list` flag. + +It can now perform inclusion checks on pre-certificates. + +It has these new commands: + +- `bisect` - Finds a log entry given a timestamp. + +It has these new flags: + +- `--chain` - Displays the entire certificate chain +- `--dns_server` - The DNS server to direct queries to (system resolver by + default) +- `--skip_https_verify` - Skips verification of the HTTPS connection +- `--timestamp` - Timestamp to use for `bisect` and `inclusion` commands (for + `inclusion`, only if --leaf_hash is not used) + +It now accepts hex or base64-encoded strings for the `--tree_hash`, +`--prev_hash` and `--leaf_hash` flags. 
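+
+Relating to the x509 fork change noted above, callers of the fork's parse
+functions can now receive `x509.NonFatalErrors` together with a usable
+certificate when strict parsing fails but lax parsing succeeds. A hedged
+sketch of how a caller might handle this (the error-inspection pattern is an
+assumption; consult the fork's documentation for the supported helpers):
+
+```go
+package example
+
+import (
+	"log"
+
+	"github.com/google/certificate-transparency-go/x509"
+)
+
+// parseLeniently keeps certificates that only triggered non-fatal parse
+// issues (lax mode succeeded) while still failing hard on anything else.
+func parseLeniently(der []byte) (*x509.Certificate, error) {
+	cert, err := x509.ParseCertificate(der)
+	if err != nil {
+		if _, ok := err.(x509.NonFatalErrors); ok {
+			log.Printf("non-fatal parse issues: %v", err)
+			return cert, nil
+		}
+		return nil, err
+	}
+	return cert, nil
+}
+```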
+ +#### certcheck + +The `certcheck` tool has these new flags: + +- `--check_time` - Check current validity of certificate (replaces + `--timecheck`) +- `--check_name` - Check validity of certificate name +- `--check_eku` - Check validity of EKU nesting +- `--check_path_len` - Check validity of path length constraint +- `--check_name_constraint` - Check name constraints +- `--check_unknown_critical_exts` - Check for unknown critical extensions + (replaces `--ignore_unknown_critical_exts`) +- `--strict` - Set non-zero exit code for non-fatal errors in parsing + +#### sctcheck + +The `sctcheck` tool has these new flags: + +- `--check_inclusion` - Checks that the SCT was honoured (i.e. the + corresponding certificate was included in the issuing CT log) + +#### ct_hammer + +The `ct_hammer` tool has these new flags: + +- `--duplicate_chance` - Allows setting the probability of the hammer sending + a duplicate submission. + +## v1.0.21 - CTFE Logging / Path Options. Mirroring. RPKI. Non Fatal X.509 error improvements + +Published 2018-08-20 10:11:04 +0000 UTC + +### CTFE + +`CTFE` no longer prints certificate chains as long byte strings in messages when handler errors occur. This was obscuring the reason for the failure and wasn't particularly useful. + +`CTFE` now has a global log URL path prefix flag and a configuration proto for a log specific path. The latter should help for various migration strategies if existing C++ server logs are going to be converted to run on the new code. + +### Mirroring + +More progress has been made on log mirroring. We believe that it's now at the point where testing can begin. + +### Utilities / Libraries + +The `certcheck` and `ct_hammer` utilities have received more enhancements. + +`x509` and `x509util` now support Subject Information Access and additional extensions for [RPKI / RFC 3779](https://www.ietf.org/rfc/rfc3779.txt). + +`scanner` / `fixchain` and some other command line utilities now have better handling of non-fatal errors. + +Commit [3629d6846518309d22c16fee15d1007262a459d2](https://api.github.com/repos/google/certificate-transparency-go/commits/3629d6846518309d22c16fee15d1007262a459d2) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.21) + +## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements + +Published 2018-07-05 09:21:34 +0000 UTC + +Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`. + +The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`. + +An implementation of Gossip has been added. See the `gossip/minimal` package for more information. + +An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10. + +Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20) + +## v1.0.19 - CTFE User Quota + +Published 2018-06-01 13:51:52 +0000 UTC + +CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains. 
+ +Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19) + +## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config + +Published 2018-06-01 14:28:20 +0000 UTC + +Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs. + +The `RequestLog` API allows for logging of SCTs when they are issued by CTFE. + +The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package. + +Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18) + +## v1.0.17 - Merkle verification / Tracing / Demo script / CORS + +Published 2018-06-01 14:25:16 +0000 UTC + +Now uses Merkle Tree verification from Trillian. + +The CT server now supports CORS. + +Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ. + +A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project. + +Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17) + +## v1.0.16 - Lifecycle test / Go 1.10.1 + +Published 2018-06-01 14:22:23 +0000 UTC + +An integration test was added that goes through a create / drain queue / freeze lifecycle for a log. + +Changes to `x509` were merged from Go 1.10.1. + +Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16) + +## v1.0.15 - More control of verification, grpclb, stackdriver metrics + +Published 2018-06-01 14:20:32 +0000 UTC + +Facilities were added to the `x509` package to control whether verification checks are applied. + +Log server requests are now balanced using `gRPClb`. + +For Kubernetes, metrics can be published to Stackdriver monitoring. + +Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15) + +## v1.0.14 - SQLite Removed, LeafHashForLeaf + +Published 2018-06-01 14:15:37 +0000 UTC + +Support for SQLite was removed. This motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment. + +A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests. 
+ +Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14) + +## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification + +Published 2018-06-01 14:15:21 +0000 UTC + +Some of our custom crypto package that were wrapping calls to the standard package have been removed and the base features used directly. + +Updates were made to GCE ingress and health checks. + +The log list utility can verify signatures. + +Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13) + +## v1.0.12 - Client / util updates & CTFE fixes + +Published 2018-06-01 14:13:42 +0000 UTC + +The CT client can now use a JSON loglist to find logs. + +CTFE had a fix applied for preissued precerts. + +A DNS client was added and CT client was extended to support DNS retrieval. + +Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12) + +## v1.0.11 - Kubernetes CI / Integration fixes + +Published 2018-06-01 14:12:18 +0000 UTC + +Updates to Kubernetes configs, mostly related to running a CI instance. + +Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11) + +## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates + +Published 2018-06-01 14:09:47 +0000 UTC + +The CT client was using the wrong protobuffer library package. To guard against this in future a check has been added to our lint config. + +The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1. + +Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10) + +## v1.0.9 - Scanner, x509, utility and client fixes + +Published 2018-06-01 14:11:13 +0000 UTC + +The `scanner` utility now displays throughput stats. + +Build instructions and README files were updated. + +The `certcheck` utility can be told to ignore unknown critical X.509 extensions. + +Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9) + +## v1.0.8 - Client fixes, align with trillian repo + +Published 2018-06-01 14:06:44 +0000 UTC + + + +Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8) + +## v1.0.7 - CTFE fixes + +Published 2018-06-01 14:06:13 +0000 UTC + +An issue was fixed with CTFE signature caching. 
In an unlikely set of circumstances this could lead to log mis-operation. While the chances of this are small, we recommend that versions prior to this one are not deployed. + +Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7) + +## v1.0.6 - crlcheck improvements / other fixes + +Published 2018-06-01 14:04:22 +0000 UTC + +The `crlcheck` utility has had several fixes and enhancements. Additionally the `hammer` now supports temporal logs. + +Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6) + +## v1.0.5 - X509 and asn1 fixes + +Published 2018-06-01 14:02:58 +0000 UTC + +This release is mostly fixes to the `x509` and `asn1` packages. Some command line utilities were also updated. + +Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5) + +## v1.0.4 - Multi log backend configs + +Published 2018-06-01 14:02:07 +0000 UTC + +Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. It allows for e.g. regional backend deployment with common frontend servers. + +Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4) + +## v1.0.3 - Hammer updates, use standard context + +Published 2018-06-01 14:01:11 +0000 UTC + +After the Go 1.9 migration references to anything other than the standard `context` package have been removed. This is the only one that should be used from now on. + +Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3) + +## v1.0.2 - Go 1.9 + +Published 2018-06-01 14:00:00 +0000 UTC + +Go 1.9 is now required to build the code. + +Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2) + +## v1.0.1 - Hammer and client improvements + +Published 2018-06-01 13:59:29 +0000 UTC + + + +Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1) + +## v1.0 - First Trillian CT Release + +Published 2018-06-01 13:59:00 +0000 UTC + +This is the point that corresponds to the 1.0 release in the trillian repo. 
+ +Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0) diff --git a/vendor/github.com/google/certificate-transparency-go/CODEOWNERS b/vendor/github.com/google/certificate-transparency-go/CODEOWNERS new file mode 100644 index 000000000000..0c931e87ce2f --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/CODEOWNERS @@ -0,0 +1 @@ +* @google/certificate-transparency diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md new file mode 100644 index 000000000000..43de4c9d4709 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md @@ -0,0 +1,58 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +a just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. This is not a copyright **assignment**, it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. + +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + +Once your CLA is submitted (or if you already submitted one for +another Google project), make a commit adding yourself to the +[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part +of your first [pull request][]. + +[AUTHORS]: AUTHORS +[CONTRIBUTORS]: CONTRIBUTORS + + +## Submitting a patch ## + + 1. It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1. Follow the normal process of [forking][] the project, and setup a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Do your best to have [well-formed commit messages][] for each change. + This provides consistency throughout the project, and ensures that commit + messages are able to be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. 
+ +[forking]: https://help.github.com/articles/fork-a-repo +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS new file mode 100644 index 000000000000..3a98a7e1ef56 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS @@ -0,0 +1,62 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. +# +# Names should be added to this file as: +# Name +# +# Please keep the list sorted. + +Adam Eijdenberg +Al Cutter +Alex Cohn +Ben Laurie +Chris Kennelly +David Drysdale +Deyan Bektchiev +Ed Maste +Elisha Silas +Emilia Kasper +Eran Messeri +Fiaz Hossain +Gary Belvin +Jeff Trawick +Joe Tsai +Kat Joyce +Katriel Cohn-Gordon +Kiril Nikolov +Konrad Kraszewski +Laël Cellier +Linus Nordberg +Mark Schloesser +Nicholas Galbreath +Oliver Weidner +Pascal Leroy +Paul Hadfield +Paul Lietar +Pavel Kalinnikov +Pierre Phaneuf +Rob Percival +Rob Stradling +Roger Ng +Roland Shoemaker +Ruslan Kovalov +Samuel Lidén Borell +Tatiana Merkulova +Vladimir Rutsky +Ximin Luo diff --git a/vendor/github.com/google/certificate-transparency-go/LICENSE b/vendor/github.com/google/certificate-transparency-go/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000000..c3c0feb3abcf --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,16 @@ + + +### Checklist + + + +- [ ] I have updated the [CHANGELOG](CHANGELOG.md). + - Adjust the draft version number according to [semantic versioning](https://semver.org/) rules. +- [ ] I have updated [documentation](docs/) accordingly. 
diff --git a/vendor/github.com/google/certificate-transparency-go/README.md b/vendor/github.com/google/certificate-transparency-go/README.md new file mode 100644 index 000000000000..bade700508e6 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/README.md @@ -0,0 +1,120 @@ +# Certificate Transparency: Go Code + +[![Go Report Card](https://goreportcard.com/badge/github.com/google/certificate-transparency-go)](https://goreportcard.com/report/github.com/google/certificate-transparency-go) +[![GoDoc](https://godoc.org/github.com/google/certificate-transparency-go?status.svg)](https://godoc.org/github.com/google/certificate-transparency-go) +![CodeQL workflow](https://github.com/google/certificate-transparency-go/actions/workflows/codeql.yml/badge.svg) + +This repository holds Go code related to +[Certificate Transparency](https://www.certificate-transparency.org/) (CT). The +repository requires Go version 1.23. + + - [Repository Structure](#repository-structure) + - [Trillian CT Personality](#trillian-ct-personality) + - [Working on the Code](#working-on-the-code) + - [Running Codebase Checks](#running-codebase-checks) + - [Rebuilding Generated Code](#rebuilding-generated-code) + +## Support + +- Slack: https://transparency-dev.slack.com/ ([invitation](https://join.slack.com/t/transparency-dev/shared_invite/zt-27pkqo21d-okUFhur7YZ0rFoJVIOPznQ)) + +## Repository Structure + +The main parts of the repository are: + + - Encoding libraries: + - `asn1/` and `x509/` are forks of the upstream Go `encoding/asn1` and + `crypto/x509` libraries. We maintain separate forks of these packages + because CT is intended to act as an observatory of certificates across the + ecosystem; as such, we need to be able to process somewhat-malformed + certificates that the stricter upstream code would (correctly) reject. + Our `x509` fork also includes code for working with the + [pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1). + - `tls` holds a library for processing TLS-encoded data as described in + [RFC 5246](https://tools.ietf.org/html/rfc5246). + - `x509util/` provides additional utilities for dealing with + `x509.Certificate`s. + - CT client libraries: + - The top-level `ct` package (in `.`) holds types and utilities for working + with CT data structures defined in + [RFC 6962](https://tools.ietf.org/html/rfc6962). + - `client/` and `jsonclient/` hold libraries that allow access to CT Logs + via HTTP entrypoints described in + [section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4). + - `dnsclient/` has a library that allows access to CT Logs over + [DNS](https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md). + - `scanner/` holds a library for scanning the entire contents of an existing + CT Log. + - CT Personality for [Trillian](https://github.com/google/trillian): + - `trillian/` holds code that allows a Certificate Transparency Log to be + run using a Trillian Log as its back-end -- see + [below](#trillian-ct-personality). + - Command line tools: + - `./client/ctclient` allows interaction with a CT Log. + - `./ctutil/sctcheck` allows SCTs (signed certificate timestamps) from a CT + Log to be verified. + - `./scanner/scanlog` allows an existing CT Log to be scanned for certificates + of interest; please be polite when running this tool against a Log. 
+ - `./x509util/certcheck` allows display and verification of certificates + - `./x509util/crlcheck` allows display and verification of certificate + revocation lists (CRLs). + - Other libraries related to CT: + - `ctutil/` holds utility functions for validating and verifying CT data + structures. + - `loglist3/` has a library for reading + [v3 JSON lists of CT Logs](https://groups.google.com/a/chromium.org/g/ct-policy/c/IdbrdAcDQto/m/i5KPyzYwBAAJ). + + +## Trillian CT Personality + +The `trillian/` subdirectory holds code and scripts for running a CT Log based +on the [Trillian](https://github.com/google/trillian) general transparency Log, +and is [documented separately](trillian/README.md). + + +## Working on the Code + +Developers who want to make changes to the codebase need some additional +dependencies and tools, described in the following sections. + +### Running Codebase Checks + +The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools +and tests over the codebase; please ensure this script passes before sending +pull requests for review. + +```bash +# Install golangci-lint +go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0 + +# Run code generation, build, test and linters +./scripts/presubmit.sh + +# Run build, test and linters but skip code generation +./scripts/presubmit.sh --no-generate + +# Or just run the linters alone: +golangci-lint run +``` + +### Rebuilding Generated Code + +Some of the CT Go code is autogenerated from other files: + +- [Protocol buffer](https://developers.google.com/protocol-buffers/) message + definitions are converted to `.pb.go` implementations. +- A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is + created with [GoMock](https://github.com/golang/mock). + +Re-generating mock or protobuffer files is only needed if you're changing +the original files; if you do, you'll need to install the prerequisites: + +- tools written in `go` can be installed with a single run of `go install` + (courtesy of [`tools.go`](./tools/tools.go) and `go.mod`). +- `protoc` tool: you'll need [version 3.20.1](https://github.com/protocolbuffers/protobuf/releases/tag/v3.20.1) installed, and `PATH` updated to include its `bin/` directory. + +With tools installed, run the following: + +```bash +go generate -x ./... # hunts for //go:generate comments and runs them +``` diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/README.md b/vendor/github.com/google/certificate-transparency-go/asn1/README.md new file mode 100644 index 000000000000..a42ac4ebe331 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/asn1/README.md @@ -0,0 +1,7 @@ +# Important Notice + +This is a fork of the `encoding/asn1` Go package. The original source can be found on +[GitHub](https://github.com/golang/go). + +Be careful about making local modifications to this code as it will +make maintenance harder in future. diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go b/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go new file mode 100644 index 000000000000..aaca5fd2606f --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go @@ -0,0 +1,1195 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 implements parsing of DER-encoded ASN.1 data structures, +// as defined in ITU-T Rec X.690. 
+// +// See also “A Layman's Guide to a Subset of ASN.1, BER, and DER,” +// http://luca.ntop.org/Teaching/Appunti/asn1.html. +// +// This is a fork of the Go standard library ASN.1 implementation +// (encoding/asn1), with the aim of relaxing checks for various things +// that are common errors present in many X.509 certificates in the +// wild. +// +// Main differences: +// - Extra "lax" tag that recursively applies and relaxes some strict +// checks: +// - parsePrintableString() copes with invalid PrintableString contents, +// e.g. use of tagPrintableString when the string data is really +// ISO8859-1. +// - checkInteger() allows integers that are not minimally encoded (and +// so are not correct DER). +// - parseObjectIdentifier() allows zero-length OIDs. +// - Better diagnostics on which particular field causes errors. +package asn1 + +// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc +// are different encoding formats for those objects. Here, we'll be dealing +// with DER, the Distinguished Encoding Rules. DER is used in X.509 because +// it's fast to parse and, unlike BER, has a unique encoding for every object. +// When calculating hashes over objects, it's important that the resulting +// bytes be the same at both ends and DER removes this margin of error. +// +// ASN.1 is very complex and this package doesn't attempt to implement +// everything by any means. + +import ( + "errors" + "fmt" + "math" + "math/big" + "reflect" + "strconv" + "time" + "unicode/utf16" + "unicode/utf8" +) + +// A StructuralError suggests that the ASN.1 data is valid, but the Go type +// which is receiving it doesn't match. +type StructuralError struct { + Msg string + Field string +} + +func (e StructuralError) Error() string { + var prefix string + if e.Field != "" { + prefix = e.Field + ": " + } + return "asn1: structure error: " + prefix + e.Msg +} + +// A SyntaxError suggests that the ASN.1 data is invalid. +type SyntaxError struct { + Msg string + Field string +} + +func (e SyntaxError) Error() string { + var prefix string + if e.Field != "" { + prefix = e.Field + ": " + } + return "asn1: syntax error: " + prefix + e.Msg +} + +// We start by dealing with each of the primitive types in turn. + +// BOOLEAN + +func parseBool(bytes []byte, fieldName string) (ret bool, err error) { + if len(bytes) != 1 { + err = SyntaxError{"invalid boolean", fieldName} + return + } + + // DER demands that "If the encoding represents the boolean value TRUE, + // its single contents octet shall have all eight bits set to one." + // Thus only 0 and 255 are valid encoded values. + switch bytes[0] { + case 0: + ret = false + case 0xff: + ret = true + default: + err = SyntaxError{"invalid boolean", fieldName} + } + + return +} + +// INTEGER + +// checkInteger returns nil if the given bytes are a valid DER-encoded +// INTEGER and an error otherwise. +func checkInteger(bytes []byte, lax bool, fieldName string) error { + if len(bytes) == 0 { + return StructuralError{"empty integer", fieldName} + } + if len(bytes) == 1 { + return nil + } + if lax { + return nil + } + if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) { + return StructuralError{"integer not minimally-encoded", fieldName} + } + return nil +} + +// parseInt64 treats the given bytes as a big-endian, signed integer and +// returns the result. 
+func parseInt64(bytes []byte, lax bool, fieldName string) (ret int64, err error) {
+	err = checkInteger(bytes, lax, fieldName)
+	if err != nil {
+		return
+	}
+	if len(bytes) > 8 {
+		// We'll overflow an int64 in this case.
+		err = StructuralError{"integer too large", fieldName}
+		return
+	}
+	for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+		ret <<= 8
+		ret |= int64(bytes[bytesRead])
+	}
+
+	// Shift up and down in order to sign extend the result.
+	ret <<= 64 - uint8(len(bytes))*8
+	ret >>= 64 - uint8(len(bytes))*8
+	return
+}
+
+// parseInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseInt32(bytes []byte, lax bool, fieldName string) (int32, error) {
+	if err := checkInteger(bytes, lax, fieldName); err != nil {
+		return 0, err
+	}
+	ret64, err := parseInt64(bytes, lax, fieldName)
+	if err != nil {
+		return 0, err
+	}
+	if ret64 != int64(int32(ret64)) {
+		return 0, StructuralError{"integer too large", fieldName}
+	}
+	return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte, lax bool, fieldName string) (*big.Int, error) {
+	if err := checkInteger(bytes, lax, fieldName); err != nil {
+		return nil, err
+	}
+	ret := new(big.Int)
+	if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+		// This is a negative number.
+		notBytes := make([]byte, len(bytes))
+		for i := range notBytes {
+			notBytes[i] = ^bytes[i]
+		}
+		ret.SetBytes(notBytes)
+		ret.Add(ret, bigOne)
+		ret.Neg(ret)
+		return ret, nil
+	}
+	ret.SetBytes(bytes)
+	return ret, nil
+}
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+	Bytes     []byte // bits packed into bytes.
+	BitLength int    // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns false.
+func (b BitString) At(i int) int {
+	if i < 0 || i >= b.BitLength {
+		return 0
+	}
+	x := i / 8
+	y := 7 - uint(i%8)
+	return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+	shift := uint(8 - (b.BitLength % 8))
+	if shift == 8 || len(b.Bytes) == 0 {
+		return b.Bytes
+	}
+
+	a := make([]byte, len(b.Bytes))
+	a[0] = b.Bytes[0] >> shift
+	for i := 1; i < len(b.Bytes); i++ {
+		a[i] = b.Bytes[i-1] << (8 - shift)
+		a[i] |= b.Bytes[i] >> shift
+	}
+
+	return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte, fieldName string) (ret BitString, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length BIT STRING", fieldName}
+		return
+	}
+	paddingBits := int(bytes[0])
+	if paddingBits > 7 ||
+		len(bytes) == 1 && paddingBits > 0 ||
+		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+		err = SyntaxError{"invalid padding bits in BIT STRING", fieldName}
+		return
+	}
+	ret.BitLength = (len(bytes)-1)*8 - paddingBits
+	ret.Bytes = bytes[1:]
+	return
+}
+
+// NULL
+
+// NullRawValue is a RawValue with its Tag set to the ASN.1 NULL type tag (5).
+var NullRawValue = RawValue{Tag: TagNull}
+
+// NullBytes contains bytes representing the DER-encoded ASN.1 NULL type.
+var NullBytes = []byte{TagNull, 0}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+	if len(oi) != len(other) {
+		return false
+	}
+	for i := 0; i < len(oi); i++ {
+		if oi[i] != other[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (oi ObjectIdentifier) String() string {
+	var s string
+
+	for i, v := range oi {
+		if i > 0 {
+			s += "."
+		}
+		s += strconv.Itoa(v)
+	}
+
+	return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte, lax bool, fieldName string) (s ObjectIdentifier, err error) { + if len(bytes) == 0 { + if lax { + return ObjectIdentifier{}, nil + } + err = SyntaxError{"zero length OBJECT IDENTIFIER", fieldName} + return + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + s = make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + v, offset, err := parseBase128Int(bytes, 0, fieldName) + if err != nil { + return + } + if v < 80 { + s[0] = v / 40 + s[1] = v % 40 + } else { + s[0] = 2 + s[1] = v - 80 + } + + i := 2 + for ; offset < len(bytes); i++ { + v, offset, err = parseBase128Int(bytes, offset, fieldName) + if err != nil { + return + } + s[i] = v + } + s = s[0:i] + return +} + +// ENUMERATED + +// An Enumerated is represented as a plain int. +type Enumerated int + +// FLAG + +// A Flag accepts any data and is set to true if present. +type Flag bool + +// parseBase128Int parses a base-128 encoded int from the given offset in the +// given byte slice. It returns the value and the new offset. +func parseBase128Int(bytes []byte, initOffset int, fieldName string) (ret, offset int, err error) { + offset = initOffset + var ret64 int64 + for shifted := 0; offset < len(bytes); shifted++ { + // 5 * 7 bits per byte == 35 bits of data + // Thus the representation is either non-minimal or too large for an int32 + if shifted == 5 { + err = StructuralError{"base 128 integer too large", fieldName} + return + } + ret64 <<= 7 + b := bytes[offset] + ret64 |= int64(b & 0x7f) + offset++ + if b&0x80 == 0 { + ret = int(ret64) + // Ensure that the returned value fits in an int on all platforms + if ret64 > math.MaxInt32 { + err = StructuralError{"base 128 integer too large", fieldName} + } + return + } + } + err = SyntaxError{"truncated base 128 integer", fieldName} + return +} + +// UTCTime + +func parseUTCTime(bytes []byte) (ret time.Time, err error) { + s := string(bytes) + + formatStr := "0601021504Z0700" + ret, err = time.Parse(formatStr, s) + if err != nil { + formatStr = "060102150405Z0700" + ret, err = time.Parse(formatStr, s) + } + if err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + return + } + + if ret.Year() >= 2050 { + // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + ret = ret.AddDate(-100, 0, 0) + } + + return +} + +// parseGeneralizedTime parses the GeneralizedTime from the given byte slice +// and returns the resulting time. +func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { + const formatStr = "20060102150405Z0700" + s := string(bytes) + + if ret, err = time.Parse(formatStr, s); err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + } + + return +} + +// NumericString + +// parseNumericString parses an ASN.1 NumericString from the given byte array +// and returns it. 
+func parseNumericString(bytes []byte, fieldName string) (ret string, err error) { + for _, b := range bytes { + if !isNumeric(b) { + return "", SyntaxError{"NumericString contains invalid character", fieldName} + } + } + return string(bytes), nil +} + +// isNumeric reports whether the given b is in the ASN.1 NumericString set. +func isNumeric(b byte) bool { + return '0' <= b && b <= '9' || + b == ' ' +} + +// PrintableString + +// parsePrintableString parses an ASN.1 PrintableString from the given byte +// array and returns it. +func parsePrintableString(bytes []byte, lax bool, fieldName string) (ret string, err error) { + for _, b := range bytes { + if !isPrintable(b, allowAsterisk, allowAmpersand) { + if !lax { + err = SyntaxError{"PrintableString contains invalid character", fieldName} + } else { + // Might be an ISO8859-1 string stuffed in, check if it + // would be valid and assume that's what's happened if so, + // otherwise try T.61, failing that give up and just assign + // the bytes + switch { + case couldBeISO8859_1(bytes): + ret, err = iso8859_1ToUTF8(bytes), nil + case couldBeT61(bytes): + ret, err = parseT61String(bytes) + default: + err = SyntaxError{"PrintableString contains invalid character, couldn't determine correct String type", fieldName} + } + } + return + } + } + ret = string(bytes) + return +} + +type asteriskFlag bool +type ampersandFlag bool + +const ( + allowAsterisk asteriskFlag = true + rejectAsterisk asteriskFlag = false + + allowAmpersand ampersandFlag = true + rejectAmpersand ampersandFlag = false +) + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +// If asterisk is allowAsterisk then '*' is also allowed, reflecting existing +// practice. If ampersand is allowAmpersand then '&' is allowed as well. +func isPrintable(b byte, asterisk asteriskFlag, ampersand ampersandFlag) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + (bool(asterisk) && b == '*') || + // This is not technically allowed either. However, not + // only is it relatively common, but there are also a + // handful of CA certificates that contain it. At least + // one of which will not expire until 2027. + (bool(ampersand) && b == '&') +} + +// IA5String + +// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given +// byte slice and returns it. +func parseIA5String(bytes []byte, fieldName string) (ret string, err error) { + for _, b := range bytes { + if b >= utf8.RuneSelf { + err = SyntaxError{"IA5String contains invalid character", fieldName} + return + } + } + ret = string(bytes) + return +} + +// T61String + +// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given +// byte slice and returns it. +func parseT61String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// UTF8String + +// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte +// array and returns it. 
+func parseUTF8String(bytes []byte) (ret string, err error) { + if !utf8.Valid(bytes) { + return "", errors.New("asn1: invalid UTF-8 string") + } + return string(bytes), nil +} + +// BMPString + +// parseBMPString parses an ASN.1 BMPString (Basic Multilingual Plane of +// ISO/IEC/ITU 10646-1) from the given byte slice and returns it. +func parseBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // Strip terminator if present. + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} + +// A RawValue represents an undecoded ASN.1 object. +type RawValue struct { + Class, Tag int + IsCompound bool + Bytes []byte + FullBytes []byte // includes the tag and length +} + +// RawContent is used to signal that the undecoded, DER data needs to be +// preserved for a struct. To use it, the first field of the struct must have +// this type. It's an error for any of the other fields to have this type. +type RawContent []byte + +// Tagging + +// parseTagAndLength parses an ASN.1 tag and length pair from the given offset +// into a byte slice. It returns the parsed data and the new offset. SET and +// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we +// don't distinguish between ordered and unordered objects in this code. +func parseTagAndLength(bytes []byte, initOffset int, fieldName string) (ret tagAndLength, offset int, err error) { + offset = initOffset + // parseTagAndLength should not be called without at least a single + // byte to read. Thus this check is for robustness: + if offset >= len(bytes) { + err = errors.New("asn1: internal error in parseTagAndLength") + return + } + b := bytes[offset] + offset++ + ret.class = int(b >> 6) + ret.isCompound = b&0x20 == 0x20 + ret.tag = int(b & 0x1f) + + // If the bottom five bits are set, then the tag number is actually base 128 + // encoded afterwards + if ret.tag == 0x1f { + ret.tag, offset, err = parseBase128Int(bytes, offset, fieldName) + if err != nil { + return + } + // Tags should be encoded in minimal form. + if ret.tag < 0x1f { + err = SyntaxError{"non-minimal tag", fieldName} + return + } + } + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length", fieldName} + return + } + b = bytes[offset] + offset++ + if b&0x80 == 0 { + // The length is encoded in the bottom 7 bits. + ret.length = int(b & 0x7f) + } else { + // Bottom 7 bits give the number of length bytes to follow. + numBytes := int(b & 0x7f) + if numBytes == 0 { + err = SyntaxError{"indefinite length found (not DER)", fieldName} + return + } + ret.length = 0 + for i := 0; i < numBytes; i++ { + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length", fieldName} + return + } + b = bytes[offset] + offset++ + if ret.length >= 1<<23 { + // We can't shift ret.length up without + // overflowing. + err = StructuralError{"length too large", fieldName} + return + } + ret.length <<= 8 + ret.length |= int(b) + if ret.length == 0 { + // DER requires that lengths be minimal. + err = StructuralError{"superfluous leading zeros in length", fieldName} + return + } + } + // Short lengths must be encoded in short form. 
+ if ret.length < 0x80 { + err = StructuralError{"non-minimal length", fieldName} + return + } + } + + return +} + +// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse +// a number of ASN.1 values from the given byte slice and returns them as a +// slice of Go values of the given type. +func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, lax bool, fieldName string) (ret reflect.Value, err error) { + matchAny, expectedTag, compoundType, ok := getUniversalType(elemType) + if !ok { + err = StructuralError{"unknown Go type for slice", fieldName} + return + } + + // First we iterate over the input and count the number of elements, + // checking that the types are correct in each case. + numElements := 0 + for offset := 0; offset < len(bytes); { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset, fieldName) + if err != nil { + return + } + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString: + // We pretend that various other string types are + // PRINTABLE STRINGs so that a sequence of them can be + // parsed into a []string. + t.tag = TagPrintableString + case TagGeneralizedTime, TagUTCTime: + // Likewise, both time types are treated the same. + t.tag = TagUTCTime + } + + if !matchAny && (t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag) { + err = StructuralError{fmt.Sprintf("sequence tag mismatch (got:%+v, want:0/%d/%t)", t, expectedTag, compoundType), fieldName} + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"truncated sequence", fieldName} + return + } + offset += t.length + numElements++ + } + ret = reflect.MakeSlice(sliceType, numElements, numElements) + params := fieldParameters{lax: lax} + offset := 0 + for i := 0; i < numElements; i++ { + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return + } + } + return +} + +var ( + bitStringType = reflect.TypeOf(BitString{}) + objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) + enumeratedType = reflect.TypeOf(Enumerated(0)) + flagType = reflect.TypeOf(Flag(false)) + timeType = reflect.TypeOf(time.Time{}) + rawValueType = reflect.TypeOf(RawValue{}) + rawContentsType = reflect.TypeOf(RawContent(nil)) + bigIntType = reflect.TypeOf(new(big.Int)) +) + +// invalidLength reports whether offset + length > sliceLength, or if the +// addition would overflow. +func invalidLength(offset, length, sliceLength int) bool { + return offset+length < offset || offset+length > sliceLength +} + +// Tests whether the data in |bytes| would be a valid ISO8859-1 string. +// Clearly, a sequence of bytes comprised solely of valid ISO8859-1 +// codepoints does not imply that the encoding MUST be ISO8859-1, rather that +// you would not encounter an error trying to interpret the data as such. +func couldBeISO8859_1(bytes []byte) bool { + for _, b := range bytes { + if b < 0x20 || (b >= 0x7F && b < 0xA0) { + return false + } + } + return true +} + +// Checks whether the data in |bytes| would be a valid T.61 string. +// Clearly, a sequence of bytes comprised solely of valid T.61 +// codepoints does not imply that the encoding MUST be T.61, rather that +// you would not encounter an error trying to interpret the data as such. 
+func couldBeT61(bytes []byte) bool { + for _, b := range bytes { + switch b { + case 0x00: + // Since we're guessing at (incorrect) encodings for a + // PrintableString, we'll err on the side of caution and disallow + // strings with a NUL in them, don't want to re-create a PayPal NUL + // situation in monitors. + fallthrough + case 0x23, 0x24, 0x5C, 0x5E, 0x60, 0x7B, 0x7D, 0x7E, 0xA5, 0xA6, 0xAC, 0xAD, 0xAE, 0xAF, + 0xB9, 0xBA, 0xC0, 0xC9, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, + 0xDA, 0xDB, 0xDC, 0xDE, 0xDF, 0xE5, 0xFF: + // These are all invalid code points in T.61, so it can't be a T.61 string. + return false + } + } + return true +} + +// Converts the data in |bytes| to the equivalent UTF-8 string. +func iso8859_1ToUTF8(bytes []byte) string { + buf := make([]rune, len(bytes)) + for i, b := range bytes { + buf[i] = rune(b) + } + return string(buf) +} + +// parseField is the main parsing function. Given a byte slice and an offset +// into the array, it will try to parse a suitable ASN.1 value out and store it +// in the given Value. +func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { + offset = initOffset + fieldType := v.Type() + + // If we have run out of data, it may be that there are optional elements at the end. + if offset == len(bytes) { + if !setDefaultValue(v, params) { + err = SyntaxError{"sequence truncated", params.name} + } + return + } + + // Deal with the ANY type. + if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated", params.name} + return + } + var result interface{} + if !t.isCompound && t.class == ClassUniversal { + innerBytes := bytes[offset : offset+t.length] + switch t.tag { + case TagPrintableString: + result, err = parsePrintableString(innerBytes, params.lax, params.name) + case TagNumericString: + result, err = parseNumericString(innerBytes, params.name) + case TagIA5String: + result, err = parseIA5String(innerBytes, params.name) + case TagT61String: + result, err = parseT61String(innerBytes) + case TagUTF8String: + result, err = parseUTF8String(innerBytes) + case TagInteger: + result, err = parseInt64(innerBytes, params.lax, params.name) + case TagBitString: + result, err = parseBitString(innerBytes, params.name) + case TagOID: + result, err = parseObjectIdentifier(innerBytes, params.lax, params.name) + case TagUTCTime: + result, err = parseUTCTime(innerBytes) + case TagGeneralizedTime: + result, err = parseGeneralizedTime(innerBytes) + case TagOctetString: + result = innerBytes + case TagBMPString: + result, err = parseBMPString(innerBytes) + default: + // If we don't know how to handle the type, we just leave Value as nil. 
+ } + } + offset += t.length + if err != nil { + return + } + if result != nil { + v.Set(reflect.ValueOf(result)) + } + return + } + + t, offset, err := parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + if params.explicit { + expectedClass := ClassContextSpecific + if params.application { + expectedClass = ClassApplication + } + if offset == len(bytes) { + err = StructuralError{"explicit tag has no child", params.name} + return + } + if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { + if fieldType == rawValueType { + // The inner element should not be parsed for RawValues. + } else if t.length > 0 { + t, offset, err = parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + } else { + if fieldType != flagType { + err = StructuralError{"zero length explicit tag was not an asn1.Flag", params.name} + return + } + v.SetBool(true) + return + } + } else { + // The tags didn't match, it might be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{"explicitly tagged member didn't match", params.name} + } + return + } + } + + matchAny, universalTag, compoundType, ok1 := getUniversalType(fieldType) + if !ok1 { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType), params.name} + return + } + + // Special case for strings: all the ASN.1 string types map to the Go + // type string. getUniversalType returns the tag for PrintableString + // when it sees a string, so if we see a different string type on the + // wire, we change the universal type to match. + if universalTag == TagPrintableString { + if t.class == ClassUniversal { + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString: + universalTag = t.tag + } + } else if params.stringType != 0 { + universalTag = params.stringType + } + } + + // Special case for time: UTCTime and GeneralizedTime both map to the + // Go type time.Time. + if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { + universalTag = TagGeneralizedTime + } + + if params.set { + universalTag = TagSet + } + + matchAnyClassAndTag := matchAny + expectedClass := ClassUniversal + expectedTag := universalTag + + if !params.explicit && params.tag != nil { + expectedClass = ClassContextSpecific + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + if !params.explicit && params.application && params.tag != nil { + expectedClass = ClassApplication + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + if !params.explicit && params.private && params.tag != nil { + expectedClass = ClassPrivate + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + // We have unwrapped any explicit tagging at this point. + if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) || + (!matchAny && t.isCompound != compoundType) { + // Tags don't match. Again, it could be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset), params.name} + } + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated", params.name} + return + } + innerBytes := bytes[offset : offset+t.length] + offset += t.length + + // We deal with the structures defined in this package first. 
+ switch fieldType { + case rawValueType: + result := RawValue{t.class, t.tag, t.isCompound, innerBytes, bytes[initOffset:offset]} + v.Set(reflect.ValueOf(result)) + return + case objectIdentifierType: + newSlice, err1 := parseObjectIdentifier(innerBytes, params.lax, params.name) + v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) + if err1 == nil { + reflect.Copy(v, reflect.ValueOf(newSlice)) + } + err = err1 + return + case bitStringType: + bs, err1 := parseBitString(innerBytes, params.name) + if err1 == nil { + v.Set(reflect.ValueOf(bs)) + } + err = err1 + return + case timeType: + var time time.Time + var err1 error + if universalTag == TagUTCTime { + time, err1 = parseUTCTime(innerBytes) + } else { + time, err1 = parseGeneralizedTime(innerBytes) + } + if err1 == nil { + v.Set(reflect.ValueOf(time)) + } + err = err1 + return + case enumeratedType: + parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name) + if err1 == nil { + v.SetInt(int64(parsedInt)) + } + err = err1 + return + case flagType: + v.SetBool(true) + return + case bigIntType: + parsedInt, err1 := parseBigInt(innerBytes, params.lax, params.name) + if err1 == nil { + v.Set(reflect.ValueOf(parsedInt)) + } + err = err1 + return + } + switch val := v; val.Kind() { + case reflect.Bool: + parsedBool, err1 := parseBool(innerBytes, params.name) + if err1 == nil { + val.SetBool(parsedBool) + } + err = err1 + return + case reflect.Int, reflect.Int32, reflect.Int64: + if val.Type().Size() == 4 { + parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name) + if err1 == nil { + val.SetInt(int64(parsedInt)) + } + err = err1 + } else { + parsedInt, err1 := parseInt64(innerBytes, params.lax, params.name) + if err1 == nil { + val.SetInt(parsedInt) + } + err = err1 + } + return + // TODO(dfc) Add support for the remaining integer types + case reflect.Struct: + structType := fieldType + + for i := 0; i < structType.NumField(); i++ { + if structType.Field(i).PkgPath != "" { + err = StructuralError{"struct contains unexported fields", structType.Field(i).Name} + return + } + } + + if structType.NumField() > 0 && + structType.Field(0).Type == rawContentsType { + bytes := bytes[initOffset:offset] + val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) + } + + innerOffset := 0 + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if i == 0 && field.Type == rawContentsType { + continue + } + innerParams := parseFieldParameters(field.Tag.Get("asn1")) + innerParams.name = field.Name + innerParams.lax = params.lax + innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, innerParams) + if err != nil { + return + } + } + // We allow extra bytes at the end of the SEQUENCE because + // adding elements to the end has been used in X.509 as the + // version numbers have increased. 
+ return + case reflect.Slice: + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) + reflect.Copy(val, reflect.ValueOf(innerBytes)) + return + } + newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.lax, params.name) + if err1 == nil { + val.Set(newSlice) + } + err = err1 + return + case reflect.String: + var v string + switch universalTag { + case TagPrintableString: + v, err = parsePrintableString(innerBytes, params.lax, params.name) + case TagNumericString: + v, err = parseNumericString(innerBytes, params.name) + case TagIA5String: + v, err = parseIA5String(innerBytes, params.name) + case TagT61String: + v, err = parseT61String(innerBytes) + case TagUTF8String: + v, err = parseUTF8String(innerBytes) + case TagGeneralString: + // GeneralString is specified in ISO-2022/ECMA-35, + // A brief review suggests that it includes structures + // that allow the encoding to change midstring and + // such. We give up and pass it as an 8-bit string. + v, err = parseT61String(innerBytes) + case TagBMPString: + v, err = parseBMPString(innerBytes) + + default: + err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag), params.name} + } + if err == nil { + val.SetString(v) + } + return + } + err = StructuralError{"unsupported: " + v.Type().String(), params.name} + return +} + +// canHaveDefaultValue reports whether k is a Kind that we will set a default +// value for. (A signed integer, essentially.) +func canHaveDefaultValue(k reflect.Kind) bool { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + } + + return false +} + +// setDefaultValue is used to install a default value, from a tag string, into +// a Value. It is successful if the field was optional, even if a default value +// wasn't provided or it failed to install it into the Value. +func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { + if !params.optional { + return + } + ok = true + if params.defaultValue == nil { + return + } + if canHaveDefaultValue(v.Kind()) { + v.SetInt(*params.defaultValue) + } + return +} + +// Unmarshal parses the DER-encoded ASN.1 data structure b +// and uses the reflect package to fill in an arbitrary value pointed at by val. +// Because Unmarshal uses the reflect package, the structs +// being written to must use upper case field names. +// +// An ASN.1 INTEGER can be written to an int, int32, int64, +// or *big.Int (from the math/big package). +// If the encoded value does not fit in the Go type, +// Unmarshal returns a parse error. +// +// An ASN.1 BIT STRING can be written to a BitString. +// +// An ASN.1 OCTET STRING can be written to a []byte. +// +// An ASN.1 OBJECT IDENTIFIER can be written to an +// ObjectIdentifier. +// +// An ASN.1 ENUMERATED can be written to an Enumerated. +// +// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. +// +// An ASN.1 PrintableString, IA5String, or NumericString can be written to a string. +// +// Any of the above ASN.1 values can be written to an interface{}. +// The value stored in the interface has the corresponding Go type. +// For integers, that type is int64. +// +// An ASN.1 SEQUENCE OF x or SET OF x can be written +// to a slice if an x can be written to the slice's element type. 
+// +// An ASN.1 SEQUENCE or SET can be written to a struct +// if each of the elements in the sequence can be +// written to the corresponding element in the struct. +// +// The following tags on struct fields have special meaning to Unmarshal: +// +// application specifies that an APPLICATION tag is used +// private specifies that a PRIVATE tag is used +// default:x sets the default value for optional integer fields (only used if optional is also present) +// explicit specifies that an additional, explicit tag wraps the implicit one +// optional marks the field as ASN.1 OPTIONAL +// set causes a SET, rather than a SEQUENCE type to be expected +// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC +// lax relax strict encoding checks for this field, and for any fields within it +// +// If the type of the first field of a structure is RawContent then the raw +// ASN1 contents of the struct will be stored in it. +// +// If the type name of a slice element ends with "SET" then it's treated as if +// the "set" tag was set on it. This can be used with nested slices where a +// struct tag cannot be given. +// +// Other ASN.1 types are not supported; if it encounters them, +// Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, parseFieldParameters(params)) + if err != nil { + return nil, err + } + return b[offset:], nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/common.go b/vendor/github.com/google/certificate-transparency-go/asn1/common.go new file mode 100644 index 000000000000..982d06c09edb --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/asn1/common.go @@ -0,0 +1,187 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "reflect" + "strconv" + "strings" +) + +// ASN.1 objects have metadata preceding them: +// the tag: the type of the object +// a flag denoting if this object is compound or not +// the class type: the namespace of the tag +// the length of the object, in bytes + +// Here are some standard tags and classes + +// ASN.1 tags represent the type of the following object. +const ( + TagBoolean = 1 + TagInteger = 2 + TagBitString = 3 + TagOctetString = 4 + TagNull = 5 + TagOID = 6 + TagEnum = 10 + TagUTF8String = 12 + TagSequence = 16 + TagSet = 17 + TagNumericString = 18 + TagPrintableString = 19 + TagT61String = 20 + TagIA5String = 22 + TagUTCTime = 23 + TagGeneralizedTime = 24 + TagGeneralString = 27 + TagBMPString = 30 +) + +// ASN.1 class types represent the namespace of the tag. +const ( + ClassUniversal = 0 + ClassApplication = 1 + ClassContextSpecific = 2 + ClassPrivate = 3 +) + +type tagAndLength struct { + class, tag, length int + isCompound bool +} + +// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead +// of" and "in addition to". When not specified, every primitive type has a +// default tag in the UNIVERSAL class. 
+// +// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 +// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT +// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. +// +// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an +// /additional/ tag would wrap the default tag. This explicit tag will have the +// compound flag set. +// +// (This is used in order to remove ambiguity with optional elements.) +// +// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we +// don't support that here. We support a single layer of EXPLICIT or IMPLICIT +// tagging with tag strings on the fields of a structure. + +// fieldParameters is the parsed representation of tag string from a structure field. +type fieldParameters struct { + optional bool // true iff the field is OPTIONAL + explicit bool // true iff an EXPLICIT tag is in use. + application bool // true iff an APPLICATION tag is in use. + private bool // true iff a PRIVATE tag is in use. + defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). + tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). + stringType int // the string tag to use when marshaling. + timeType int // the time tag to use when marshaling. + set bool // true iff this should be encoded as a SET + omitEmpty bool // true iff this should be omitted if empty when marshaling. + lax bool // true iff unmarshalling should skip some error checks + name string // name of field for better diagnostics + + // Invariants: + // if explicit is set, tag is non-nil. +} + +// Given a tag string with the format specified in the package comment, +// parseFieldParameters will parse it into a fieldParameters structure, +// ignoring unknown parts of the string. +func parseFieldParameters(str string) (ret fieldParameters) { + for _, part := range strings.Split(str, ",") { + switch { + case part == "optional": + ret.optional = true + case part == "explicit": + ret.explicit = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "generalized": + ret.timeType = TagGeneralizedTime + case part == "utc": + ret.timeType = TagUTCTime + case part == "ia5": + ret.stringType = TagIA5String + case part == "printable": + ret.stringType = TagPrintableString + case part == "numeric": + ret.stringType = TagNumericString + case part == "utf8": + ret.stringType = TagUTF8String + case strings.HasPrefix(part, "default:"): + i, err := strconv.ParseInt(part[8:], 10, 64) + if err == nil { + ret.defaultValue = new(int64) + *ret.defaultValue = i + } + case strings.HasPrefix(part, "tag:"): + i, err := strconv.Atoi(part[4:]) + if err == nil { + ret.tag = new(int) + *ret.tag = i + } + case part == "set": + ret.set = true + case part == "application": + ret.application = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "private": + ret.private = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "omitempty": + ret.omitEmpty = true + case part == "lax": + ret.lax = true + } + } + return +} + +// Given a reflected Go type, getUniversalType returns the default tag number +// and expected compound flag. 
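The tag-string grammar parsed above drives how struct fields are decoded and encoded. The following is a minimal sketch, not part of the vendored sources: the struct, field names, and values are invented for illustration, and it assumes the vendored package is imported under its usual path.

package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/asn1"
)

// exampleTBS exercises the "optional", "explicit", "default" and "tag" parameters.
type exampleTBS struct {
	Version int    `asn1:"optional,explicit,default:0,tag:0"` // [0] EXPLICIT INTEGER DEFAULT 0
	Subject string `asn1:"printable"`                         // PrintableString
}

func main() {
	der, err := asn1.Marshal(exampleTBS{Version: 2, Subject: "example"})
	if err != nil {
		panic(err)
	}
	var decoded exampleTBS
	if _, err := asn1.Unmarshal(der, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %+v\n", decoded) // decoded: {Version:2 Subject:example}
}
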
+func getUniversalType(t reflect.Type) (matchAny bool, tagNumber int, isCompound, ok bool) { + switch t { + case rawValueType: + return true, -1, false, true + case objectIdentifierType: + return false, TagOID, false, true + case bitStringType: + return false, TagBitString, false, true + case timeType: + return false, TagUTCTime, false, true + case enumeratedType: + return false, TagEnum, false, true + case bigIntType: + return false, TagInteger, false, true + } + switch t.Kind() { + case reflect.Bool: + return false, TagBoolean, false, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return false, TagInteger, false, true + case reflect.Struct: + return false, TagSequence, true, true + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return false, TagOctetString, false, true + } + if strings.HasSuffix(t.Name(), "SET") { + return false, TagSet, true, true + } + return false, TagSequence, true, true + case reflect.String: + return false, TagPrintableString, false, true + } + return false, 0, false, false +} diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go b/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go new file mode 100644 index 000000000000..9801b065a182 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go @@ -0,0 +1,691 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "time" + "unicode/utf8" +) + +var ( + byte00Encoder encoder = byteEncoder(0x00) + byteFFEncoder encoder = byteEncoder(0xff) +) + +// encoder represents an ASN.1 element that is waiting to be marshaled. +type encoder interface { + // Len returns the number of bytes needed to marshal this element. + Len() int + // Encode encodes this element by writing Len() bytes to dst. + Encode(dst []byte) +} + +type byteEncoder byte + +func (c byteEncoder) Len() int { + return 1 +} + +func (c byteEncoder) Encode(dst []byte) { + dst[0] = byte(c) +} + +type bytesEncoder []byte + +func (b bytesEncoder) Len() int { + return len(b) +} + +func (b bytesEncoder) Encode(dst []byte) { + if copy(dst, b) != len(b) { + panic("internal error") + } +} + +type stringEncoder string + +func (s stringEncoder) Len() int { + return len(s) +} + +func (s stringEncoder) Encode(dst []byte) { + if copy(dst, s) != len(s) { + panic("internal error") + } +} + +type multiEncoder []encoder + +func (m multiEncoder) Len() int { + var size int + for _, e := range m { + size += e.Len() + } + return size +} + +func (m multiEncoder) Encode(dst []byte) { + var off int + for _, e := range m { + e.Encode(dst[off:]) + off += e.Len() + } +} + +type taggedEncoder struct { + // scratch contains temporary space for encoding the tag and length of + // an element in order to avoid extra allocations. 
+ scratch [8]byte + tag encoder + body encoder +} + +func (t *taggedEncoder) Len() int { + return t.tag.Len() + t.body.Len() +} + +func (t *taggedEncoder) Encode(dst []byte) { + t.tag.Encode(dst) + t.body.Encode(dst[t.tag.Len():]) +} + +type int64Encoder int64 + +func (i int64Encoder) Len() int { + n := 1 + + for i > 127 { + n++ + i >>= 8 + } + + for i < -128 { + n++ + i >>= 8 + } + + return n +} + +func (i int64Encoder) Encode(dst []byte) { + n := i.Len() + + for j := 0; j < n; j++ { + dst[j] = byte(i >> uint((n-1-j)*8)) + } +} + +func base128IntLength(n int64) int { + if n == 0 { + return 1 + } + + l := 0 + for i := n; i > 0; i >>= 7 { + l++ + } + + return l +} + +func appendBase128Int(dst []byte, n int64) []byte { + l := base128IntLength(n) + + for i := l - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + dst = append(dst, o) + } + + return dst +} + +func makeBigInt(n *big.Int, fieldName string) (encoder, error) { + if n == nil { + return nil, StructuralError{"empty integer", fieldName} + } + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll invert and subtract 1. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + return multiEncoder([]encoder{byteFFEncoder, bytesEncoder(bytes)}), nil + } + return bytesEncoder(bytes), nil + } else if n.Sign() == 0 { + // Zero is written as a single 0 zero rather than no bytes. + return byte00Encoder, nil + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with 0x00 in order to stop it + // looking like a negative number. 
+ return multiEncoder([]encoder{byte00Encoder, bytesEncoder(bytes)}), nil + } + return bytesEncoder(bytes), nil + } +} + +func appendLength(dst []byte, i int) []byte { + n := lengthLength(i) + + for ; n > 0; n-- { + dst = append(dst, byte(i>>uint((n-1)*8))) + } + + return dst +} + +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +func appendTagAndLength(dst []byte, t tagAndLength) []byte { + b := uint8(t.class) << 6 + if t.isCompound { + b |= 0x20 + } + if t.tag >= 31 { + b |= 0x1f + dst = append(dst, b) + dst = appendBase128Int(dst, int64(t.tag)) + } else { + b |= uint8(t.tag) + dst = append(dst, b) + } + + if t.length >= 128 { + l := lengthLength(t.length) + dst = append(dst, 0x80|byte(l)) + dst = appendLength(dst, t.length) + } else { + dst = append(dst, byte(t.length)) + } + + return dst +} + +type bitStringEncoder BitString + +func (b bitStringEncoder) Len() int { + return len(b.Bytes) + 1 +} + +func (b bitStringEncoder) Encode(dst []byte) { + dst[0] = byte((8 - b.BitLength%8) % 8) + if copy(dst[1:], b.Bytes) != len(b.Bytes) { + panic("internal error") + } +} + +type oidEncoder []int + +func (oid oidEncoder) Len() int { + l := base128IntLength(int64(oid[0]*40 + oid[1])) + for i := 2; i < len(oid); i++ { + l += base128IntLength(int64(oid[i])) + } + return l +} + +func (oid oidEncoder) Encode(dst []byte) { + dst = appendBase128Int(dst[:0], int64(oid[0]*40+oid[1])) + for i := 2; i < len(oid); i++ { + dst = appendBase128Int(dst, int64(oid[i])) + } +} + +func makeObjectIdentifier(oid []int, fieldName string) (e encoder, err error) { + if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return nil, StructuralError{"invalid object identifier", fieldName} + } + + return oidEncoder(oid), nil +} + +func makePrintableString(s, fieldName string) (e encoder, err error) { + for i := 0; i < len(s); i++ { + // The asterisk is often used in PrintableString, even though + // it is invalid. If a PrintableString was specifically + // requested then the asterisk is permitted by this code. + // Ampersand is allowed in parsing due a handful of CA + // certificates, however when making new certificates + // it is rejected. + if !isPrintable(s[i], allowAsterisk, rejectAmpersand) { + return nil, StructuralError{"PrintableString contains invalid character", fieldName} + } + } + + return stringEncoder(s), nil +} + +func makeIA5String(s, fieldName string) (e encoder, err error) { + for i := 0; i < len(s); i++ { + if s[i] > 127 { + return nil, StructuralError{"IA5String contains invalid character", fieldName} + } + } + + return stringEncoder(s), nil +} + +func makeNumericString(s string, fieldName string) (e encoder, err error) { + for i := 0; i < len(s); i++ { + if !isNumeric(s[i]) { + return nil, StructuralError{"NumericString contains invalid character", fieldName} + } + } + + return stringEncoder(s), nil +} + +func makeUTF8String(s string) encoder { + return stringEncoder(s) +} + +func appendTwoDigits(dst []byte, v int) []byte { + return append(dst, byte('0'+(v/10)%10), byte('0'+v%10)) +} + +func appendFourDigits(dst []byte, v int) []byte { + var bytes [4]byte + for i := range bytes { + bytes[3-i] = '0' + byte(v%10) + v /= 10 + } + return append(dst, bytes[:]...) 
+} + +func outsideUTCRange(t time.Time) bool { + year := t.Year() + return year < 1950 || year >= 2050 +} + +func makeUTCTime(t time.Time, fieldName string) (e encoder, err error) { + dst := make([]byte, 0, 18) + + dst, err = appendUTCTime(dst, t, fieldName) + if err != nil { + return nil, err + } + + return bytesEncoder(dst), nil +} + +func makeGeneralizedTime(t time.Time, fieldName string) (e encoder, err error) { + dst := make([]byte, 0, 20) + + dst, err = appendGeneralizedTime(dst, t, fieldName) + if err != nil { + return nil, err + } + + return bytesEncoder(dst), nil +} + +func appendUTCTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) { + year := t.Year() + + switch { + case 1950 <= year && year < 2000: + dst = appendTwoDigits(dst, year-1900) + case 2000 <= year && year < 2050: + dst = appendTwoDigits(dst, year-2000) + default: + return nil, StructuralError{"cannot represent time as UTCTime", fieldName} + } + + return appendTimeCommon(dst, t), nil +} + +func appendGeneralizedTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) { + year := t.Year() + if year < 0 || year > 9999 { + return nil, StructuralError{"cannot represent time as GeneralizedTime", fieldName} + } + + dst = appendFourDigits(dst, year) + + return appendTimeCommon(dst, t), nil +} + +func appendTimeCommon(dst []byte, t time.Time) []byte { + _, month, day := t.Date() + + dst = appendTwoDigits(dst, int(month)) + dst = appendTwoDigits(dst, day) + + hour, min, sec := t.Clock() + + dst = appendTwoDigits(dst, hour) + dst = appendTwoDigits(dst, min) + dst = appendTwoDigits(dst, sec) + + _, offset := t.Zone() + + switch { + case offset/60 == 0: + return append(dst, 'Z') + case offset > 0: + dst = append(dst, '+') + case offset < 0: + dst = append(dst, '-') + } + + offsetMinutes := offset / 60 + if offsetMinutes < 0 { + offsetMinutes = -offsetMinutes + } + + dst = appendTwoDigits(dst, offsetMinutes/60) + dst = appendTwoDigits(dst, offsetMinutes%60) + + return dst +} + +func stripTagAndLength(in []byte) []byte { + _, offset, err := parseTagAndLength(in, 0, "") + if err != nil { + return in + } + return in[offset:] +} + +func makeBody(value reflect.Value, params fieldParameters) (e encoder, err error) { + switch value.Type() { + case flagType: + return bytesEncoder(nil), nil + case timeType: + t := value.Interface().(time.Time) + if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { + return makeGeneralizedTime(t, params.name) + } + return makeUTCTime(t, params.name) + case bitStringType: + return bitStringEncoder(value.Interface().(BitString)), nil + case objectIdentifierType: + return makeObjectIdentifier(value.Interface().(ObjectIdentifier), params.name) + case bigIntType: + return makeBigInt(value.Interface().(*big.Int), params.name) + } + + switch v := value; v.Kind() { + case reflect.Bool: + if v.Bool() { + return byteFFEncoder, nil + } + return byte00Encoder, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64Encoder(v.Int()), nil + case reflect.Struct: + t := v.Type() + + for i := 0; i < t.NumField(); i++ { + if t.Field(i).PkgPath != "" { + return nil, StructuralError{"struct contains unexported fields", t.Field(i).Name} + } + } + + startingField := 0 + + n := t.NumField() + if n == 0 { + return bytesEncoder(nil), nil + } + + // If the first element of the structure is a non-empty + // RawContents, then we don't bother serializing the rest. 
+ if t.Field(0).Type == rawContentsType { + s := v.Field(0) + if s.Len() > 0 { + bytes := s.Bytes() + /* The RawContents will contain the tag and + * length fields but we'll also be writing + * those ourselves, so we strip them out of + * bytes */ + return bytesEncoder(stripTagAndLength(bytes)), nil + } + + startingField = 1 + } + + switch n1 := n - startingField; n1 { + case 0: + return bytesEncoder(nil), nil + case 1: + return makeField(v.Field(startingField), parseFieldParameters(t.Field(startingField).Tag.Get("asn1"))) + default: + m := make([]encoder, n1) + for i := 0; i < n1; i++ { + m[i], err = makeField(v.Field(i+startingField), parseFieldParameters(t.Field(i+startingField).Tag.Get("asn1"))) + if err != nil { + return nil, err + } + } + + return multiEncoder(m), nil + } + case reflect.Slice: + sliceType := v.Type() + if sliceType.Elem().Kind() == reflect.Uint8 { + return bytesEncoder(v.Bytes()), nil + } + + var fp fieldParameters + + switch l := v.Len(); l { + case 0: + return bytesEncoder(nil), nil + case 1: + return makeField(v.Index(0), fp) + default: + m := make([]encoder, l) + + for i := 0; i < l; i++ { + m[i], err = makeField(v.Index(i), fp) + if err != nil { + return nil, err + } + } + + return multiEncoder(m), nil + } + case reflect.String: + switch params.stringType { + case TagIA5String: + return makeIA5String(v.String(), params.name) + case TagPrintableString: + return makePrintableString(v.String(), params.name) + case TagNumericString: + return makeNumericString(v.String(), params.name) + default: + return makeUTF8String(v.String()), nil + } + } + + return nil, StructuralError{"unknown Go type", params.name} +} + +func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) { + if !v.IsValid() { + return nil, fmt.Errorf("asn1: cannot marshal nil value") + } + // If the field is an interface{} then recurse into it. + if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { + return makeField(v.Elem(), params) + } + + if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { + return bytesEncoder(nil), nil + } + + if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { + defaultValue := reflect.New(v.Type()).Elem() + defaultValue.SetInt(*params.defaultValue) + + if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { + return bytesEncoder(nil), nil + } + } + + // If no default value is given then the zero value for the type is + // assumed to be the default value. This isn't obviously the correct + // behavior, but it's what Go has traditionally done. 
+ if params.optional && params.defaultValue == nil { + if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return bytesEncoder(nil), nil + } + } + + if v.Type() == rawValueType { + rv := v.Interface().(RawValue) + if len(rv.FullBytes) != 0 { + return bytesEncoder(rv.FullBytes), nil + } + + t := new(taggedEncoder) + + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})) + t.body = bytesEncoder(rv.Bytes) + + return t, nil + } + + matchAny, tag, isCompound, ok := getUniversalType(v.Type()) + if !ok || matchAny { + return nil, StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type()), params.name} + } + + if params.timeType != 0 && tag != TagUTCTime { + return nil, StructuralError{"explicit time type given to non-time member", params.name} + } + + if params.stringType != 0 && tag != TagPrintableString { + return nil, StructuralError{"explicit string type given to non-string member", params.name} + } + + switch tag { + case TagPrintableString: + if params.stringType == 0 { + // This is a string without an explicit string type. We'll use + // a PrintableString if the character set in the string is + // sufficiently limited, otherwise we'll use a UTF8String. + for _, r := range v.String() { + if r >= utf8.RuneSelf || !isPrintable(byte(r), rejectAsterisk, rejectAmpersand) { + if !utf8.ValidString(v.String()) { + return nil, errors.New("asn1: string not valid UTF-8") + } + tag = TagUTF8String + break + } + } + } else { + tag = params.stringType + } + case TagUTCTime: + if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { + tag = TagGeneralizedTime + } + } + + if params.set { + if tag != TagSequence { + return nil, StructuralError{"non sequence tagged as set", params.name} + } + tag = TagSet + } + + t := new(taggedEncoder) + + t.body, err = makeBody(v, params) + if err != nil { + return nil, err + } + + bodyLen := t.body.Len() + + class := ClassUniversal + if params.tag != nil { + if params.application { + class = ClassApplication + } else if params.private { + class = ClassPrivate + } else { + class = ClassContextSpecific + } + + if params.explicit { + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{ClassUniversal, tag, bodyLen, isCompound})) + + tt := new(taggedEncoder) + + tt.body = t + + tt.tag = bytesEncoder(appendTagAndLength(tt.scratch[:0], tagAndLength{ + class: class, + tag: *params.tag, + length: bodyLen + t.tag.Len(), + isCompound: true, + })) + + return tt, nil + } + + // implicit tag. + tag = *params.tag + } + + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{class, tag, bodyLen, isCompound})) + + return t, nil +} + +// Marshal returns the ASN.1 encoding of val. +// +// In addition to the struct tags recognised by Unmarshal, the following can be +// used: +// +// ia5: causes strings to be marshaled as ASN.1, IA5String values +// omitempty: causes empty slices to be skipped +// printable: causes strings to be marshaled as ASN.1, PrintableString values +// utf8: causes strings to be marshaled as ASN.1, UTF8String values +// utc: causes time.Time to be marshaled as ASN.1, UTCTime values +// generalized: causes time.Time to be marshaled as ASN.1, GeneralizedTime values +func Marshal(val interface{}) ([]byte, error) { + return MarshalWithParams(val, "") +} + +// MarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. 
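A compact sketch of the marshal-side tags listed above (ia5, omitempty, generalized). The type and values are invented for illustration and are not part of the vendored sources.

package main

import (
	"fmt"
	"time"

	"github.com/google/certificate-transparency-go/asn1"
)

// attestation is an invented type exercising marshal-specific tags.
type attestation struct {
	Contact string    `asn1:"ia5"`         // IA5String
	Serials [][]byte  `asn1:"omitempty"`   // empty slice is skipped entirely
	Expiry  time.Time `asn1:"generalized"` // GeneralizedTime regardless of year
}

func main() {
	der, err := asn1.Marshal(attestation{
		Contact: "ops@example.com",
		Expiry:  time.Date(2030, 6, 1, 0, 0, 0, 0, time.UTC),
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d DER bytes; the nil Serials slice was omitted\n", len(der))
}
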
+func MarshalWithParams(val interface{}, params string) ([]byte, error) { + e, err := makeField(reflect.ValueOf(val), parseFieldParameters(params)) + if err != nil { + return nil, err + } + b := make([]byte, e.Len()) + e.Encode(b) + return b, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go new file mode 100644 index 000000000000..a2fed51d883d --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go @@ -0,0 +1,278 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc v3.20.1 +// source: client/configpb/multilog.proto + +package configpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. +type TemporalLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *TemporalLogConfig) Reset() { + *x = TemporalLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_client_configpb_multilog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TemporalLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TemporalLogConfig) ProtoMessage() {} + +func (x *TemporalLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_client_configpb_multilog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TemporalLogConfig.ProtoReflect.Descriptor instead. +func (*TemporalLogConfig) Descriptor() ([]byte, []int) { + return file_client_configpb_multilog_proto_rawDescGZIP(), []int{0} +} + +func (x *TemporalLogConfig) GetShard() []*LogShardConfig { + if x != nil { + return x.Shard + } + return nil +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. 
+type LogShardConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // The log's public key in DER-encoded PKIX form. + PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"` + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + NotAfterStart *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"` + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + NotAfterLimit *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"` +} + +func (x *LogShardConfig) Reset() { + *x = LogShardConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_client_configpb_multilog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogShardConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogShardConfig) ProtoMessage() {} + +func (x *LogShardConfig) ProtoReflect() protoreflect.Message { + mi := &file_client_configpb_multilog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogShardConfig.ProtoReflect.Descriptor instead. +func (*LogShardConfig) Descriptor() ([]byte, []int) { + return file_client_configpb_multilog_proto_rawDescGZIP(), []int{1} +} + +func (x *LogShardConfig) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *LogShardConfig) GetPublicKeyDer() []byte { + if x != nil { + return x.PublicKeyDer + } + return nil +} + +func (x *LogShardConfig) GetNotAfterStart() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterStart + } + return nil +} + +func (x *LogShardConfig) GetNotAfterLimit() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterLimit + } + return nil +} + +var File_client_configpb_multilog_proto protoreflect.FileDescriptor + +var file_client_configpb_multilog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, + 0x62, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x11, 0x54, + 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x22, 0xd0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x6e, + 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x42, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x2d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x6c, 0x6f, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_client_configpb_multilog_proto_rawDescOnce sync.Once + file_client_configpb_multilog_proto_rawDescData = file_client_configpb_multilog_proto_rawDesc +) + +func file_client_configpb_multilog_proto_rawDescGZIP() []byte { + file_client_configpb_multilog_proto_rawDescOnce.Do(func() { + file_client_configpb_multilog_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_configpb_multilog_proto_rawDescData) + }) + return file_client_configpb_multilog_proto_rawDescData +} + +var file_client_configpb_multilog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_client_configpb_multilog_proto_goTypes = []interface{}{ + (*TemporalLogConfig)(nil), // 0: configpb.TemporalLogConfig + (*LogShardConfig)(nil), // 1: configpb.LogShardConfig + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp +} +var file_client_configpb_multilog_proto_depIdxs = []int32{ + 1, // 0: configpb.TemporalLogConfig.shard:type_name -> configpb.LogShardConfig + 2, // 1: configpb.LogShardConfig.not_after_start:type_name -> google.protobuf.Timestamp + 2, // 2: configpb.LogShardConfig.not_after_limit:type_name -> google.protobuf.Timestamp + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_client_configpb_multilog_proto_init() } +func file_client_configpb_multilog_proto_init() { + if File_client_configpb_multilog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_client_configpb_multilog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemporalLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_client_configpb_multilog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogShardConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_client_configpb_multilog_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_client_configpb_multilog_proto_goTypes, + DependencyIndexes: file_client_configpb_multilog_proto_depIdxs, + MessageInfos: file_client_configpb_multilog_proto_msgTypes, + }.Build() + File_client_configpb_multilog_proto = out.File + file_client_configpb_multilog_proto_rawDesc = nil + file_client_configpb_multilog_proto_goTypes = nil + file_client_configpb_multilog_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto new file mode 100644 index 000000000000..0774c35e210a --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto @@ -0,0 +1,45 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package configpb; + +option go_package = "github.com/google/certificate-transparency-go/client/multilog/configpb"; + +import "google/protobuf/timestamp.proto"; + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. +message TemporalLogConfig { + repeated LogShardConfig shard = 1; +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. +message LogShardConfig { + string uri = 1; + + // The log's public key in DER-encoded PKIX form. + bytes public_key_der = 2; + + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + google.protobuf.Timestamp not_after_start = 3; + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + google.protobuf.Timestamp not_after_limit = 4; +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/getentries.go b/vendor/github.com/google/certificate-transparency-go/client/getentries.go new file mode 100644 index 000000000000..103dc8158038 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/getentries.go @@ -0,0 +1,68 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "errors" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" +) + +// GetRawEntries exposes the /ct/v1/get-entries result with only the JSON parsing done. +func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.GetEntriesResponse, error) { + if end < 0 { + return nil, errors.New("end should be >= 0") + } + if end < start { + return nil, errors.New("start should be <= end") + } + + params := map[string]string{ + "start": strconv.FormatInt(start, 10), + "end": strconv.FormatInt(end, 10), + } + + var resp ct.GetEntriesResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +// GetEntries attempts to retrieve the entries in the sequence [start, end] from the CT log server +// (RFC6962 s4.6) as parsed [pre-]certificates for convenience, held in a slice of ct.LogEntry structures. +// However, this does mean that any certificate parsing failures will cause a failure of the whole +// retrieval operation; for more robust retrieval of parsed certificates, use GetRawEntries() and invoke +// ct.LogEntryFromLeaf() on each individual entry. +func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogEntry, error) { + resp, err := c.GetRawEntries(ctx, start, end) + if err != nil { + return nil, err + } + entries := make([]ct.LogEntry, len(resp.Entries)) + for i, entry := range resp.Entries { + index := start + int64(i) + logEntry, err := ct.LogEntryFromLeaf(index, &entry) + if x509.IsFatal(err) { + return nil, err + } + entries[i] = *logEntry + } + return entries, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/logclient.go b/vendor/github.com/google/certificate-transparency-go/client/logclient.go new file mode 100644 index 000000000000..0e90c1077f4a --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/logclient.go @@ -0,0 +1,227 @@ +// Copyright 2014 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client is a CT log client implementation and contains types and code +// for interacting with RFC6962-compliant CT Log instances. 
+// See http://tools.ietf.org/html/rfc6962 for details +package client + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/tls" +) + +// LogClient represents a client for a given CT Log instance +type LogClient struct { + jsonclient.JSONClient +} + +// CheckLogClient is an interface that allows (just) checking of various log contents. +type CheckLogClient interface { + BaseURI() string + GetSTH(context.Context) (*ct.SignedTreeHead, error) + GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) + GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) +} + +// New constructs a new LogClient instance. +// |uri| is the base URI of the CT log instance to interact with, e.g. +// https://ct.googleapis.com/pilot +// |hc| is the underlying client to be used for HTTP requests to the CT log. +// |opts| can be used to provide a custom logger interface and a public key +// for signature verification. +func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) { + logClient, err := jsonclient.New(uri, hc, opts) + if err != nil { + return nil, err + } + return &LogClient{*logClient}, err +} + +// RspError represents a server error including HTTP information. +type RspError = jsonclient.RspError + +// Attempts to add |chain| to the log, using the api end-point specified by +// |path|. If provided context expires before submission is complete an +// error will be returned. +func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + var resp ct.AddChainResponse + var req ct.AddChainRequest + for _, link := range chain { + req.Chain = append(req.Chain, link.Data) + } + + httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp) + if err != nil { + return nil, err + } + + var ds ct.DigitallySigned + if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } else if len(rest) > 0 { + return nil, RspError{ + Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + + exts, err := base64.StdEncoding.DecodeString(resp.Extensions) + if err != nil { + return nil, RspError{ + Err: fmt.Errorf("invalid base64 data in Extensions (%q): %v", resp.Extensions, err), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + + var logID ct.LogID + copy(logID.KeyID[:], resp.ID) + sct := &ct.SignedCertificateTimestamp{ + SCTVersion: resp.SCTVersion, + LogID: logID, + Timestamp: resp.Timestamp, + Extensions: ct.CTExtensions(exts), + Signature: ds, + } + if err := c.VerifySCTSignature(*sct, ctype, chain); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sct, nil +} + +// AddChain adds the (DER represented) X509 |chain| to the log. +func (c *LogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return c.addChainWithRetry(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate |chain| to the log. 
+func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +// GetSTH retrieves the current STH from the log. +// Returns a populated SignedTreeHead, or a non-nil error (which may be of type +// RspError if a raw http.Response is available). +func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) { + var resp ct.GetSTHResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp) + if err != nil { + return nil, err + } + + sth, err := resp.ToSignedTreeHead() + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + + if err := c.VerifySTHSignature(*sth); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sth, nil +} + +// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is +// successful. +func (c *LogClient) VerifySTHSignature(sth ct.SignedTreeHead) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + return c.Verifier.VerifySTHSignature(sth) +} + +// VerifySCTSignature checks the signature in sct for the given LogEntryType, with associated certificate chain. +func (c *LogClient) VerifySCTSignature(sct ct.SignedCertificateTimestamp, ctype ct.LogEntryType, certData []ct.ASN1Cert) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + leaf, err := ct.MerkleTreeLeafFromRawChain(certData, ctype, sct.Timestamp) + if err != nil { + return fmt.Errorf("failed to build MerkleTreeLeaf: %v", err) + } + leaf.TimestampedEntry.Extensions = sct.Extensions + entry := ct.LogEntry{Leaf: *leaf} + return c.Verifier.VerifySCTSignature(sct, entry) +} + +// GetSTHConsistency retrieves the consistency proof between two snapshots. +func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) { + base10 := 10 + params := map[string]string{ + "first": strconv.FormatUint(first, base10), + "second": strconv.FormatUint(second, base10), + } + var resp ct.GetSTHConsistencyResponse + if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil { + return nil, err + } + return resp.Consistency, nil +} + +// GetProofByHash returns an audit path for the hash of an SCT. +func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) { + b64Hash := base64.StdEncoding.EncodeToString(hash) + base10 := 10 + params := map[string]string{ + "tree_size": strconv.FormatUint(treeSize, base10), + "hash": b64Hash, + } + var resp ct.GetProofByHashResponse + if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for a log. 
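A usage sketch for the log client above. The log URL is a placeholder, the certificate bytes are assumed to come from elsewhere, and no public key is supplied, so no signature verification is performed; this is an illustration rather than production code.

package main

import (
	"context"
	"fmt"
	"net/http"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	// With empty Options (no public key) the client skips SCT/STH signature verification.
	lc, err := client.New("https://ct.example.com/testlog", &http.Client{}, jsonclient.Options{})
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	sth, err := lc.GetSTH(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("tree size:", sth.TreeSize)

	// leafDER and issuerDER are placeholders for DER-encoded certificates.
	var leafDER, issuerDER []byte
	chain := []ct.ASN1Cert{{Data: leafDER}, {Data: issuerDER}}
	if sct, err := lc.AddChain(ctx, chain); err == nil {
		fmt.Println("SCT timestamp:", sct.Timestamp)
	}
}
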
+func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + var resp ct.GetRootsResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp) + if err != nil { + return nil, err + } + var roots []ct.ASN1Cert + for _, cert64 := range resp.Certificates { + cert, err := base64.StdEncoding.DecodeString(cert64) + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + roots = append(roots, ct.ASN1Cert{Data: cert}) + } + return roots, nil +} + +// GetEntryAndProof returns a log entry and audit path for the index of a leaf. +func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) { + base10 := 10 + params := map[string]string{ + "leaf_index": strconv.FormatUint(index, base10), + "tree_size": strconv.FormatUint(treeSize, base10), + } + var resp ct.GetEntryAndProofResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/multilog.go b/vendor/github.com/google/certificate-transparency-go/client/multilog.go new file mode 100644 index 000000000000..afd75a6db4a4 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/multilog.go @@ -0,0 +1,223 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "net/http" + "os" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client/configpb" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/x509" + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" +) + +type interval struct { + lower *time.Time // nil => no lower bound + upper *time.Time // nil => no upper bound +} + +// TemporalLogConfigFromFile creates a TemporalLogConfig object from the given +// filename, which should contain text-protobuf encoded configuration data. +func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, error) { + if len(filename) == 0 { + return nil, errors.New("log config filename empty") + } + + cfgBytes, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("failed to read log config: %v", err) + } + + var cfg configpb.TemporalLogConfig + if txtErr := prototext.Unmarshal(cfgBytes, &cfg); txtErr != nil { + if binErr := proto.Unmarshal(cfgBytes, &cfg); binErr != nil { + return nil, fmt.Errorf("failed to parse TemporalLogConfig from %q as text protobuf (%v) or binary protobuf (%v)", filename, txtErr, binErr) + } + } + + if len(cfg.Shard) == 0 { + return nil, errors.New("empty log config found") + } + return &cfg, nil +} + +// AddLogClient is an interface that allows adding certificates and pre-certificates to a log. 
+// Both LogClient and TemporalLogClient implement this interface, which allows users to +// commonize code for adding certs to normal/temporal logs. +type AddLogClient interface { + AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) +} + +// TemporalLogClient allows [pre-]certificates to be uploaded to a temporal log. +type TemporalLogClient struct { + Clients []*LogClient + intervals []interval +} + +// NewTemporalLogClient builds a new client for interacting with a temporal log. +// The provided config should be contiguous and chronological. +func NewTemporalLogClient(cfg *configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) { + if len(cfg.GetShard()) == 0 { + return nil, errors.New("empty config") + } + + overall, err := shardInterval(cfg.Shard[0]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[0] invalid: %v", err) + } + intervals := make([]interval, 0, len(cfg.Shard)) + intervals = append(intervals, overall) + for i := 1; i < len(cfg.Shard); i++ { + interval, err := shardInterval(cfg.Shard[i]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[%d] invalid: %v", i, err) + } + if overall.upper == nil { + return nil, fmt.Errorf("cfg.Shard[%d] extends an interval with no upper bound", i) + } + if interval.lower == nil { + return nil, fmt.Errorf("cfg.Shard[%d] has no lower bound but extends an interval", i) + } + if !interval.lower.Equal(*overall.upper) { + return nil, fmt.Errorf("cfg.Shard[%d] starts at %v but previous interval ended at %v", i, interval.lower, overall.upper) + } + overall.upper = interval.upper + intervals = append(intervals, interval) + } + clients := make([]*LogClient, 0, len(cfg.Shard)) + for i, shard := range cfg.Shard { + opts := jsonclient.Options{UserAgent: "ct-go-multilog/1.0"} + opts.PublicKeyDER = shard.GetPublicKeyDer() + c, err := New(shard.Uri, hc, opts) + if err != nil { + return nil, fmt.Errorf("failed to create client for cfg.Shard[%d]: %v", i, err) + } + clients = append(clients, c) + } + tlc := TemporalLogClient{ + Clients: clients, + intervals: intervals, + } + return &tlc, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for all +// of the shards of a temporal log (i.e. the union). +func (tlc *TemporalLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + type result struct { + roots []ct.ASN1Cert + err error + } + results := make(chan result, len(tlc.Clients)) + for _, c := range tlc.Clients { + go func(c *LogClient) { + var r result + r.roots, r.err = c.GetAcceptedRoots(ctx) + results <- r + }(c) + } + + var allRoots []ct.ASN1Cert + seen := make(map[[sha256.Size]byte]bool) + for range tlc.Clients { + r := <-results + if r.err != nil { + return nil, r.err + } + for _, root := range r.roots { + h := sha256.Sum256(root.Data) + if seen[h] { + continue + } + seen[h] = true + allRoots = append(allRoots, root) + } + } + return allRoots, nil +} + +// AddChain adds the (DER represented) X509 chain to the appropriate log. +func (tlc *TemporalLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate chain to the appropriate log. 
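(Editorial aside, not part of the vendored file.) A sketch of the temporal-log flow described above, assuming a hypothetical text-protobuf shard configuration file and a DER certificate on disk; the shard whose interval covers the leaf's NotAfter date is selected internally via IndexByDate:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/client"
)

func main() {
	// Placeholder file: text-protobuf shard configuration for the temporal log.
	cfg, err := client.TemporalLogConfigFromFile("temporal_log.cfg")
	if err != nil {
		log.Fatal(err)
	}
	tlc, err := client.NewTemporalLogClient(cfg, &http.Client{})
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder input: a DER certificate; the appropriate shard is chosen
	// from the certificate's NotAfter date when the chain is submitted.
	der, err := os.ReadFile("leaf.der")
	if err != nil {
		log.Fatal(err)
	}
	sct, err := tlc.AddChain(context.Background(), []ct.ASN1Cert{{Data: der}})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("SCT timestamp: %d", sct.Timestamp)
}
```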
+func (tlc *TemporalLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +func (tlc *TemporalLogClient) addChain(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + // Parse the first entry in the chain + if len(chain) == 0 { + return nil, errors.New("missing chain") + } + cert, err := x509.ParseCertificate(chain[0].Data) + if err != nil { + return nil, fmt.Errorf("failed to parse initial chain entry: %v", err) + } + cidx, err := tlc.IndexByDate(cert.NotAfter) + if err != nil { + return nil, fmt.Errorf("failed to find log to process cert: %v", err) + } + return tlc.Clients[cidx].addChainWithRetry(ctx, ctype, path, chain) +} + +// IndexByDate returns the index of the Clients entry that is appropriate for the given +// date. +func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) { + for i, interval := range tlc.intervals { + if (interval.lower != nil) && when.Before(*interval.lower) { + continue + } + if (interval.upper != nil) && !when.Before(*interval.upper) { + continue + } + return i, nil + } + return -1, fmt.Errorf("no log found encompassing date %v", when) +} + +func shardInterval(cfg *configpb.LogShardConfig) (interval, error) { + var interval interval + if cfg.NotAfterStart != nil { + if err := cfg.NotAfterStart.CheckValid(); err != nil { + return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err) + } + t := cfg.NotAfterStart.AsTime() + interval.lower = &t + } + if cfg.NotAfterLimit != nil { + if err := cfg.NotAfterLimit.CheckValid(); err != nil { + return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err) + } + t := cfg.NotAfterLimit.AsTime() + interval.upper = &t + } + + if interval.lower != nil && interval.upper != nil && !(*interval.lower).Before(*interval.upper) { + return interval, errors.New("inverted interval") + } + return interval, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml new file mode 100644 index 000000000000..50ac5ef7b13b --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml @@ -0,0 +1,214 @@ +############################################################################# +## The top section of this file is identical in the 3 cloudbuild.*yaml files. +## Make sure any edits you make here are copied over to the other files too +## if appropriate. +## +## TODO(al): consider if it's possible to merge these 3 files and control via +## substitutions. 
+############################################################################# + +timeout: 1200s +options: + machineType: N1_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/google/certificate-transparency-go + - GOPATH=/go + +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0'] +- name: 'gcr.io/cloud-builders/docker' + args: [ + 'build', + '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '-f', './integration/Dockerfile', + '.' + ] + +# prepare spins up an ephemeral trillian instance for testing use. +- name: gcr.io/$PROJECT_ID/ct_testbase + entrypoint: 'bash' + id: 'prepare' + args: + - '-exc' + - | + # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders. + docker pull gcr.io/$PROJECT_ID/log_server:latest + docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server + docker pull gcr.io/$PROJECT_ID/log_signer:latest + docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer + + # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo: + export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)" + + # We need to fix up Trillian's docker-compose to connect to the CloudBuild network to that tests can use it: + echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml + + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer + +# Install proto related bits and block on Trillian being ready +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci-ready' + entrypoint: 'bash' + args: + - '-ec' + - | + go install \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/golang/mock/mockgen \ + go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ + github.com/fullstorydev/grpcurl/cmd/grpcurl + + # Generate all protoc and mockgen files + go generate -run="protoc" ./... + go generate -run="mockgen" ./... + + # Cache all the modules we'll need too + go mod download + go test ./... 
+ + # Wait for trillian logserver to be up + until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export MYSQL_HOST="mysql" + export MYSQL_ROOT_PASSWORD="zaphod" + export MYSQL_USER_HOST="%" + yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] + +############################################################################ +## End of replicated section. +## Below are deployment specific steps for the CD env. +############################################################################ + +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - --cache-from=gcr.io/${PROJECT_ID}/ctfe + - . 
+ waitFor: [-] +- id: build_envsubst + name: gcr.io/cloud-builders/docker + args: + - build + - trillian/examples/deployment/docker/envsubst + - -t + - envsubst + waitFor: ['ci_complete'] +- id: envsubst_kubernetes_configs + name: envsubst + args: + - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - trillian/examples/deployment/kubernetes/ctfe-service.yaml + - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - PROJECT_ID=${PROJECT_ID} + - IMAGE_TAG=${COMMIT_SHA} + waitFor: + - build_envsubst +- id: update_kubernetes_configs_dryrun + name: gcr.io/cloud-builders/kubectl + args: + - apply + - --dry-run=server + - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE} + - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME} + waitFor: + - envsubst_kubernetes_configs + - build_ctfe + +images: +- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} +- gcr.io/${PROJECT_ID}/ct_testbase:latest diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml new file mode 100644 index 000000000000..566edfe0f972 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml @@ -0,0 +1,230 @@ +############################################################################# +## The top section of this file is identical in the 3 cloudbuild.*yaml files. +## Make sure any edits you make here are copied over to the other files too +## if appropriate. +## +## TODO(al): consider if it's possible to merge these 3 files and control via +## substitutions. +############################################################################# + +timeout: 1200s +options: + machineType: N1_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/google/certificate-transparency-go + - GOPATH=/go + +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0'] +- name: 'gcr.io/cloud-builders/docker' + args: [ + 'build', + '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '-f', './integration/Dockerfile', + '.' + ] + +# prepare spins up an ephemeral trillian instance for testing use. +- name: gcr.io/$PROJECT_ID/ct_testbase + entrypoint: 'bash' + id: 'prepare' + args: + - '-exc' + - | + # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders. 
+ docker pull gcr.io/$PROJECT_ID/log_server:latest + docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server + docker pull gcr.io/$PROJECT_ID/log_signer:latest + docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer + + # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo: + export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)" + + # We need to fix up Trillian's docker-compose to connect to the CloudBuild network to that tests can use it: + echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml + + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer + +# Install proto related bits and block on Trillian being ready +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci-ready' + entrypoint: 'bash' + args: + - '-ec' + - | + go install \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/golang/mock/mockgen \ + go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ + github.com/fullstorydev/grpcurl/cmd/grpcurl + + # Generate all protoc and mockgen files + go generate -run="protoc" ./... + go generate -run="mockgen" ./... + + # Cache all the modules we'll need too + go mod download + go test ./... + + # Wait for trillian logserver to be up + until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export MYSQL_HOST="mysql" + export MYSQL_ROOT_PASSWORD="zaphod" + export MYSQL_USER_HOST="%" + yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: 
['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] + +############################################################################ +## End of replicated section. +## Below are deployment specific steps for the CD env. +############################################################################ + +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - --cache-from=gcr.io/${PROJECT_ID}/ctfe + - . + waitFor: ["-"] +- id: push_ctfe + name: gcr.io/cloud-builders/docker + args: + - push + - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + waitFor: + - build_ctfe +- id: tag_latest_ctfe + name: gcr.io/cloud-builders/gcloud + args: + - container + - images + - add-tag + - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - gcr.io/${PROJECT_ID}/ctfe:latest + waitFor: + - push_ctfe +- id: build_envsubst + name: gcr.io/cloud-builders/docker + args: + - build + - trillian/examples/deployment/docker/envsubst + - -t + - envsubst + waitFor: ["-"] +- id: envsubst_kubernetes_configs + name: envsubst + args: + - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - trillian/examples/deployment/kubernetes/ctfe-service.yaml + - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - PROJECT_ID=${PROJECT_ID} + - IMAGE_TAG=${COMMIT_SHA} + waitFor: + - build_envsubst +- id: update_kubernetes_configs + name: gcr.io/cloud-builders/kubectl + args: + - apply + - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE} + - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME} + waitFor: + - envsubst_kubernetes_configs + - push_ctfe + +images: +- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} +- gcr.io/${PROJECT_ID}/ct_testbase:latest diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml new file mode 100644 index 000000000000..37faca72ac00 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml @@ -0,0 +1,161 @@ +############################################################################# +## This file is based on cloudbuild.yaml, but targets PostgreSQL instead of +## MySQL. 
+############################################################################# + +timeout: 1200s +options: + machineType: N1_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/google/certificate-transparency-go + - GOPATH=/go + +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0'] +- name: 'gcr.io/cloud-builders/docker' + args: [ + 'build', + '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '-f', './integration/Dockerfile', + '.' + ] + +# prepare spins up an ephemeral trillian instance for testing use. +- name: gcr.io/$PROJECT_ID/ct_testbase + entrypoint: 'bash' + id: 'prepare' + args: + - '-exc' + - | + # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders. + docker pull gcr.io/$PROJECT_ID/log_server:latest + docker tag gcr.io/$PROJECT_ID/log_server:latest postgresql_trillian-log-server + docker pull gcr.io/$PROJECT_ID/log_signer:latest + docker tag gcr.io/$PROJECT_ID/log_signer:latest postgresql_trillian-log-signer + + # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo: + export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)" + + # We need to fix up Trillian's docker-compose to connect to the CloudBuild network to that tests can use it: + echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml + + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml pull postgresql trillian-log-server trillian-log-signer + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml up -d postgresql trillian-log-server trillian-log-signer + +# Install proto related bits and block on Trillian being ready +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci-ready' + entrypoint: 'bash' + args: + - '-ec' + - | + go install \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/golang/mock/mockgen \ + go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ + github.com/fullstorydev/grpcurl/cmd/grpcurl + + # Generate all protoc and mockgen files + go generate -run="protoc" ./... + go generate -run="mockgen" ./... + + # Cache all the modules we'll need too + go mod download + go test ./... 
+ + # Wait for trillian logserver to be up + until nc -z postgresql_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export POSTGRESQL_HOST="postgresql" + yes | bash "$${CT_GO_PATH}/scripts/resetpgctdb.sh" --verbose + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml new file mode 100644 index 000000000000..ce9efc2b2cb3 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml @@ -0,0 +1,173 @@ +############################################################################# +## The top section of this file is identical in the 3 cloudbuild.*yaml files. +## Make sure any edits you make here are copied over to the other files too +## if appropriate. +## +## TODO(al): consider if it's possible to merge these 3 files and control via +## substitutions. 
+############################################################################# + +timeout: 1200s +options: + machineType: N1_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/google/certificate-transparency-go + - GOPATH=/go + +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0'] +- name: 'gcr.io/cloud-builders/docker' + args: [ + 'build', + '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '-f', './integration/Dockerfile', + '.' + ] + +# prepare spins up an ephemeral trillian instance for testing use. +- name: gcr.io/$PROJECT_ID/ct_testbase + entrypoint: 'bash' + id: 'prepare' + args: + - '-exc' + - | + # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders. + docker pull gcr.io/$PROJECT_ID/log_server:latest + docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server + docker pull gcr.io/$PROJECT_ID/log_signer:latest + docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer + + # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo: + export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)" + + # We need to fix up Trillian's docker-compose to connect to the CloudBuild network to that tests can use it: + echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml + + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer + +# Install proto related bits and block on Trillian being ready +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci-ready' + entrypoint: 'bash' + args: + - '-ec' + - | + go install \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/golang/mock/mockgen \ + go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ + github.com/fullstorydev/grpcurl/cmd/grpcurl + + # Generate all protoc and mockgen files + go generate -run="protoc" ./... + go generate -run="mockgen" ./... + + # Cache all the modules we'll need too + go mod download + go test ./... 
+ + # Wait for trillian logserver to be up + until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] + +############################################################################ +## End of replicated section. +## Below are deployment specific steps for the CD env. +############################################################################ + +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME} + - --cache-from=gcr.io/${PROJECT_ID}/ctfe + - . + +images: +- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME} +- gcr.io/${PROJECT_ID}/ct_testbase:latest diff --git a/vendor/github.com/google/certificate-transparency-go/codecov.yml b/vendor/github.com/google/certificate-transparency-go/codecov.yml new file mode 100644 index 000000000000..7269ff271508 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/codecov.yml @@ -0,0 +1,19 @@ +# Customizations to codecov for c-t-go repo. This will be merged into +# the team / default codecov yaml file. +# +# Validate changes with: +# curl --data-binary @codecov.yml https://codecov.io/validate + +# Exclude code that's for testing, demos or utilities that aren't really +# part of production releases. 
+ignore: + - "**/mock_*.go" + - "**/testonly" + - "trillian/integration" + +coverage: + status: + project: + default: + # Allow 1% coverage drop without complaining, to avoid being too noisy. + threshold: 1% diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go new file mode 100644 index 000000000000..640fcec9c1f1 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go @@ -0,0 +1,211 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ctutil contains utilities for Certificate Transparency. +package ctutil + +import ( + "bytes" + "crypto" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +var emptyHash = [sha256.Size]byte{} + +// LeafHashB64 does as LeafHash does, but returns the leaf hash base64-encoded. +// The base64-encoded leaf hash returned by B64LeafHash can be used with the +// get-proof-by-hash API endpoint of Certificate Transparency Logs. +func LeafHashB64(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (string, error) { + hash, err := LeafHash(chain, sct, embedded) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(hash[:]), nil +} + +// LeafHash calculates the leaf hash of the certificate or precertificate at +// chain[0] that sct was issued for. +// +// sct is required because the SCT timestamp is used to calculate the leaf hash. +// Leaf hashes are unique to (pre)certificate-SCT pairs. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to calculate the leaf hash for a normal X.509 +// certificate then it is enough to just provide the end entity +// certificate in chain. This case assumes that the SCT being provided is +// not embedded within the leaf certificate provided, i.e. the certificate +// is what was submitted to the Certificate Transparency Log in order to +// obtain the SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to calculate the leaf hash for a precertificate +// then the issuing certificate must also be provided in chain. The +// precertificate should be at chain[0], and its issuer at chain[1]. For +// this case, set embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If using this function to calculate the leaf hash for a certificate +// where the SCT provided is embedded within the certificate you +// are providing at chain[0], set embedded to true. LeafHash will +// calculate the leaf hash by building the corresponding precertificate. +// LeafHash will return an error if the provided SCT cannot be found +// embedded within chain[0]. 
As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +// +// Note: LeafHash doesn't check that the provided SCT verifies for the given +// chain. It simply calculates what the leaf hash would be for the given +// (pre)certificate-SCT pair. +func LeafHash(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) ([sha256.Size]byte, error) { + leaf, err := createLeaf(chain, sct, embedded) + if err != nil { + return emptyHash, err + } + return ct.LeafHashForLeaf(leaf) +} + +// VerifySCT takes the public key of a Certificate Transparency Log, a +// certificate chain, and an SCT and verifies whether the SCT is a valid SCT for +// the certificate at chain[0], signed by the Log that the public key belongs +// to. If the SCT does not verify, an error will be returned. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +func VerifySCT(pubKey crypto.PublicKey, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { + s, err := ct.NewSignatureVerifier(pubKey) + if err != nil { + return fmt.Errorf("error creating signature verifier: %s", err) + } + + return VerifySCTWithVerifier(s, chain, sct, embedded) +} + +// VerifySCTWithVerifier takes a ct.SignatureVerifier, a certificate chain, and +// an SCT and verifies whether the SCT is a valid SCT for the certificate at +// chain[0], signed by the Log whose public key was used to set up the +// ct.SignatureVerifier. If the SCT does not verify, an error will be returned. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. 
+// - Precertificate: +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +func VerifySCTWithVerifier(sv *ct.SignatureVerifier, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { + if sv == nil { + return errors.New("ct.SignatureVerifier is nil") + } + + leaf, err := createLeaf(chain, sct, embedded) + if err != nil { + return err + } + + return sv.VerifySCTSignature(*sct, ct.LogEntry{Leaf: *leaf}) +} + +func createLeaf(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (*ct.MerkleTreeLeaf, error) { + if len(chain) == 0 { + return nil, errors.New("chain is empty") + } + if sct == nil { + return nil, errors.New("sct is nil") + } + + if embedded { + sctPresent, err := ContainsSCT(chain[0], sct) + if err != nil { + return nil, fmt.Errorf("error checking for SCT in leaf certificate: %s", err) + } + if !sctPresent { + return nil, errors.New("SCT provided is not embedded within leaf certificate") + } + } + + certType := ct.X509LogEntryType + if chain[0].IsPrecertificate() || embedded { + certType = ct.PrecertLogEntryType + } + + var leaf *ct.MerkleTreeLeaf + var err error + if embedded { + leaf, err = ct.MerkleTreeLeafForEmbeddedSCT(chain, sct.Timestamp) + } else { + leaf, err = ct.MerkleTreeLeafFromChain(chain, certType, sct.Timestamp) + } + if err != nil { + return nil, fmt.Errorf("error creating MerkleTreeLeaf: %s", err) + } + return leaf, nil +} + +// ContainsSCT checks to see whether the given SCT is embedded within the given +// certificate. +func ContainsSCT(cert *x509.Certificate, sct *ct.SignedCertificateTimestamp) (bool, error) { + if cert == nil || sct == nil { + return false, nil + } + + sctBytes, err := tls.Marshal(*sct) + if err != nil { + return false, fmt.Errorf("error tls.Marshalling SCT: %s", err) + } + for _, s := range cert.SCTList.SCTList { + if bytes.Equal(sctBytes, s.Val) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go new file mode 100644 index 000000000000..83d9739c7e58 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go @@ -0,0 +1,174 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ctutil + +import ( + "context" + "crypto/sha256" + "fmt" + "net/http" + "strings" + "sync" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/loglist3" + "github.com/google/certificate-transparency-go/x509" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" +) + +// LogInfo holds the objects needed to perform per-log verification and +// validation of SCTs. +type LogInfo struct { + Description string + Client client.CheckLogClient + MMD time.Duration + Verifier *ct.SignatureVerifier + PublicKey []byte + + mu sync.RWMutex + lastSTH *ct.SignedTreeHead +} + +// NewLogInfo builds a LogInfo object based on a log list entry. +func NewLogInfo(log *loglist3.Log, hc *http.Client) (*LogInfo, error) { + url := log.URL + if !strings.HasPrefix(url, "https://") { + url = "https://" + url + } + lc, err := client.New(url, hc, jsonclient.Options{PublicKeyDER: log.Key, UserAgent: "ct-go-logclient"}) + if err != nil { + return nil, fmt.Errorf("failed to create client for log %q: %v", log.Description, err) + } + return newLogInfo(log, lc) +} + +func newLogInfo(log *loglist3.Log, lc client.CheckLogClient) (*LogInfo, error) { + logKey, err := x509.ParsePKIXPublicKey(log.Key) + if err != nil { + return nil, fmt.Errorf("failed to parse public key data for log %q: %v", log.Description, err) + } + verifier, err := ct.NewSignatureVerifier(logKey) + if err != nil { + return nil, fmt.Errorf("failed to build verifier log %q: %v", log.Description, err) + } + mmd := time.Duration(log.MMD) * time.Second + return &LogInfo{ + Description: log.Description, + Client: lc, + MMD: mmd, + Verifier: verifier, + PublicKey: log.Key, + }, nil +} + +// LogInfoByHash holds LogInfo objects index by the SHA-256 hash of the log's public key. +type LogInfoByHash map[[sha256.Size]byte]*LogInfo + +// LogInfoByKeyHash builds a map of LogInfo objects indexed by their key hashes. +func LogInfoByKeyHash(ll *loglist3.LogList, hc *http.Client) (LogInfoByHash, error) { + return logInfoByKeyHash(ll, hc, NewLogInfo) +} + +func logInfoByKeyHash(ll *loglist3.LogList, hc *http.Client, infoFactory func(*loglist3.Log, *http.Client) (*LogInfo, error)) (map[[sha256.Size]byte]*LogInfo, error) { + result := make(map[[sha256.Size]byte]*LogInfo) + for _, operator := range ll.Operators { + for _, log := range operator.Logs { + h := sha256.Sum256(log.Key) + li, err := infoFactory(log, hc) + if err != nil { + return nil, err + } + result[h] = li + } + } + return result, nil +} + +// LastSTH returns the last STH known for the log. +func (li *LogInfo) LastSTH() *ct.SignedTreeHead { + li.mu.RLock() + defer li.mu.RUnlock() + return li.lastSTH +} + +// SetSTH sets the last STH known for the log. +func (li *LogInfo) SetSTH(sth *ct.SignedTreeHead) { + li.mu.Lock() + defer li.mu.Unlock() + li.lastSTH = sth +} + +// VerifySCTSignature checks the signature in the SCT matches the given leaf (adjusted for the +// timestamp in the SCT) and log. 
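(Editorial aside, not part of the vendored file.) A sketch of how the LogInfo helpers above fit together for SCT checking; checkSCT, li, chain and sct are hypothetical names assumed to exist in the caller, and inclusion can legitimately fail until the log's MMD has elapsed:

```go
package ctexample

import (
	"context"
	"fmt"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/ctutil"
	"github.com/google/certificate-transparency-go/x509"
)

// checkSCT is a hypothetical helper: given a LogInfo for the issuing log, a
// parsed chain, and an SCT for the X.509 certificate at chain[0], it checks
// the SCT signature and then proves inclusion against the latest known STH,
// returning the leaf index on success.
func checkSCT(ctx context.Context, li *ctutil.LogInfo, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp) (int64, error) {
	// Rebuild the Merkle tree leaf the log would have incorporated for this
	// (certificate, SCT timestamp) pair.
	leaf, err := ct.MerkleTreeLeafFromChain(chain, ct.X509LogEntryType, sct.Timestamp)
	if err != nil {
		return -1, fmt.Errorf("failed to build Merkle tree leaf: %v", err)
	}
	if err := li.VerifySCTSignature(*sct, *leaf); err != nil {
		return -1, err
	}
	// May fail until the log's MMD has passed and the entry has been
	// incorporated into a published tree head.
	return li.VerifyInclusionLatest(ctx, *leaf, sct.Timestamp)
}
```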
+func (li *LogInfo) VerifySCTSignature(sct ct.SignedCertificateTimestamp, leaf ct.MerkleTreeLeaf) error { + leaf.TimestampedEntry.Timestamp = sct.Timestamp + if err := li.Verifier.VerifySCTSignature(sct, ct.LogEntry{Leaf: leaf}); err != nil { + return fmt.Errorf("failed to verify SCT signature from log %q: %v", li.Description, err) + } + return nil +} + +// VerifyInclusionLatest checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the latest known tree size of the log. If no tree size for the log is known, it will +// be queried. On success, returns the index of the leaf in the log. +func (li *LogInfo) VerifyInclusionLatest(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) { + sth := li.LastSTH() + if sth == nil { + var err error + sth, err = li.Client.GetSTH(ctx) + if err != nil { + return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err) + } + li.SetSTH(sth) + } + return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:]) +} + +// VerifyInclusion checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the current tree size of the log. On success, returns the index of the leaf +// in the log. +func (li *LogInfo) VerifyInclusion(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) { + sth, err := li.Client.GetSTH(ctx) + if err != nil { + return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err) + } + li.SetSTH(sth) + return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:]) +} + +// VerifyInclusionAt checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the given tree size & root hash of the log. On success, returns the index of the +// leaf in the log. +func (li *LogInfo) VerifyInclusionAt(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp, treeSize uint64, rootHash []byte) (int64, error) { + leaf.TimestampedEntry.Timestamp = timestamp + leafHash, err := ct.LeafHashForLeaf(&leaf) + if err != nil { + return -1, fmt.Errorf("failed to create leaf hash: %v", err) + } + + rsp, err := li.Client.GetProofByHash(ctx, leafHash[:], treeSize) + if err != nil { + return -1, fmt.Errorf("failed to GetProofByHash(sct,size=%d): %v", treeSize, err) + } + + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(rsp.LeafIndex), treeSize, leafHash[:], rsp.AuditPath, rootHash); err != nil { + return -1, fmt.Errorf("failed to verify inclusion proof at size %d: %v", treeSize, err) + } + return rsp.LeafIndex, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go b/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go new file mode 100644 index 000000000000..540e717d74e3 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go @@ -0,0 +1,92 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package x509ext holds extensions types and values for minimal gossip. +package x509ext + +import ( + "errors" + "fmt" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" + + ct "github.com/google/certificate-transparency-go" +) + +// OIDExtensionCTSTH is the OID value for an X.509 extension that holds +// a log STH value. +// TODO(drysdale): get an official OID value +var OIDExtensionCTSTH = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5} + +// OIDExtKeyUsageCTMinimalGossip is the OID value for an extended key usage +// (EKU) that indicates a leaf certificate is used for the validation of STH +// values from public CT logs. +// TODO(drysdale): get an official OID value +var OIDExtKeyUsageCTMinimalGossip = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 6} + +// LogSTHInfo is the structure that gets TLS-encoded into the X.509 extension +// identified by OIDExtensionCTSTH. +type LogSTHInfo struct { + LogURL []byte `tls:"maxlen:255"` + Version tls.Enum `tls:"maxval:255"` + TreeSize uint64 + Timestamp uint64 + SHA256RootHash ct.SHA256Hash + TreeHeadSignature ct.DigitallySigned +} + +// LogSTHInfoFromCert retrieves the STH information embedded in a certificate. +func LogSTHInfoFromCert(cert *x509.Certificate) (*LogSTHInfo, error) { + for _, ext := range cert.Extensions { + if ext.Id.Equal(OIDExtensionCTSTH) { + var sthInfo LogSTHInfo + rest, err := tls.Unmarshal(ext.Value, &sthInfo) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal STH: %v", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after STH", len(rest)) + } + return &sthInfo, nil + } + } + return nil, errors.New("no STH extension found") +} + +// HasSTHInfo indicates whether a certificate has embedded STH information. +func HasSTHInfo(cert *x509.Certificate) bool { + for _, ext := range cert.Extensions { + if ext.Id.Equal(OIDExtensionCTSTH) { + return true + } + } + return false +} + +// STHFromCert retrieves the STH embedded in a certificate; note the returned STH +// does not have the LogID field filled in. +func STHFromCert(cert *x509.Certificate) (*ct.SignedTreeHead, error) { + sthInfo, err := LogSTHInfoFromCert(cert) + if err != nil { + return nil, err + } + return &ct.SignedTreeHead{ + Version: ct.Version(sthInfo.Version), + TreeSize: sthInfo.TreeSize, + Timestamp: sthInfo.Timestamp, + SHA256RootHash: sthInfo.SHA256RootHash, + TreeHeadSignature: sthInfo.TreeHeadSignature, + }, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go new file mode 100644 index 000000000000..30932f30d1ad --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go @@ -0,0 +1,72 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonclient + +import ( + "sync" + "time" +) + +type backoff struct { + mu sync.RWMutex + multiplier uint + notBefore time.Time +} + +const ( + // maximum backoff is 2^(maxMultiplier-1) = 128 seconds + maxMultiplier = 8 +) + +func (b *backoff) set(override *time.Duration) time.Duration { + b.mu.Lock() + defer b.mu.Unlock() + if b.notBefore.After(time.Now()) { + if override != nil { + // If existing backoff is set but override would be longer than + // it then set it to that. + notBefore := time.Now().Add(*override) + if notBefore.After(b.notBefore) { + b.notBefore = notBefore + } + } + return time.Until(b.notBefore) + } + var wait time.Duration + if override != nil { + wait = *override + } else { + if b.multiplier < maxMultiplier { + b.multiplier++ + } + wait = time.Second * time.Duration(1<<(b.multiplier-1)) + } + b.notBefore = time.Now().Add(wait) + return wait +} + +func (b *backoff) decreaseMultiplier() { + b.mu.Lock() + defer b.mu.Unlock() + if b.multiplier > 0 { + b.multiplier-- + } +} + +func (b *backoff) until() time.Time { + b.mu.RLock() + defer b.mu.RUnlock() + return b.notBefore +} diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go new file mode 100644 index 000000000000..edb8f919afee --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go @@ -0,0 +1,336 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonclient provides a simple client for fetching and parsing +// JSON CT structures from a log. +package jsonclient + +import ( + "bytes" + "context" + "crypto" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" +) + +const maxJitter = 250 * time.Millisecond + +type backoffer interface { + // set adjusts/increases the current backoff interval (typically on retryable failure); + // if the optional parameter is provided, this will be used as the interval if it is greater + // than the currently set interval. Returns the current wait period so that it can be + // logged along with any error message. + set(*time.Duration) time.Duration + // decreaseMultiplier reduces the current backoff multiplier, typically on success. + decreaseMultiplier() + // until returns the time until which the client should wait before making a request, + // it may be in the past in which case it should be ignored. + until() time.Time +} + +// JSONClient provides common functionality for interacting with a JSON server +// that uses cryptographic signatures. +type JSONClient struct { + uri string // the base URI of the server. e.g. 
https://ct.googleapis/pilot + httpClient *http.Client // used to interact with the server via HTTP + Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available) + logger Logger // interface to use for logging warnings and errors + backoff backoffer // object used to store and calculate backoff information + userAgent string // If set, this is sent as the UserAgent header. + authorization string // If set, this is sent as the Authorization header. +} + +// Logger is a simple logging interface used to log internal errors and warnings +type Logger interface { + // Printf formats and logs a message + Printf(string, ...interface{}) +} + +// Options are the options for creating a new JSONClient. +type Options struct { + // Interface to use for logging warnings and errors, if nil the + // standard library log package will be used. + Logger Logger + // PEM format public key to use for signature verification. + PublicKey string + // DER format public key to use for signature verification. + PublicKeyDER []byte + // UserAgent, if set, will be sent as the User-Agent header with each request. + UserAgent string + // If set, this is sent as the Authorization header with each request. + Authorization string +} + +// ParsePublicKey parses and returns the public key contained in opts. +// If both opts.PublicKey and opts.PublicKeyDER are set, PublicKeyDER is used. +// If neither is set, nil will be returned. +func (opts *Options) ParsePublicKey() (crypto.PublicKey, error) { + if len(opts.PublicKeyDER) > 0 { + return x509.ParsePKIXPublicKey(opts.PublicKeyDER) + } + + if opts.PublicKey != "" { + pubkey, _ /* keyhash */, rest, err := ct.PublicKeyFromPEM([]byte(opts.PublicKey)) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, errors.New("extra data found after PEM key decoded") + } + return pubkey, nil + } + + return nil, nil +} + +type basicLogger struct{} + +func (bl *basicLogger) Printf(msg string, args ...interface{}) { + log.Printf(msg, args...) +} + +// RspError represents an error that occurred when processing a response from a server, +// and also includes key details from the http.Response that triggered the error. +type RspError struct { + Err error + StatusCode int + Body []byte +} + +// Error formats the RspError instance, focusing on the error. +func (e RspError) Error() string { + return e.Err.Error() +} + +// New constructs a new JSONClient instance, for the given base URI, using the +// given http.Client object (if provided) and the Options object. +// If opts does not specify a public key, signatures will not be verified. +func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) { + pubkey, err := opts.ParsePublicKey() + if err != nil { + return nil, fmt.Errorf("invalid public key: %v", err) + } + + var verifier *ct.SignatureVerifier + if pubkey != nil { + var err error + verifier, err = ct.NewSignatureVerifier(pubkey) + if err != nil { + return nil, err + } + } + + if hc == nil { + hc = new(http.Client) + } + logger := opts.Logger + if logger == nil { + logger = &basicLogger{} + } + return &JSONClient{ + uri: strings.TrimRight(uri, "/"), + httpClient: hc, + Verifier: verifier, + logger: logger, + backoff: &backoff{}, + userAgent: opts.UserAgent, + authorization: opts.Authorization, + }, nil +} + +// BaseURI returns the base URI that the JSONClient makes queries to. 
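A minimal sketch of how this client might be used, illustrative only and not part of the vendored sources: construct a JSONClient for a log from its PEM-encoded public key, then fetch the current STH. The log URL and key are placeholders; the "/ct/v1/get-sth" path follows RFC 6962.

package ctexample // illustrative sketch, not vendored code

import (
	"context"
	"fmt"
	"net/http"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/jsonclient"
)

// fetchSTH retrieves a log's current signed tree head via GetAndParse.
func fetchSTH(ctx context.Context, logURL, pemKey string) (*ct.GetSTHResponse, error) {
	client, err := jsonclient.New(logURL, &http.Client{}, jsonclient.Options{PublicKey: pemKey})
	if err != nil {
		return nil, fmt.Errorf("failed to create client: %v", err)
	}
	var sth ct.GetSTHResponse
	if _, _, err := client.GetAndParse(ctx, "/ct/v1/get-sth", nil, &sth); err != nil {
		return nil, err
	}
	return &sth, nil
}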
+func (c *JSONClient) BaseURI() string { + return c.uri +} + +// GetAndParse makes a HTTP GET call to the given path, and attempts to parse +// the response as a JSON representation of the rsp structure. Returns the +// http.Response, the body of the response, and an error (which may be of +// type RspError if the HTTP response was available). It returns an error +// if the response status code is not 200 OK. +func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + // Build a GET request with URL-encoded parameters. + vals := url.Values{} + for k, v := range params { + vals.Add(k, v) + } + fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode()) + httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, fullURI, nil) + if err != nil { + return nil, nil, err + } + if len(c.userAgent) != 0 { + httpReq.Header.Set("User-Agent", c.userAgent) + } + if len(c.authorization) != 0 { + httpReq.Header.Add("Authorization", c.authorization) + } + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return nil, nil, err + } + body, err := io.ReadAll(httpRsp.Body) + if err != nil { + return nil, nil, RspError{Err: fmt.Errorf("failed to read response body: %w", err), StatusCode: httpRsp.StatusCode, Body: body} + } + if err := httpRsp.Body.Close(); err != nil { + return nil, nil, RspError{Err: fmt.Errorf("failed to close response body: %w", err), StatusCode: httpRsp.StatusCode, Body: body} + } + if httpRsp.StatusCode != http.StatusOK { + return nil, nil, RspError{Err: fmt.Errorf("got HTTP Status %q", httpRsp.Status), StatusCode: httpRsp.StatusCode, Body: body} + } + + if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil { + return nil, nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + + return httpRsp, body, nil +} + +// PostAndParse makes a HTTP POST call to the given path, including the request +// parameters, and attempts to parse the response as a JSON representation of +// the rsp structure. Returns the http.Response, the body of the response, and +// an error (which may be of type RspError if the HTTP response was available). +// It does NOT return an error if the response status code is not 200 OK. +func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + // Build a POST request with JSON body. 
+ postBody, err := json.Marshal(req) + if err != nil { + return nil, nil, err + } + fullURI := fmt.Sprintf("%s%s", c.uri, path) + httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURI, bytes.NewReader(postBody)) + if err != nil { + return nil, nil, err + } + if len(c.userAgent) != 0 { + httpReq.Header.Set("User-Agent", c.userAgent) + } + if len(c.authorization) != 0 { + httpReq.Header.Add("Authorization", c.authorization) + } + httpReq.Header.Set("Content-Type", "application/json") + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return nil, nil, err + } + body, err := io.ReadAll(httpRsp.Body) + if err != nil { + _ = httpRsp.Body.Close() + return nil, nil, err + } + if err := httpRsp.Body.Close(); err != nil { + return nil, nil, err + } + if httpRsp.Request.Method != http.MethodPost { + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections#permanent_redirections + return nil, nil, fmt.Errorf("POST request to %q was converted to %s request to %q", fullURI, httpRsp.Request.Method, httpRsp.Request.URL) + } + + if httpRsp.StatusCode == http.StatusOK { + if err := json.Unmarshal(body, &rsp); err != nil { + return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err} + } + } + return httpRsp, body, nil +} + +// waitForBackoff blocks until the defined backoff interval or context has expired, if the returned +// not before time is in the past it returns immediately. +func (c *JSONClient) waitForBackoff(ctx context.Context) error { + dur := time.Until(c.backoff.until().Add(time.Millisecond * time.Duration(rand.Intn(int(maxJitter.Seconds()*1000))))) + if dur < 0 { + dur = 0 + } + backoffTimer := time.NewTimer(dur) + select { + case <-ctx.Done(): + return ctx.Err() + case <-backoffTimer.C: + } + return nil +} + +// PostAndParseWithRetry makes a HTTP POST call, but retries (with backoff) on +// retryable errors; the caller should set a deadline on the provided context +// to prevent infinite retries. Return values are as for PostAndParse. +func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + for { + httpRsp, body, err := c.PostAndParse(ctx, path, req, rsp) + if err != nil { + // Don't retry context errors. 
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, nil, err + } + wait := c.backoff.set(nil) + c.logger.Printf("Request to %s failed, backing-off %s: %s", c.uri, wait, err) + } else { + switch httpRsp.StatusCode { + case http.StatusOK: + return httpRsp, body, nil + case http.StatusRequestTimeout: + // Request timeout, retry immediately + c.logger.Printf("Request to %s timed out, retrying immediately", c.uri) + case http.StatusServiceUnavailable: + fallthrough + case http.StatusTooManyRequests: + var backoff *time.Duration + // Retry-After may be either a number of seconds as a int or a RFC 1123 + // date string (RFC 7231 Section 7.1.3) + if retryAfter := httpRsp.Header.Get("Retry-After"); retryAfter != "" { + if seconds, err := strconv.Atoi(retryAfter); err == nil { + b := time.Duration(seconds) * time.Second + backoff = &b + } else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil { + b := time.Until(date) + backoff = &b + } + } + wait := c.backoff.set(backoff) + c.logger.Printf("Request to %s failed, backing-off for %s: got HTTP status %s", c.uri, wait, httpRsp.Status) + default: + return nil, nil, RspError{ + StatusCode: httpRsp.StatusCode, + Body: body, + Err: fmt.Errorf("got HTTP status %q", httpRsp.Status)} + } + } + if err := c.waitForBackoff(ctx); err != nil { + return nil, nil, err + } + } +} diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go new file mode 100644 index 000000000000..9ac54bae9128 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go @@ -0,0 +1,129 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loglist3 + +import ( + "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509util" +) + +// LogRoots maps Log-URLs (stated at LogList) to the pools of their accepted +// root-certificates. +type LogRoots map[string]*x509util.PEMCertPool + +// Compatible creates a new LogList containing only Logs matching the temporal, +// root-acceptance and Log-status conditions. +func (ll *LogList) Compatible(cert *x509.Certificate, certRoot *x509.Certificate, roots LogRoots) LogList { + active := ll.TemporallyCompatible(cert) + // Do not check root compatbility if roots are not being provided. + if certRoot == nil { + return active + } + return active.RootCompatible(certRoot, roots) +} + +// SelectByStatus creates a new LogList containing only logs with status +// provided from the original. 
+func (ll *LogList) SelectByStatus(lstats []LogStatus) LogList { + var active LogList + for _, op := range ll.Operators { + activeOp := *op + activeOp.Logs = []*Log{} + for _, l := range op.Logs { + for _, lstat := range lstats { + if l.State.LogStatus() == lstat { + activeOp.Logs = append(activeOp.Logs, l) + break + } + } + } + if len(activeOp.Logs) > 0 { + active.Operators = append(active.Operators, &activeOp) + } + } + return active +} + +// RootCompatible creates a new LogList containing only the logs of original +// LogList that are compatible with the provided cert, according to +// the passed in collection of per-log roots. Logs that are missing from +// the collection are treated as always compatible and included, even if +// an empty cert root is passed in. +// Cert-root when provided is expected to be CA-cert. +func (ll *LogList) RootCompatible(certRoot *x509.Certificate, roots LogRoots) LogList { + var compatible LogList + + // Check whether root is a CA-cert. + if certRoot != nil && !certRoot.IsCA { + // Compatible method expects fully rooted chain, while last cert of the chain provided is not root. + // Proceed anyway. + return compatible + } + + for _, op := range ll.Operators { + compatibleOp := *op + compatibleOp.Logs = []*Log{} + for _, l := range op.Logs { + // If root set is not defined, we treat Log as compatible assuming no + // knowledge of its roots. + if _, ok := roots[l.URL]; !ok { + compatibleOp.Logs = append(compatibleOp.Logs, l) + continue + } + + if certRoot == nil { + continue + } + + // Check root is accepted. + if roots[l.URL].Included(certRoot) { + compatibleOp.Logs = append(compatibleOp.Logs, l) + } + } + if len(compatibleOp.Logs) > 0 { + compatible.Operators = append(compatible.Operators, &compatibleOp) + } + } + return compatible +} + +// TemporallyCompatible creates a new LogList containing only the logs of +// original LogList that are compatible with the provided cert, according to +// NotAfter and TemporalInterval matching. +// Returns empty LogList if nil-cert is provided. +func (ll *LogList) TemporallyCompatible(cert *x509.Certificate) LogList { + var compatible LogList + if cert == nil { + return compatible + } + + for _, op := range ll.Operators { + compatibleOp := *op + compatibleOp.Logs = []*Log{} + for _, l := range op.Logs { + if l.TemporalInterval == nil { + compatibleOp.Logs = append(compatibleOp.Logs, l) + continue + } + if cert.NotAfter.Before(l.TemporalInterval.EndExclusive) && (cert.NotAfter.After(l.TemporalInterval.StartInclusive) || cert.NotAfter.Equal(l.TemporalInterval.StartInclusive)) { + compatibleOp.Logs = append(compatibleOp.Logs, l) + } + } + if len(compatibleOp.Logs) > 0 { + compatible.Operators = append(compatible.Operators, &compatibleOp) + } + } + return compatible +} diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go new file mode 100644 index 000000000000..c5e94f1874ff --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go @@ -0,0 +1,427 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loglist3 allows parsing and searching of the master CT Log list. +// It expects the log list to conform to the v3 schema. +package loglist3 + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "regexp" + "strings" + "time" + "unicode" + + "github.com/google/certificate-transparency-go/tls" +) + +const ( + // LogListURL has the master URL for Google Chrome's log list. + LogListURL = "https://www.gstatic.com/ct/log_list/v3/log_list.json" + // LogListSignatureURL has the URL for the signature over Google Chrome's log list. + LogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/log_list.sig" + // AllLogListURL has the URL for the list of all known logs. + AllLogListURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.json" + // AllLogListSignatureURL has the URL for the signature over the list of all known logs. + AllLogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.sig" +) + +// Manually mapped from https://www.gstatic.com/ct/log_list/v3/log_list_schema.json + +// LogList holds a collection of CT logs, grouped by operator. +type LogList struct { + // IsAllLogs is set to true if the list contains all known logs, not + // only usable ones. + IsAllLogs bool `json:"is_all_logs,omitempty"` + // Version is the version of the log list. + Version string `json:"version,omitempty"` + // LogListTimestamp is the time at which the log list was published. + LogListTimestamp time.Time `json:"log_list_timestamp,omitempty"` + // Operators is a list of CT log operators and the logs they operate. + Operators []*Operator `json:"operators"` +} + +// Operator holds a collection of CT logs run by the same organisation. +// It also provides information about that organisation, e.g. contact details. +type Operator struct { + // Name is the name of the CT log operator. + Name string `json:"name"` + // Email lists the email addresses that can be used to contact this log + // operator. + Email []string `json:"email"` + // Logs is a list of RFC 6962 CT logs run by this operator. + Logs []*Log `json:"logs"` + // TiledLogs is a list of Static CT API CT logs run by this operator. + TiledLogs []*TiledLog `json:"tiled_logs"` +} + +// Log describes a single RFC 6962 CT log. It is nearly the same as the TiledLog struct, +// but has a single URL field instead of SubmissionURL and MonitoringURL fields. +type Log struct { + // Description is a human-readable string that describes the log. + Description string `json:"description,omitempty"` + // LogID is the SHA-256 hash of the log's public key. + LogID []byte `json:"log_id"` + // Key is the public key with which signatures can be verified. + Key []byte `json:"key"` + // URL is the address of the HTTPS API. + URL string `json:"url"` + // DNS is the address of the DNS API. + DNS string `json:"dns,omitempty"` + // MMD is the Maximum Merge Delay, in seconds. All submitted + // certificates must be incorporated into the log within this time. 
+ MMD int32 `json:"mmd"` + // PreviousOperators is a list of previous operators and the timestamp + // of when they stopped running the log. + PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"` + // State is the current state of the log, from the perspective of the + // log list distributor. + State *LogStates `json:"state,omitempty"` + // TemporalInterval, if set, indicates that this log only accepts + // certificates with a NotAfter date in this time range. + TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"` + // Type indicates the purpose of this log, e.g. "test" or "prod". + Type string `json:"log_type,omitempty"` +} + +// TiledLog describes a Static CT API log. It is nearly the same as the Log struct, +// but has both SubmissionURL and MonitoringURL fields instead of a single URL field. +type TiledLog struct { + // Description is a human-readable string that describes the log. + Description string `json:"description,omitempty"` + // LogID is the SHA-256 hash of the log's public key. + LogID []byte `json:"log_id"` + // Key is the public key with which signatures can be verified. + Key []byte `json:"key"` + // SubmissionURL + SubmissionURL string `json:"submission_url"` + // MonitoringURL + MonitoringURL string `json:"monitoring_url"` + // DNS is the address of the DNS API. + DNS string `json:"dns,omitempty"` + // MMD is the Maximum Merge Delay, in seconds. All submitted + // certificates must be incorporated into the log within this time. + MMD int32 `json:"mmd"` + // PreviousOperators is a list of previous operators and the timestamp + // of when they stopped running the log. + PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"` + // State is the current state of the log, from the perspective of the + // log list distributor. + State *LogStates `json:"state,omitempty"` + // TemporalInterval, if set, indicates that this log only accepts + // certificates with a NotAfter date in this time range. + TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"` + // Type indicates the purpose of this log, e.g. "test" or "prod". + Type string `json:"log_type,omitempty"` +} + +// PreviousOperator holds information about a log operator and the time at which +// they stopped running a log. +type PreviousOperator struct { + // Name is the name of the CT log operator. + Name string `json:"name"` + // EndTime is the time at which the operator stopped running a log. + EndTime time.Time `json:"end_time"` +} + +// TemporalInterval is a time range. +type TemporalInterval struct { + // StartInclusive is the beginning of the time range. + StartInclusive time.Time `json:"start_inclusive"` + // EndExclusive is just after the end of the time range. + EndExclusive time.Time `json:"end_exclusive"` +} + +// LogStatus indicates Log status. +type LogStatus int + +// LogStatus values +const ( + UndefinedLogStatus LogStatus = iota + PendingLogStatus + QualifiedLogStatus + UsableLogStatus + ReadOnlyLogStatus + RetiredLogStatus + RejectedLogStatus +) + +//go:generate stringer -type=LogStatus + +// LogStates are the states that a CT log can be in, from the perspective of a +// user agent. Only one should be set - this is the current state. +type LogStates struct { + // Pending indicates that the log is in the "pending" state. + Pending *LogState `json:"pending,omitempty"` + // Qualified indicates that the log is in the "qualified" state. 
+ Qualified *LogState `json:"qualified,omitempty"` + // Usable indicates that the log is in the "usable" state. + Usable *LogState `json:"usable,omitempty"` + // ReadOnly indicates that the log is in the "readonly" state. + ReadOnly *ReadOnlyLogState `json:"readonly,omitempty"` + // Retired indicates that the log is in the "retired" state. + Retired *LogState `json:"retired,omitempty"` + // Rejected indicates that the log is in the "rejected" state. + Rejected *LogState `json:"rejected,omitempty"` +} + +// LogState contains details on the current state of a CT log. +type LogState struct { + // Timestamp is the time when the state began. + Timestamp time.Time `json:"timestamp"` +} + +// ReadOnlyLogState contains details on the current state of a read-only CT log. +type ReadOnlyLogState struct { + LogState + // FinalTreeHead is the root hash and tree size at which the CT log was + // made read-only. This should never change while the log is read-only. + FinalTreeHead TreeHead `json:"final_tree_head"` +} + +// TreeHead is the root hash and tree size of a CT log. +type TreeHead struct { + // SHA256RootHash is the root hash of the CT log's Merkle tree. + SHA256RootHash []byte `json:"sha256_root_hash"` + // TreeSize is the size of the CT log's Merkle tree. + TreeSize int64 `json:"tree_size"` +} + +// LogStatus method returns Log-status enum value for descriptive struct. +func (ls *LogStates) LogStatus() LogStatus { + switch { + case ls == nil: + return UndefinedLogStatus + case ls.Pending != nil: + return PendingLogStatus + case ls.Qualified != nil: + return QualifiedLogStatus + case ls.Usable != nil: + return UsableLogStatus + case ls.ReadOnly != nil: + return ReadOnlyLogStatus + case ls.Retired != nil: + return RetiredLogStatus + case ls.Rejected != nil: + return RejectedLogStatus + default: + return UndefinedLogStatus + } +} + +// String method returns printable name of the state. +func (ls *LogStates) String() string { + return ls.LogStatus().String() +} + +// Active picks the set-up state. If multiple states are set (not expected) picks one of them. +func (ls *LogStates) Active() (*LogState, *ReadOnlyLogState) { + if ls == nil { + return nil, nil + } + switch { + case ls.Pending != nil: + return ls.Pending, nil + case ls.Qualified != nil: + return ls.Qualified, nil + case ls.Usable != nil: + return ls.Usable, nil + case ls.ReadOnly != nil: + return nil, ls.ReadOnly + case ls.Retired != nil: + return ls.Retired, nil + case ls.Rejected != nil: + return ls.Rejected, nil + default: + return nil, nil + } +} + +// GoogleOperated returns whether Operator is considered to be Google. +func (op *Operator) GoogleOperated() bool { + for _, email := range op.Email { + if strings.Contains(email, "google-ct-logs@googlegroups") { + return true + } + } + return false +} + +// NewFromJSON creates a LogList from JSON encoded data. +func NewFromJSON(llData []byte) (*LogList, error) { + var ll LogList + if err := json.Unmarshal(llData, &ll); err != nil { + return nil, fmt.Errorf("failed to parse log list: %v", err) + } + return &ll, nil +} + +// NewFromSignedJSON creates a LogList from JSON encoded data, checking a +// signature along the way. The signature data should be provided as the +// raw signature data. 
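Taken together, NewFromJSON and the filtering helpers from logfilter.go earlier in this patch support the common pattern of loading the list and narrowing it to logs that can accept a given certificate. The sketch below is illustrative only; the file path and certificate are assumed inputs.

package ctexample // illustrative sketch, not vendored code

import (
	"os"

	"github.com/google/certificate-transparency-go/loglist3"
	"github.com/google/certificate-transparency-go/x509"
)

// candidateLogs loads a log list, keeps only usable or qualified logs, and drops
// logs whose temporal interval cannot accept the certificate's NotAfter date.
func candidateLogs(path string, cert *x509.Certificate) (loglist3.LogList, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return loglist3.LogList{}, err
	}
	ll, err := loglist3.NewFromJSON(data)
	if err != nil {
		return loglist3.LogList{}, err
	}
	usable := ll.SelectByStatus([]loglist3.LogStatus{loglist3.UsableLogStatus, loglist3.QualifiedLogStatus})
	return usable.TemporallyCompatible(cert), nil
}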
+func NewFromSignedJSON(llData, rawSig []byte, pubKey crypto.PublicKey) (*LogList, error) { + var sigAlgo tls.SignatureAlgorithm + switch pkType := pubKey.(type) { + case *rsa.PublicKey: + sigAlgo = tls.RSA + case *ecdsa.PublicKey: + sigAlgo = tls.ECDSA + default: + return nil, fmt.Errorf("unsupported public key type %v", pkType) + } + tlsSig := tls.DigitallySigned{ + Algorithm: tls.SignatureAndHashAlgorithm{ + Hash: tls.SHA256, + Signature: sigAlgo, + }, + Signature: rawSig, + } + if err := tls.VerifySignature(pubKey, llData, tlsSig); err != nil { + return nil, fmt.Errorf("failed to verify signature: %v", err) + } + return NewFromJSON(llData) +} + +// FindLogByName returns all logs whose names contain the given string. +func (ll *LogList) FindLogByName(name string) []*Log { + name = strings.ToLower(name) + var results []*Log + for _, op := range ll.Operators { + for _, log := range op.Logs { + if strings.Contains(strings.ToLower(log.Description), name) { + results = append(results, log) + } + } + } + return results +} + +// FindLogByURL finds the log with the given URL. +func (ll *LogList) FindLogByURL(url string) *Log { + for _, op := range ll.Operators { + for _, log := range op.Logs { + // Don't count trailing slashes + if strings.TrimRight(log.URL, "/") == strings.TrimRight(url, "/") { + return log + } + } + } + return nil +} + +// FindLogByKeyHash finds the log with the given key hash. +func (ll *LogList) FindLogByKeyHash(keyhash [sha256.Size]byte) *Log { + for _, op := range ll.Operators { + for _, log := range op.Logs { + if bytes.Equal(log.LogID, keyhash[:]) { + return log + } + } + } + return nil +} + +// FindLogByKeyHashPrefix finds all logs whose key hash starts with the prefix. +func (ll *LogList) FindLogByKeyHashPrefix(prefix string) []*Log { + var results []*Log + for _, op := range ll.Operators { + for _, log := range op.Logs { + hh := hex.EncodeToString(log.LogID[:]) + if strings.HasPrefix(hh, prefix) { + results = append(results, log) + } + } + } + return results +} + +// FindLogByKey finds the log with the given DER-encoded key. +func (ll *LogList) FindLogByKey(key []byte) *Log { + for _, op := range ll.Operators { + for _, log := range op.Logs { + if bytes.Equal(log.Key[:], key) { + return log + } + } + } + return nil +} + +var hexDigits = regexp.MustCompile("^[0-9a-fA-F]+$") + +// FuzzyFindLog tries to find logs that match the given unspecified input, +// whose format is unspecified. This generally returns a single log, but +// if text input that matches multiple log descriptions is provided, then +// multiple logs may be returned. +func (ll *LogList) FuzzyFindLog(input string) []*Log { + input = strings.Trim(input, " \t") + if logs := ll.FindLogByName(input); len(logs) > 0 { + return logs + } + if log := ll.FindLogByURL(input); log != nil { + return []*Log{log} + } + // Try assuming the input is binary data of some form. First base64: + if data, err := base64.StdEncoding.DecodeString(input); err == nil { + if len(data) == sha256.Size { + var hash [sha256.Size]byte + copy(hash[:], data) + if log := ll.FindLogByKeyHash(hash); log != nil { + return []*Log{log} + } + } + if log := ll.FindLogByKey(data); log != nil { + return []*Log{log} + } + } + // Now hex, but strip all internal whitespace first. 
+ input = stripInternalSpace(input) + if data, err := hex.DecodeString(input); err == nil { + if len(data) == sha256.Size { + var hash [sha256.Size]byte + copy(hash[:], data) + if log := ll.FindLogByKeyHash(hash); log != nil { + return []*Log{log} + } + } + if log := ll.FindLogByKey(data); log != nil { + return []*Log{log} + } + } + // Finally, allow hex strings with an odd number of digits. + if hexDigits.MatchString(input) { + if logs := ll.FindLogByKeyHashPrefix(input); len(logs) > 0 { + return logs + } + } + + return nil +} + +func stripInternalSpace(input string) string { + return strings.Map(func(r rune) rune { + if !unicode.IsSpace(r) { + return r + } + return -1 + }, input) +} diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go new file mode 100644 index 000000000000..84c7bbdf4e43 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=LogStatus"; DO NOT EDIT. + +package loglist3 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UndefinedLogStatus-0] + _ = x[PendingLogStatus-1] + _ = x[QualifiedLogStatus-2] + _ = x[UsableLogStatus-3] + _ = x[ReadOnlyLogStatus-4] + _ = x[RetiredLogStatus-5] + _ = x[RejectedLogStatus-6] +} + +const _LogStatus_name = "UndefinedLogStatusPendingLogStatusQualifiedLogStatusUsableLogStatusReadOnlyLogStatusRetiredLogStatusRejectedLogStatus" + +var _LogStatus_index = [...]uint8{0, 18, 34, 52, 67, 84, 100, 117} + +func (i LogStatus) String() string { + if i < 0 || i >= LogStatus(len(_LogStatus_index)-1) { + return "LogStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _LogStatus_name[_LogStatus_index[i]:_LogStatus_index[i+1]] +} diff --git a/vendor/github.com/google/certificate-transparency-go/proto_gen.go b/vendor/github.com/google/certificate-transparency-go/proto_gen.go new file mode 100644 index 000000000000..565c6bbbc827 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/proto_gen.go @@ -0,0 +1,25 @@ +// Copyright 2021 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ct + +// We do the protoc generation here (rather than in the individual directories) +// in order to work around the newly-enforced rule that all protobuf file "names" +// must be unique. +// See https://developers.google.com/protocol-buffers/docs/proto#packages and +// https://github.com/golang/protobuf/issues/1122 + +//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/trillian) -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. trillian/ctfe/configpb/config.proto" +//go:generate sh -c "protoc -I=. 
-I$(go list -f '{{ .Dir }}' github.com/google/trillian) -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. trillian/migrillian/configpb/config.proto" +//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. client/configpb/multilog.proto" diff --git a/vendor/github.com/google/certificate-transparency-go/serialization.go b/vendor/github.com/google/certificate-transparency-go/serialization.go new file mode 100644 index 000000000000..2a6c21ed4cfd --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/serialization.go @@ -0,0 +1,317 @@ +// Copyright 2015 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ct + +import ( + "crypto" + "crypto/sha256" + "fmt" + "time" + + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +// SerializeSCTSignatureInput serializes the passed in sct and log entry into +// the correct format for signing. +func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) { + switch sct.SCTVersion { + case V1: + input := CertificateTimestamp{ + SCTVersion: sct.SCTVersion, + SignatureType: CertificateTimestampSignatureType, + Timestamp: sct.Timestamp, + EntryType: entry.Leaf.TimestampedEntry.EntryType, + Extensions: sct.Extensions, + } + switch entry.Leaf.TimestampedEntry.EntryType { + case X509LogEntryType: + input.X509Entry = entry.Leaf.TimestampedEntry.X509Entry + case PrecertLogEntryType: + input.PrecertEntry = &PreCert{ + IssuerKeyHash: entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash, + TBSCertificate: entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate, + } + default: + return nil, fmt.Errorf("unsupported entry type %s", entry.Leaf.TimestampedEntry.EntryType) + } + return tls.Marshal(input) + default: + return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion) + } +} + +// SerializeSTHSignatureInput serializes the passed in STH into the correct +// format for signing. 
+func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) { + switch sth.Version { + case V1: + if len(sth.SHA256RootHash) != crypto.SHA256.Size() { + return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size()) + } + + input := TreeHeadSignature{ + Version: sth.Version, + SignatureType: TreeHashSignatureType, + Timestamp: sth.Timestamp, + TreeSize: sth.TreeSize, + SHA256RootHash: sth.SHA256RootHash, + } + return tls.Marshal(input) + default: + return nil, fmt.Errorf("unsupported STH version %d", sth.Version) + } +} + +// CreateX509MerkleTreeLeaf generates a MerkleTreeLeaf for an X509 cert +func CreateX509MerkleTreeLeaf(cert ASN1Cert, timestamp uint64) *MerkleTreeLeaf { + return &MerkleTreeLeaf{ + Version: V1, + LeafType: TimestampedEntryLeafType, + TimestampedEntry: &TimestampedEntry{ + Timestamp: timestamp, + EntryType: X509LogEntryType, + X509Entry: &cert, + }, + } +} + +// MerkleTreeLeafFromRawChain generates a MerkleTreeLeaf from a chain (in DER-encoded form) and timestamp. +func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) { + // Need at most 3 of the chain + count := 3 + if count > len(rawChain) { + count = len(rawChain) + } + chain := make([]*x509.Certificate, count) + for i := range chain { + cert, err := x509.ParseCertificate(rawChain[i].Data) + if x509.IsFatal(err) { + return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err) + } + chain[i] = cert + } + return MerkleTreeLeafFromChain(chain, etype, timestamp) +} + +// MerkleTreeLeafFromChain generates a MerkleTreeLeaf from a chain and timestamp. +func MerkleTreeLeafFromChain(chain []*x509.Certificate, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) { + leaf := MerkleTreeLeaf{ + Version: V1, + LeafType: TimestampedEntryLeafType, + TimestampedEntry: &TimestampedEntry{ + EntryType: etype, + Timestamp: timestamp, + }, + } + if etype == X509LogEntryType { + leaf.TimestampedEntry.X509Entry = &ASN1Cert{Data: chain[0].Raw} + return &leaf, nil + } + if etype != PrecertLogEntryType { + return nil, fmt.Errorf("unknown LogEntryType %d", etype) + } + + // Pre-certs are more complicated. First, parse the leaf pre-cert and its + // putative issuer. + if len(chain) < 2 { + return nil, fmt.Errorf("no issuer cert available for precert leaf building") + } + issuer := chain[1] + cert := chain[0] + + var preIssuer *x509.Certificate + if IsPreIssuer(issuer) { + // Replace the cert's issuance information with details from the pre-issuer. + preIssuer = issuer + + // The issuer of the pre-cert is not going to be the issuer of the final + // cert. Change to use the final issuer's key hash. + if len(chain) < 3 { + return nil, fmt.Errorf("no issuer cert available for pre-issuer") + } + issuer = chain[2] + } + + // Next, post-process the DER-encoded TBSCertificate, to remove the CT poison + // extension and possibly update the issuer field. 
+ defangedTBS, err := x509.BuildPrecertTBS(cert.RawTBSCertificate, preIssuer) + if err != nil { + return nil, fmt.Errorf("failed to remove poison extension: %v", err) + } + + leaf.TimestampedEntry.EntryType = PrecertLogEntryType + leaf.TimestampedEntry.PrecertEntry = &PreCert{ + IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo), + TBSCertificate: defangedTBS, + } + return &leaf, nil +} + +// MerkleTreeLeafForEmbeddedSCT generates a MerkleTreeLeaf from a chain and an +// SCT timestamp, where the leaf certificate at chain[0] is a certificate that +// contains embedded SCTs. It is assumed that the timestamp provided is from +// one of the SCTs embedded within the leaf certificate. +func MerkleTreeLeafForEmbeddedSCT(chain []*x509.Certificate, timestamp uint64) (*MerkleTreeLeaf, error) { + // For building the leaf for a certificate and SCT where the SCT is embedded + // in the certificate, we need to build the original precertificate TBS + // data. First, parse the leaf cert and its issuer. + if len(chain) < 2 { + return nil, fmt.Errorf("no issuer cert available for precert leaf building") + } + issuer := chain[1] + cert := chain[0] + + // Next, post-process the DER-encoded TBSCertificate, to remove the SCTList + // extension. + tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate) + if err != nil { + return nil, fmt.Errorf("failed to remove SCT List extension: %v", err) + } + + return &MerkleTreeLeaf{ + Version: V1, + LeafType: TimestampedEntryLeafType, + TimestampedEntry: &TimestampedEntry{ + EntryType: PrecertLogEntryType, + Timestamp: timestamp, + PrecertEntry: &PreCert{ + IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo), + TBSCertificate: tbs, + }, + }, + }, nil +} + +// LeafHashForLeaf returns the leaf hash for a Merkle tree leaf. +func LeafHashForLeaf(leaf *MerkleTreeLeaf) ([sha256.Size]byte, error) { + leafData, err := tls.Marshal(*leaf) + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("failed to tls-encode MerkleTreeLeaf: %s", err) + } + + data := append([]byte{TreeLeafPrefix}, leafData...) + leafHash := sha256.Sum256(data) + return leafHash, nil +} + +// IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific +// certificate transparency extended key usage. +func IsPreIssuer(issuer *x509.Certificate) bool { + for _, eku := range issuer.ExtKeyUsage { + if eku == x509.ExtKeyUsageCertificateTransparency { + return true + } + } + return false +} + +// RawLogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data +// after JSON parsing) into a RawLogEntry object (i.e. a TLS-parsed structure). 
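LeafHashForLeaf produces the hash that inclusion proofs (see ctutil's VerifyInclusionAt earlier in this patch) are computed over. The sketch below derives the leaf hash for an ordinary X.509 entry; it is illustrative only, chain[0] is the end-entity certificate, and the timestamp would normally come from the log's SCT.

package ctexample // illustrative sketch, not vendored code

import (
	"crypto/sha256"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/x509"
)

// x509LeafHash computes the RFC 6962 Merkle tree leaf hash for a certificate entry.
func x509LeafHash(chain []*x509.Certificate, timestamp uint64) ([sha256.Size]byte, error) {
	leaf, err := ct.MerkleTreeLeafFromChain(chain, ct.X509LogEntryType, timestamp)
	if err != nil {
		return [sha256.Size]byte{}, err
	}
	return ct.LeafHashForLeaf(leaf)
}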
+func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) { + ret := RawLogEntry{Index: index} + if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil { + return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest)) + } + + switch eType := ret.Leaf.TimestampedEntry.EntryType; eType { + case X509LogEntryType: + var certChain CertificateChain + if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil { + return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest)) + } + ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry + ret.Chain = certChain.Entries + + case PrecertLogEntryType: + var precertChain PrecertChainEntry + if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil { + return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest)) + } + ret.Cert = precertChain.PreCertificate + ret.Chain = precertChain.CertificateChain + + default: + // TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types + // are not errors. We should revisit how we process this case. + return nil, fmt.Errorf("unknown entry type: %v", eType) + } + + return &ret, nil +} + +// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed +// (pre-)certificate. +// +// Note that this function may return a valid LogEntry object and a non-nil +// error value, when the error indicates a non-fatal parsing error. +func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) { + var err error + entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain} + + switch eType := rle.Leaf.TimestampedEntry.EntryType; eType { + case X509LogEntryType: + entry.X509Cert, err = rle.Leaf.X509Certificate() + if x509.IsFatal(err) { + return nil, fmt.Errorf("failed to parse certificate: %v", err) + } + + case PrecertLogEntryType: + var tbsCert *x509.Certificate + tbsCert, err = rle.Leaf.Precertificate() + if x509.IsFatal(err) { + return nil, fmt.Errorf("failed to parse precertificate: %v", err) + } + entry.Precert = &Precertificate{ + Submitted: rle.Cert, + IssuerKeyHash: rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash, + TBSCertificate: tbsCert, + } + + default: + return nil, fmt.Errorf("unknown entry type: %v", eType) + } + + // err may be non-nil for a non-fatal error. + return &entry, err +} + +// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data +// after JSON parsing) into a LogEntry object (which includes x509.Certificate +// objects, after TLS and ASN.1 parsing). +// +// Note that this function may return a valid LogEntry object and a non-nil +// error value, when the error indicates a non-fatal parsing error. +func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) { + rle, err := RawLogEntryFromLeaf(index, leaf) + if err != nil { + return nil, err + } + return rle.ToLogEntry() +} + +// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds +// since UNIX epoch) to a Go Time. 
+func TimestampToTime(ts uint64) time.Time { + secs := int64(ts / 1000) + msecs := int64(ts % 1000) + return time.Unix(secs, msecs*1000000) +} diff --git a/vendor/github.com/google/certificate-transparency-go/signatures.go b/vendor/github.com/google/certificate-transparency-go/signatures.go new file mode 100644 index 000000000000..b009008c6f59 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/signatures.go @@ -0,0 +1,110 @@ +// Copyright 2015 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ct + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/pem" + "fmt" + "log" + + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +// AllowVerificationWithNonCompliantKeys may be set to true in order to allow +// SignatureVerifier to use keys which are technically non-compliant with +// RFC6962. +var AllowVerificationWithNonCompliantKeys = false + +// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error. +func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) { + p, rest := pem.Decode(b) + if p == nil { + return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b)) + } + k, err := x509.ParsePKIXPublicKey(p.Bytes) + return k, sha256.Sum256(p.Bytes), rest, err +} + +// PublicKeyFromB64 parses a base64-encoded public key. +func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) { + der, err := base64.StdEncoding.DecodeString(b64PubKey) + if err != nil { + return nil, fmt.Errorf("error decoding public key: %s", err) + } + return x509.ParsePKIXPublicKey(der) +} + +// SignatureVerifier can verify signatures on SCTs and STHs +type SignatureVerifier struct { + PubKey crypto.PublicKey +} + +// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey. +func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) { + switch pkType := pk.(type) { + case *rsa.PublicKey: + if pkType.N.BitLen() < 2048 { + e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen()) + if !AllowVerificationWithNonCompliantKeys { + return nil, e + } + log.Printf("WARNING: %v", e) + } + case *ecdsa.PublicKey: + params := *(pkType.Params()) + if params != *elliptic.P256().Params() { + e := fmt.Errorf("public is ECDSA, but not on the P256 curve") + if !AllowVerificationWithNonCompliantKeys { + return nil, e + } + log.Printf("WARNING: %v", e) + + } + default: + return nil, fmt.Errorf("unsupported public key type %v", pkType) + } + + return &SignatureVerifier{PubKey: pk}, nil +} + +// VerifySignature verifies the given signature sig matches the data. 
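A sketch of typical use, illustrative only: build a SignatureVerifier from a log's base64-encoded public key (as published in the log list) and use its VerifySTHSignature method, defined just below, to check a fetched tree head.

package ctexample // illustrative sketch, not vendored code

import (
	ct "github.com/google/certificate-transparency-go"
)

// checkSTH verifies a signed tree head against the log's public key.
func checkSTH(b64PubKey string, sth ct.SignedTreeHead) error {
	pk, err := ct.PublicKeyFromB64(b64PubKey)
	if err != nil {
		return err
	}
	v, err := ct.NewSignatureVerifier(pk)
	if err != nil {
		return err
	}
	return v.VerifySTHSignature(sth)
}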
+func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error { + return tls.VerifySignature(s.PubKey, data, sig) +} + +// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry. +func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error { + sctData, err := SerializeSCTSignatureInput(sct, entry) + if err != nil { + return err + } + return s.VerifySignature(sctData, tls.DigitallySigned(sct.Signature)) +} + +// VerifySTHSignature verifies that the STH's signature is valid. +func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error { + sthData, err := SerializeSTHSignatureInput(sth) + if err != nil { + return err + } + return s.VerifySignature(sthData, tls.DigitallySigned(sth.TreeHeadSignature)) +} diff --git a/vendor/github.com/google/certificate-transparency-go/tls/signature.go b/vendor/github.com/google/certificate-transparency-go/tls/signature.go new file mode 100644 index 000000000000..bc174df2124a --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/tls/signature.go @@ -0,0 +1,152 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tls + +import ( + "crypto" + "crypto/dsa" //nolint:staticcheck + "crypto/ecdsa" + _ "crypto/md5" // For registration side-effect + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" // For registration side-effect + _ "crypto/sha256" // For registration side-effect + _ "crypto/sha512" // For registration side-effect + "errors" + "fmt" + "log" + "math/big" + + "github.com/google/certificate-transparency-go/asn1" +) + +type dsaSig struct { + R, S *big.Int +} + +func generateHash(algo HashAlgorithm, data []byte) ([]byte, crypto.Hash, error) { + var hashType crypto.Hash + switch algo { + case MD5: + hashType = crypto.MD5 + case SHA1: + hashType = crypto.SHA1 + case SHA224: + hashType = crypto.SHA224 + case SHA256: + hashType = crypto.SHA256 + case SHA384: + hashType = crypto.SHA384 + case SHA512: + hashType = crypto.SHA512 + default: + return nil, hashType, fmt.Errorf("unsupported Algorithm.Hash in signature: %v", algo) + } + + hasher := hashType.New() + if _, err := hasher.Write(data); err != nil { + return nil, hashType, fmt.Errorf("failed to write to hasher: %v", err) + } + return hasher.Sum([]byte{}), hashType, nil +} + +// VerifySignature verifies that the passed in signature over data was created by the given PublicKey. 
+func VerifySignature(pubKey crypto.PublicKey, data []byte, sig DigitallySigned) error { + hash, hashType, err := generateHash(sig.Algorithm.Hash, data) + if err != nil { + return err + } + + switch sig.Algorithm.Signature { + case RSA: + rsaKey, ok := pubKey.(*rsa.PublicKey) + if !ok { + return fmt.Errorf("cannot verify RSA signature with %T key", pubKey) + } + if err := rsa.VerifyPKCS1v15(rsaKey, hashType, hash, sig.Signature); err != nil { + return fmt.Errorf("failed to verify rsa signature: %v", err) + } + case DSA: + dsaKey, ok := pubKey.(*dsa.PublicKey) + if !ok { + return fmt.Errorf("cannot verify DSA signature with %T key", pubKey) + } + var dsaSig dsaSig + rest, err := asn1.Unmarshal(sig.Signature, &dsaSig) + if err != nil { + return fmt.Errorf("failed to unmarshal DSA signature: %v", err) + } + if len(rest) != 0 { + log.Printf("Garbage following signature %q", rest) + } + if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 { + return errors.New("DSA signature contained zero or negative values") + } + if !dsa.Verify(dsaKey, hash, dsaSig.R, dsaSig.S) { + return errors.New("failed to verify DSA signature") + } + case ECDSA: + ecdsaKey, ok := pubKey.(*ecdsa.PublicKey) + if !ok { + return fmt.Errorf("cannot verify ECDSA signature with %T key", pubKey) + } + var ecdsaSig dsaSig + rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig) + if err != nil { + return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err) + } + if len(rest) != 0 { + log.Printf("Garbage following signature %q", rest) + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errors.New("ECDSA signature contained zero or negative values") + } + + if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) { + return errors.New("failed to verify ECDSA signature") + } + default: + return fmt.Errorf("unsupported Algorithm.Signature in signature: %v", sig.Algorithm.Hash) + } + return nil +} + +// CreateSignature builds a signature over the given data using the specified hash algorithm and private key. +func CreateSignature(privKey crypto.PrivateKey, hashAlgo HashAlgorithm, data []byte) (DigitallySigned, error) { + var sig DigitallySigned + sig.Algorithm.Hash = hashAlgo + hash, hashType, err := generateHash(sig.Algorithm.Hash, data) + if err != nil { + return sig, err + } + + switch privKey := privKey.(type) { + case rsa.PrivateKey: + sig.Algorithm.Signature = RSA + sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, &privKey, hashType, hash) + return sig, err + case ecdsa.PrivateKey: + sig.Algorithm.Signature = ECDSA + var ecdsaSig dsaSig + ecdsaSig.R, ecdsaSig.S, err = ecdsa.Sign(rand.Reader, &privKey, hash) + if err != nil { + return sig, err + } + sig.Signature, err = asn1.Marshal(ecdsaSig) + return sig, err + default: + return sig, fmt.Errorf("unsupported private key type %T", privKey) + } +} diff --git a/vendor/github.com/google/certificate-transparency-go/tls/tls.go b/vendor/github.com/google/certificate-transparency-go/tls/tls.go new file mode 100644 index 000000000000..a48c998f48dc --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/tls/tls.go @@ -0,0 +1,711 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tls implements functionality for dealing with TLS-encoded data, +// as defined in RFC 5246. This includes parsing and generation of TLS-encoded +// data, together with utility functions for dealing with the DigitallySigned +// TLS type. +package tls + +import ( + "bytes" + "encoding/binary" + "fmt" + "reflect" + "strconv" + "strings" +) + +// This file holds utility functions for TLS encoding/decoding data +// as per RFC 5246 section 4. + +// A structuralError suggests that the TLS data is valid, but the Go type +// which is receiving it doesn't match. +type structuralError struct { + field string + msg string +} + +func (e structuralError) Error() string { + var prefix string + if e.field != "" { + prefix = e.field + ": " + } + return "tls: structure error: " + prefix + e.msg +} + +// A syntaxError suggests that the TLS data is invalid. +type syntaxError struct { + field string + msg string +} + +func (e syntaxError) Error() string { + var prefix string + if e.field != "" { + prefix = e.field + ": " + } + return "tls: syntax error: " + prefix + e.msg +} + +// Uint24 is an unsigned 3-byte integer. +type Uint24 uint32 + +// Enum is an unsigned integer. +type Enum uint64 + +var ( + uint8Type = reflect.TypeOf(uint8(0)) + uint16Type = reflect.TypeOf(uint16(0)) + uint24Type = reflect.TypeOf(Uint24(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + enumType = reflect.TypeOf(Enum(0)) +) + +// Unmarshal parses the TLS-encoded data in b and uses the reflect package to +// fill in an arbitrary value pointed at by val. Because Unmarshal uses the +// reflect package, the structs being written to must use exported fields +// (upper case names). +// +// The mappings between TLS types and Go types is as follows; some fields +// must have tags (to indicate their encoded size). +// +// TLS Go Required Tags +// opaque byte / uint8 +// uint8 byte / uint8 +// uint16 uint16 +// uint24 tls.Uint24 +// uint32 uint32 +// uint64 uint64 +// enum tls.Enum size:S or maxval:N +// Type []Type minlen:N,maxlen:M +// opaque[N] [N]byte / [N]uint8 +// uint8[N] [N]byte / [N]uint8 +// struct { } struct { } +// select(T) { +// case e1: Type *T selector:Field,val:e1 +// } +// +// TLS variants (RFC 5246 s4.6.1) are only supported when the value of the +// associated enumeration type is available earlier in the same enclosing +// struct, and each possible variant is marked with a selector tag (to +// indicate which field selects the variants) and a val tag (to indicate +// what value of the selector picks this particular field). +// +// For example, a TLS structure: +// +// enum { e1(1), e2(2) } EnumType; +// struct { +// EnumType sel; +// select(sel) { +// case e1: uint16 +// case e2: uint32 +// } data; +// } VariantItem; +// +// would have a corresponding Go type: +// +// type VariantItem struct { +// Sel tls.Enum `tls:"maxval:2"` +// Data16 *uint16 `tls:"selector:Sel,val:1"` +// Data32 *uint32 `tls:"selector:Sel,val:2"` +// } +// +// TLS fixed-length vectors of types other than opaque or uint8 are not supported. 
+// +// For TLS variable-length vectors that are themselves used in other vectors, +// create a single-field structure to represent the inner type. For example, for: +// +// opaque InnerType<1..65535>; +// struct { +// InnerType inners<1,65535>; +// } Something; +// +// convert to: +// +// type InnerType struct { +// Val []byte `tls:"minlen:1,maxlen:65535"` +// } +// type Something struct { +// Inners []InnerType `tls:"minlen:1,maxlen:65535"` +// } +// +// If the encoded value does not fit in the Go type, Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) ([]byte, error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) ([]byte, error) { + info, err := fieldTagToFieldInfo(params, "") + if err != nil { + return nil, err + } + // The passed in interface{} is a pointer (to allow the value to be written + // to); extract the pointed-to object as a reflect.Value, so parseField + // can do various introspection things. + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, info) + if err != nil { + return nil, err + } + return b[offset:], nil +} + +// Return the number of bytes needed to encode values up to (and including) x. +func byteCount(x uint64) uint { + switch { + case x < 0x100: + return 1 + case x < 0x10000: + return 2 + case x < 0x1000000: + return 3 + case x < 0x100000000: + return 4 + case x < 0x10000000000: + return 5 + case x < 0x1000000000000: + return 6 + case x < 0x100000000000000: + return 7 + default: + return 8 + } +} + +type fieldInfo struct { + count uint // Number of bytes + countSet bool + minlen uint64 // Only relevant for slices + maxlen uint64 // Only relevant for slices + selector string // Only relevant for select sub-values + val uint64 // Only relevant for select sub-values + name string // Used for better error messages +} + +func (i *fieldInfo) fieldName() string { + if i == nil { + return "" + } + return i.name +} + +// Given a tag string, return a fieldInfo describing the field. +func fieldTagToFieldInfo(str string, name string) (*fieldInfo, error) { + var info *fieldInfo + // Iterate over clauses in the tag, ignoring any that don't parse properly. 
+ for _, part := range strings.Split(str, ",") { + switch { + case strings.HasPrefix(part, "maxval:"): + if v, err := strconv.ParseUint(part[7:], 10, 64); err == nil { + info = &fieldInfo{count: byteCount(v), countSet: true} + } + case strings.HasPrefix(part, "size:"): + if sz, err := strconv.ParseUint(part[5:], 10, 32); err == nil { + info = &fieldInfo{count: uint(sz), countSet: true} + } + case strings.HasPrefix(part, "maxlen:"): + v, err := strconv.ParseUint(part[7:], 10, 64) + if err != nil { + continue + } + if info == nil { + info = &fieldInfo{} + } + info.count = byteCount(v) + info.countSet = true + info.maxlen = v + case strings.HasPrefix(part, "minlen:"): + v, err := strconv.ParseUint(part[7:], 10, 64) + if err != nil { + continue + } + if info == nil { + info = &fieldInfo{} + } + info.minlen = v + case strings.HasPrefix(part, "selector:"): + if info == nil { + info = &fieldInfo{} + } + info.selector = part[9:] + case strings.HasPrefix(part, "val:"): + v, err := strconv.ParseUint(part[4:], 10, 64) + if err != nil { + continue + } + if info == nil { + info = &fieldInfo{} + } + info.val = v + } + } + if info != nil { + info.name = name + if info.selector == "" { + if info.count < 1 { + return nil, structuralError{name, "field of unknown size in " + str} + } else if info.count > 8 { + return nil, structuralError{name, "specified size too large in " + str} + } else if info.minlen > info.maxlen { + return nil, structuralError{name, "specified length range inverted in " + str} + } else if info.val > 0 { + return nil, structuralError{name, "specified selector value but not field in " + str} + } + } + } else if name != "" { + info = &fieldInfo{name: name} + } + return info, nil +} + +// Check that a value fits into a field described by a fieldInfo structure. +func (i fieldInfo) check(val uint64, fldName string) error { + if val >= (1 << (8 * i.count)) { + return structuralError{fldName, fmt.Sprintf("value %d too large for size", val)} + } + if i.maxlen != 0 { + if val < i.minlen { + return structuralError{fldName, fmt.Sprintf("value %d too small for minimum %d", val, i.minlen)} + } + if val > i.maxlen { + return structuralError{fldName, fmt.Sprintf("value %d too large for maximum %d", val, i.maxlen)} + } + } + return nil +} + +// readVarUint reads an big-endian unsigned integer of the given size in +// bytes. +func readVarUint(data []byte, info *fieldInfo) (uint64, error) { + if info == nil || !info.countSet { + return 0, structuralError{info.fieldName(), "no field size information available"} + } + if len(data) < int(info.count) { + return 0, syntaxError{info.fieldName(), "truncated variable-length integer"} + } + var result uint64 + for i := uint(0); i < info.count; i++ { + result = (result << 8) | uint64(data[i]) + } + if err := info.check(result, info.name); err != nil { + return 0, err + } + return result, nil +} + +// parseField is the main parsing function. Given a byte slice and an offset +// (in bytes) into the data, it will try to parse a suitable ASN.1 value out +// and store it in the given Value. +func parseField(v reflect.Value, data []byte, initOffset int, info *fieldInfo) (int, error) { + offset := initOffset + rest := data[offset:] + + fieldType := v.Type() + // First look for known fixed types. 
+ switch fieldType { + case uint8Type: + if len(rest) < 1 { + return offset, syntaxError{info.fieldName(), "truncated uint8"} + } + v.SetUint(uint64(rest[0])) + offset++ + return offset, nil + case uint16Type: + if len(rest) < 2 { + return offset, syntaxError{info.fieldName(), "truncated uint16"} + } + v.SetUint(uint64(binary.BigEndian.Uint16(rest))) + offset += 2 + return offset, nil + case uint24Type: + if len(rest) < 3 { + return offset, syntaxError{info.fieldName(), "truncated uint24"} + } + v.SetUint(uint64(data[0])<<16 | uint64(data[1])<<8 | uint64(data[2])) + offset += 3 + return offset, nil + case uint32Type: + if len(rest) < 4 { + return offset, syntaxError{info.fieldName(), "truncated uint32"} + } + v.SetUint(uint64(binary.BigEndian.Uint32(rest))) + offset += 4 + return offset, nil + case uint64Type: + if len(rest) < 8 { + return offset, syntaxError{info.fieldName(), "truncated uint64"} + } + v.SetUint(uint64(binary.BigEndian.Uint64(rest))) + offset += 8 + return offset, nil + } + + // Now deal with user-defined types. + switch v.Kind() { + case enumType.Kind(): + // Assume that anything of the same kind as Enum is an Enum, so that + // users can alias types of their own to Enum. + val, err := readVarUint(rest, info) + if err != nil { + return offset, err + } + v.SetUint(val) + offset += int(info.count) + return offset, nil + case reflect.Struct: + structType := fieldType + // TLS includes a select(Enum) {..} construct, where the value of an enum + // indicates which variant field is present (like a C union). We require + // that the enum value be an earlier field in the same structure (the selector), + // and that each of the possible variant destination fields be pointers. + // So the Go mapping looks like: + // type variantType struct { + // Which tls.Enum `tls:"size:1"` // this is the selector + // Val1 *type1 `tls:"selector:Which,val:1"` // this is a destination + // Val2 *type2 `tls:"selector:Which,val:1"` // this is a destination + // } + + // To deal with this, we track any enum-like fields and their values... + enums := make(map[string]uint64) + // .. and we track which selector names we've seen (in the destination field tags), + // and whether a destination for that selector has been chosen. + selectorSeen := make(map[string]bool) + for i := 0; i < structType.NumField(); i++ { + // Find information about this field. + tag := structType.Field(i).Tag.Get("tls") + fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name) + if err != nil { + return offset, err + } + + destination := v.Field(i) + if fieldInfo.selector != "" { + // This is a possible select(Enum) destination, so first check that the referenced + // selector field has already been seen earlier in the struct. + choice, ok := enums[fieldInfo.selector] + if !ok { + return offset, structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector} + } + if structType.Field(i).Type.Kind() != reflect.Ptr { + return offset, structuralError{fieldInfo.name, "choice field not a pointer type"} + } + // Is this the first mention of the selector field name? If so, remember it. + seen, ok := selectorSeen[fieldInfo.selector] + if !ok { + selectorSeen[fieldInfo.selector] = false + } + if choice != fieldInfo.val { + // This destination field was not the chosen one, so make it nil (we checked + // it was a pointer above). 
+ v.Field(i).Set(reflect.Zero(structType.Field(i).Type)) + continue + } + if seen { + // We already saw a different destination field receive the value for this + // selector value, which indicates a badly annotated structure. + return offset, structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector} + } + selectorSeen[fieldInfo.selector] = true + // Make an object of the pointed-to type and parse into that. + v.Field(i).Set(reflect.New(structType.Field(i).Type.Elem())) + destination = v.Field(i).Elem() + } + offset, err = parseField(destination, data, offset, fieldInfo) + if err != nil { + return offset, err + } + + // Remember any possible tls.Enum values encountered in case they are selectors. + if structType.Field(i).Type.Kind() == enumType.Kind() { + enums[structType.Field(i).Name] = v.Field(i).Uint() + } + + } + + // Now we have seen all fields in the structure, check that all select(Enum) {..} selector + // fields found a destination to put their data in. + for selector, seen := range selectorSeen { + if !seen { + return offset, syntaxError{info.fieldName(), selector + ": unhandled value for selector"} + } + } + return offset, nil + case reflect.Array: + datalen := v.Len() + + if datalen > len(rest) { + return offset, syntaxError{info.fieldName(), "truncated array"} + } + inner := rest[:datalen] + offset += datalen + if fieldType.Elem().Kind() != reflect.Uint8 { + // Only byte/uint8 arrays are supported + return offset, structuralError{info.fieldName(), "unsupported array type: " + v.Type().String()} + } + reflect.Copy(v, reflect.ValueOf(inner)) + return offset, nil + + case reflect.Slice: + sliceType := fieldType + // Slices represent variable-length vectors, which are prefixed by a length field. + // The fieldInfo indicates the size of that length field. + varlen, err := readVarUint(rest, info) + if err != nil { + return offset, err + } + datalen := int(varlen) + offset += int(info.count) + rest = rest[info.count:] + + if datalen > len(rest) { + return offset, syntaxError{info.fieldName(), "truncated slice"} + } + inner := rest[:datalen] + offset += datalen + if fieldType.Elem().Kind() == reflect.Uint8 { + // Fast version for []byte + v.Set(reflect.MakeSlice(sliceType, datalen, datalen)) + reflect.Copy(v, reflect.ValueOf(inner)) + return offset, nil + } + + v.Set(reflect.MakeSlice(sliceType, 0, datalen)) + single := reflect.New(sliceType.Elem()) + for innerOffset := 0; innerOffset < len(inner); { + var err error + innerOffset, err = parseField(single.Elem(), inner, innerOffset, nil) + if err != nil { + return offset, err + } + v.Set(reflect.Append(v, single.Elem())) + } + return offset, nil + + default: + return offset, structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())} + } +} + +// Marshal returns the TLS encoding of val. +func Marshal(val interface{}) ([]byte, error) { + return MarshalWithParams(val, "") +} + +// MarshalWithParams returns the TLS encoding of val, and allows field +// parameters to be specified for the top-level element. The form +// of the params is the same as the field tags. 
+func MarshalWithParams(val interface{}, params string) ([]byte, error) { + info, err := fieldTagToFieldInfo(params, "") + if err != nil { + return nil, err + } + var out bytes.Buffer + v := reflect.ValueOf(val) + if err := marshalField(&out, v, info); err != nil { + return nil, err + } + return out.Bytes(), err +} + +func marshalField(out *bytes.Buffer, v reflect.Value, info *fieldInfo) error { + var prefix string + if info != nil && len(info.name) > 0 { + prefix = info.name + ": " + } + fieldType := v.Type() + // First look for known fixed types. + switch fieldType { + case uint8Type: + out.WriteByte(byte(v.Uint())) + return nil + case uint16Type: + scratch := make([]byte, 2) + binary.BigEndian.PutUint16(scratch, uint16(v.Uint())) + out.Write(scratch) + return nil + case uint24Type: + i := v.Uint() + if i > 0xffffff { + return structuralError{info.fieldName(), fmt.Sprintf("uint24 overflow %d", i)} + } + scratch := make([]byte, 4) + binary.BigEndian.PutUint32(scratch, uint32(i)) + out.Write(scratch[1:]) + return nil + case uint32Type: + scratch := make([]byte, 4) + binary.BigEndian.PutUint32(scratch, uint32(v.Uint())) + out.Write(scratch) + return nil + case uint64Type: + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, uint64(v.Uint())) + out.Write(scratch) + return nil + } + + // Now deal with user-defined types. + switch v.Kind() { + case enumType.Kind(): + i := v.Uint() + if info == nil { + return structuralError{info.fieldName(), "enum field tag missing"} + } + if err := info.check(i, prefix); err != nil { + return err + } + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, uint64(i)) + out.Write(scratch[(8 - info.count):]) + return nil + case reflect.Struct: + structType := fieldType + enums := make(map[string]uint64) // Values of any Enum fields + // The comment parseField() describes the mapping of the TLS select(Enum) {..} construct; + // here we have selector and source (rather than destination) fields. + + // Track which selector names we've seen (in the source field tags), and whether a source + // value for that selector has been processed. + selectorSeen := make(map[string]bool) + for i := 0; i < structType.NumField(); i++ { + // Find information about this field. + tag := structType.Field(i).Tag.Get("tls") + fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name) + if err != nil { + return err + } + + source := v.Field(i) + if fieldInfo.selector != "" { + // This field is a possible source for a select(Enum) {..}. First check + // the selector field name has been seen. + choice, ok := enums[fieldInfo.selector] + if !ok { + return structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector} + } + if structType.Field(i).Type.Kind() != reflect.Ptr { + return structuralError{fieldInfo.name, "choice field not a pointer type"} + } + // Is this the first mention of the selector field name? If so, remember it. + seen, ok := selectorSeen[fieldInfo.selector] + if !ok { + selectorSeen[fieldInfo.selector] = false + } + if choice != fieldInfo.val { + // This source was not chosen; police that it should be nil. + if v.Field(i).Pointer() != uintptr(0) { + return structuralError{fieldInfo.name, "unchosen field is non-nil"} + } + continue + } + if seen { + // We already saw a different source field generate the value for this + // selector value, which indicates a badly annotated structure. 
+ return structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector} + } + selectorSeen[fieldInfo.selector] = true + if v.Field(i).Pointer() == uintptr(0) { + return structuralError{fieldInfo.name, "chosen field is nil"} + } + // Marshal from the pointed-to source object. + source = v.Field(i).Elem() + } + + var fieldData bytes.Buffer + if err := marshalField(&fieldData, source, fieldInfo); err != nil { + return err + } + out.Write(fieldData.Bytes()) + + // Remember any tls.Enum values encountered in case they are selectors. + if structType.Field(i).Type.Kind() == enumType.Kind() { + enums[structType.Field(i).Name] = v.Field(i).Uint() + } + } + // Now we have seen all fields in the structure, check that all select(Enum) {..} selector + // fields found a source field to get their data from. + for selector, seen := range selectorSeen { + if !seen { + return syntaxError{info.fieldName(), selector + ": unhandled value for selector"} + } + } + return nil + + case reflect.Array: + datalen := v.Len() + arrayType := fieldType + if arrayType.Elem().Kind() != reflect.Uint8 { + // Only byte/uint8 arrays are supported + return structuralError{info.fieldName(), "unsupported array type"} + } + bytes := make([]byte, datalen) + for i := 0; i < datalen; i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err := out.Write(bytes) + return err + + case reflect.Slice: + if info == nil { + return structuralError{info.fieldName(), "slice field tag missing"} + } + + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + // Fast version for []byte: first write the length as info.count bytes. + datalen := v.Len() + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, uint64(datalen)) + out.Write(scratch[(8 - info.count):]) + + if err := info.check(uint64(datalen), prefix); err != nil { + return err + } + // Then just write the data. + bytes := make([]byte, datalen) + for i := 0; i < datalen; i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err := out.Write(bytes) + return err + } + // General version: use a separate Buffer to write the slice entries into. + var innerBuf bytes.Buffer + for i := 0; i < v.Len(); i++ { + if err := marshalField(&innerBuf, v.Index(i), nil); err != nil { + return err + } + } + + // Now insert (and check) the size. + size := uint64(innerBuf.Len()) + if err := info.check(size, prefix); err != nil { + return err + } + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, size) + out.Write(scratch[(8 - info.count):]) + + // Then copy the data. + _, err := out.Write(innerBuf.Bytes()) + return err + + default: + return structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())} + } +} diff --git a/vendor/github.com/google/certificate-transparency-go/tls/types.go b/vendor/github.com/google/certificate-transparency-go/tls/types.go new file mode 100644 index 000000000000..b8eaf24bdd58 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/tls/types.go @@ -0,0 +1,117 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tls + +import ( + "crypto" + "crypto/dsa" //nolint:staticcheck + "crypto/ecdsa" + "crypto/rsa" + "fmt" +) + +// DigitallySigned gives information about a signature, including the algorithm used +// and the signature value. Defined in RFC 5246 s4.7. +type DigitallySigned struct { + Algorithm SignatureAndHashAlgorithm + Signature []byte `tls:"minlen:0,maxlen:65535"` +} + +func (d DigitallySigned) String() string { + return fmt.Sprintf("Signature: HashAlgo=%v SignAlgo=%v Value=%x", d.Algorithm.Hash, d.Algorithm.Signature, d.Signature) +} + +// SignatureAndHashAlgorithm gives information about the algorithms used for a +// signature. Defined in RFC 5246 s7.4.1.4.1. +type SignatureAndHashAlgorithm struct { + Hash HashAlgorithm `tls:"maxval:255"` + Signature SignatureAlgorithm `tls:"maxval:255"` +} + +// HashAlgorithm enum from RFC 5246 s7.4.1.4.1. +type HashAlgorithm Enum + +// HashAlgorithm constants from RFC 5246 s7.4.1.4.1. +const ( + None HashAlgorithm = 0 + MD5 HashAlgorithm = 1 + SHA1 HashAlgorithm = 2 + SHA224 HashAlgorithm = 3 + SHA256 HashAlgorithm = 4 + SHA384 HashAlgorithm = 5 + SHA512 HashAlgorithm = 6 +) + +func (h HashAlgorithm) String() string { + switch h { + case None: + return "None" + case MD5: + return "MD5" + case SHA1: + return "SHA1" + case SHA224: + return "SHA224" + case SHA256: + return "SHA256" + case SHA384: + return "SHA384" + case SHA512: + return "SHA512" + default: + return fmt.Sprintf("UNKNOWN(%d)", h) + } +} + +// SignatureAlgorithm enum from RFC 5246 s7.4.1.4.1. +type SignatureAlgorithm Enum + +// SignatureAlgorithm constants from RFC 5246 s7.4.1.4.1. +const ( + Anonymous SignatureAlgorithm = 0 + RSA SignatureAlgorithm = 1 + DSA SignatureAlgorithm = 2 + ECDSA SignatureAlgorithm = 3 +) + +func (s SignatureAlgorithm) String() string { + switch s { + case Anonymous: + return "Anonymous" + case RSA: + return "RSA" + case DSA: + return "DSA" + case ECDSA: + return "ECDSA" + default: + return fmt.Sprintf("UNKNOWN(%d)", s) + } +} + +// SignatureAlgorithmFromPubKey returns the algorithm used for this public key. +// ECDSA, RSA, and DSA keys are supported. Other key types will return Anonymous. +func SignatureAlgorithmFromPubKey(k crypto.PublicKey) SignatureAlgorithm { + switch k.(type) { + case *ecdsa.PublicKey: + return ECDSA + case *rsa.PublicKey: + return RSA + case *dsa.PublicKey: + return DSA + default: + return Anonymous + } +} diff --git a/vendor/github.com/google/certificate-transparency-go/types.go b/vendor/github.com/google/certificate-transparency-go/types.go new file mode 100644 index 000000000000..2a96f6a09f59 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/types.go @@ -0,0 +1,591 @@ +// Copyright 2015 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ct holds core types and utilities for Certificate Transparency. +package ct + +import ( + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +/////////////////////////////////////////////////////////////////////////////// +// The following structures represent those outlined in RFC6962; any section +// numbers mentioned refer to that RFC. +/////////////////////////////////////////////////////////////////////////////// + +// LogEntryType represents the LogEntryType enum from section 3.1: +// +// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType; +type LogEntryType tls.Enum // tls:"maxval:65535" + +// LogEntryType constants from section 3.1. +const ( + X509LogEntryType LogEntryType = 0 + PrecertLogEntryType LogEntryType = 1 +) + +func (e LogEntryType) String() string { + switch e { + case X509LogEntryType: + return "X509LogEntryType" + case PrecertLogEntryType: + return "PrecertLogEntryType" + default: + return fmt.Sprintf("UnknownEntryType(%d)", e) + } +} + +// RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance. +const ( + TreeLeafPrefix = byte(0x00) + TreeNodePrefix = byte(0x01) +) + +// MerkleLeafType represents the MerkleLeafType enum from section 3.4: +// +// enum { timestamped_entry(0), (255) } MerkleLeafType; +type MerkleLeafType tls.Enum // tls:"maxval:255" + +// TimestampedEntryLeafType is the only defined MerkleLeafType constant from section 3.4. +const TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT + +func (m MerkleLeafType) String() string { + switch m { + case TimestampedEntryLeafType: + return "TimestampedEntryLeafType" + default: + return fmt.Sprintf("UnknownLeafType(%d)", m) + } +} + +// Version represents the Version enum from section 3.2: +// +// enum { v1(0), (255) } Version; +type Version tls.Enum // tls:"maxval:255" + +// CT Version constants from section 3.2. +const ( + V1 Version = 0 +) + +func (v Version) String() string { + switch v { + case V1: + return "V1" + default: + return fmt.Sprintf("UnknownVersion(%d)", v) + } +} + +// SignatureType differentiates STH signatures from SCT signatures, see section 3.2. +// +// enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType; +type SignatureType tls.Enum // tls:"maxval:255" + +// SignatureType constants from section 3.2. +const ( + CertificateTimestampSignatureType SignatureType = 0 + TreeHashSignatureType SignatureType = 1 +) + +func (st SignatureType) String() string { + switch st { + case CertificateTimestampSignatureType: + return "CertificateTimestamp" + case TreeHashSignatureType: + return "TreeHash" + default: + return fmt.Sprintf("UnknownSignatureType(%d)", st) + } +} + +// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate +// (section 3.1). +type ASN1Cert struct { + Data []byte `tls:"minlen:1,maxlen:16777215"` +} + +// LogID holds the hash of the Log's public key (section 3.2). +// TODO(pphaneuf): Users should be migrated to the one in the logid package. 
+type LogID struct { + KeyID [sha256.Size]byte +} + +// PreCert represents a Precertificate (section 3.2). +type PreCert struct { + IssuerKeyHash [sha256.Size]byte + TBSCertificate []byte `tls:"minlen:1,maxlen:16777215"` // DER-encoded TBSCertificate +} + +// CTExtensions is a representation of the raw bytes of any CtExtension +// structure (see section 3.2). +// nolint: revive +type CTExtensions []byte // tls:"minlen:0,maxlen:65535"` + +// MerkleTreeNode represents an internal node in the CT tree. +type MerkleTreeNode []byte + +// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and +// 4.4). +type ConsistencyProof []MerkleTreeNode + +// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5). +type AuditPath []MerkleTreeNode + +// LeafInput represents a serialized MerkleTreeLeaf structure. +type LeafInput []byte + +// DigitallySigned is a local alias for tls.DigitallySigned so that we can +// attach a MarshalJSON method. +type DigitallySigned tls.DigitallySigned + +// FromBase64String populates the DigitallySigned structure from the base64 data passed in. +// Returns an error if the base64 data is invalid. +func (d *DigitallySigned) FromBase64String(b64 string) error { + raw, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err) + } + var ds tls.DigitallySigned + if rest, err := tls.Unmarshal(raw, &ds); err != nil { + return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) + } else if len(rest) > 0 { + return fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) + } + *d = DigitallySigned(ds) + return nil +} + +// Base64String returns the base64 representation of the DigitallySigned struct. +func (d DigitallySigned) Base64String() (string, error) { + b, err := tls.Marshal(d) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(b), nil +} + +// MarshalJSON implements the json.Marshaller interface. +func (d DigitallySigned) MarshalJSON() ([]byte, error) { + b64, err := d.Base64String() + if err != nil { + return []byte{}, err + } + return []byte(`"` + b64 + `"`), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *DigitallySigned) UnmarshalJSON(b []byte) error { + var content string + if err := json.Unmarshal(b, &content); err != nil { + return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) + } + return d.FromBase64String(content) +} + +// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log. +type RawLogEntry struct { + // Index is a position of the entry in the log. + Index int64 + // Leaf is a parsed Merkle leaf hash input. + Leaf MerkleTreeLeaf + // Cert is: + // - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType. + // - A precertificate if Leaf.TimestampedEntry.EntryType is + // PrecertLogEntryType, in the form of a DER-encoded Certificate as + // originally added (which includes the poison extension and a signature + // generated over the pre-cert by the pre-cert issuer). + // - Empty otherwise. + Cert ASN1Cert + // Chain is the issuing certificate chain starting with the issuer of Cert, + // or an empty slice if Cert is empty. + Chain []ASN1Cert +} + +// LogEntry represents the (parsed) contents of an entry in a CT log. This is described +// in section 3.1, but note that this structure does *not* match the TLS structure +// defined there (the TLS structure is never used directly in RFC6962). 
+type LogEntry struct { + Index int64 + Leaf MerkleTreeLeaf + // Exactly one of the following three fields should be non-empty. + X509Cert *x509.Certificate // Parsed X.509 certificate + Precert *Precertificate // Extracted precertificate + JSONData []byte + + // Chain holds the issuing certificate chain, starting with the + // issuer of the leaf certificate / pre-certificate. + Chain []ASN1Cert +} + +// PrecertChainEntry holds an precertificate together with a validation chain +// for it; see section 3.1. +type PrecertChainEntry struct { + PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"` + CertificateChain []ASN1Cert `tls:"minlen:0,maxlen:16777215"` +} + +// CertificateChain holds a chain of certificates, as returned as extra data +// for get-entries (section 4.6). +type CertificateChain struct { + Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"` +} + +// PrecertChainEntryHash is an extended PrecertChainEntry type with the +// IssuanceChainHash field added to store the hash of the +// CertificateChain field of PrecertChainEntry. +type PrecertChainEntryHash struct { + PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"` + IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"` +} + +// CertificateChainHash is an extended CertificateChain type with the +// IssuanceChainHash field added to store the hash of the +// Entries field of CertificateChain. +type CertificateChainHash struct { + IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"` +} + +// JSONDataEntry holds arbitrary data. +type JSONDataEntry struct { + Data []byte `tls:"minlen:0,maxlen:1677215"` +} + +// SHA256Hash represents the output from the SHA256 hash function. +type SHA256Hash [sha256.Size]byte + +// FromBase64String populates the SHA256 struct with the contents of the base64 data passed in. +func (s *SHA256Hash) FromBase64String(b64 string) error { + bs, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return fmt.Errorf("failed to unbase64 LogID: %v", err) + } + if len(bs) != sha256.Size { + return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs)) + } + copy(s[:], bs) + return nil +} + +// Base64String returns the base64 representation of this SHA256Hash. +func (s SHA256Hash) Base64String() string { + return base64.StdEncoding.EncodeToString(s[:]) +} + +// MarshalJSON implements the json.Marshaller interface for SHA256Hash. +func (s SHA256Hash) MarshalJSON() ([]byte, error) { + return []byte(`"` + s.Base64String() + `"`), nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (s *SHA256Hash) UnmarshalJSON(b []byte) error { + var content string + if err := json.Unmarshal(b, &content); err != nil { + return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err) + } + return s.FromBase64String(content) +} + +// SignedTreeHead represents the structure returned by the get-sth CT method +// after base64 decoding; see sections 3.5 and 4.3. 
+type SignedTreeHead struct { + Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms + TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree + Timestamp uint64 `json:"timestamp"` // The time at which the STH was created + SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree + TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // Log's signature over a TLS-encoded TreeHeadSignature + LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key +} + +func (s SignedTreeHead) String() string { + sigStr, err := s.TreeHeadSignature.Base64String() + if err != nil { + sigStr = tls.DigitallySigned(s.TreeHeadSignature).String() + } + + // If the LogID field in the SignedTreeHead is empty, don't include it in + // the string. + var logIDStr string + if id, empty := s.LogID, (SHA256Hash{}); id != empty { + logIDStr = fmt.Sprintf("LogID:%s, ", id.Base64String()) + } + + return fmt.Sprintf("{%sTreeSize:%d, Timestamp:%d, SHA256RootHash:%q, TreeHeadSignature:%q}", + logIDStr, s.TreeSize, s.Timestamp, s.SHA256RootHash.Base64String(), sigStr) +} + +// TreeHeadSignature holds the data over which the signature in an STH is +// generated; see section 3.5 +type TreeHeadSignature struct { + Version Version `tls:"maxval:255"` + SignatureType SignatureType `tls:"maxval:255"` // == TreeHashSignatureType + Timestamp uint64 + TreeSize uint64 + SHA256RootHash SHA256Hash +} + +// SignedCertificateTimestamp represents the structure returned by the +// add-chain and add-pre-chain methods after base64 decoding; see sections +// 3.2, 4.1 and 4.2. +type SignedCertificateTimestamp struct { + SCTVersion Version `tls:"maxval:255"` + LogID LogID + Timestamp uint64 + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` + Signature DigitallySigned // Signature over TLS-encoded CertificateTimestamp +} + +// CertificateTimestamp is the collection of data that the signature in an +// SCT is over; see section 3.2. +type CertificateTimestamp struct { + SCTVersion Version `tls:"maxval:255"` + SignatureType SignatureType `tls:"maxval:255"` + Timestamp uint64 + EntryType LogEntryType `tls:"maxval:65535"` + X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"` + PrecertEntry *PreCert `tls:"selector:EntryType,val:1"` + JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"` + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` +} + +func (s SignedCertificateTimestamp) String() string { + return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion, + base64.StdEncoding.EncodeToString(s.LogID.KeyID[:]), + s.Timestamp, + s.Extensions, + s.Signature) +} + +// TimestampedEntry is part of the MerkleTreeLeaf structure; see section 3.4. +type TimestampedEntry struct { + Timestamp uint64 + EntryType LogEntryType `tls:"maxval:65535"` + X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"` + PrecertEntry *PreCert `tls:"selector:EntryType,val:1"` + JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"` + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` +} + +// MerkleTreeLeaf represents the deserialized structure of the hash input for the +// leaves of a log's Merkle tree; see section 3.4. +type MerkleTreeLeaf struct { + Version Version `tls:"maxval:255"` + LeafType MerkleLeafType `tls:"maxval:255"` + TimestampedEntry *TimestampedEntry `tls:"selector:LeafType,val:0"` +} + +// Precertificate represents the parsed CT Precertificate structure. 
+type Precertificate struct { + // DER-encoded pre-certificate as originally added, which includes a + // poison extension and a signature generated over the pre-cert by + // the pre-cert issuer (which might differ from the issuer of the final + // cert, see RFC6962 s3.1). + Submitted ASN1Cert + // SHA256 hash of the issuing key + IssuerKeyHash [sha256.Size]byte + // Parsed TBSCertificate structure, held in an x509.Certificate for convenience. + TBSCertificate *x509.Certificate +} + +// X509Certificate returns the X.509 Certificate contained within the +// MerkleTreeLeaf. +func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) { + if m.TimestampedEntry.EntryType != X509LogEntryType { + return nil, fmt.Errorf("cannot call X509Certificate on a MerkleTreeLeaf that is not an X509 entry") + } + return x509.ParseCertificate(m.TimestampedEntry.X509Entry.Data) +} + +// Precertificate returns the X.509 Precertificate contained within the MerkleTreeLeaf. +// +// The returned precertificate is embedded in an x509.Certificate, but is in the +// form stored internally in the log rather than the original submitted form +// (i.e. it does not include the poison extension and any changes to reflect the +// final certificate's issuer have been made; see x509.BuildPrecertTBS). +func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) { + if m.TimestampedEntry.EntryType != PrecertLogEntryType { + return nil, fmt.Errorf("cannot call Precertificate on a MerkleTreeLeaf that is not a precert entry") + } + return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate) +} + +// APIEndpoint is a string that represents one of the Certificate Transparency +// Log API endpoints. +type APIEndpoint string + +// Certificate Transparency Log API endpoints; see section 4. +// WARNING: Should match the URI paths without the "/ct/v1/" prefix. If +// changing these constants, may need to change those too. +const ( + AddChainStr APIEndpoint = "add-chain" + AddPreChainStr APIEndpoint = "add-pre-chain" + GetSTHStr APIEndpoint = "get-sth" + GetEntriesStr APIEndpoint = "get-entries" + GetProofByHashStr APIEndpoint = "get-proof-by-hash" + GetSTHConsistencyStr APIEndpoint = "get-sth-consistency" + GetRootsStr APIEndpoint = "get-roots" + GetEntryAndProofStr APIEndpoint = "get-entry-and-proof" +) + +// URI paths for Log requests; see section 4. +// WARNING: Should match the API endpoints, with the "/ct/v1/" prefix. If +// changing these constants, may need to change those too. +const ( + AddChainPath = "/ct/v1/add-chain" + AddPreChainPath = "/ct/v1/add-pre-chain" + GetSTHPath = "/ct/v1/get-sth" + GetEntriesPath = "/ct/v1/get-entries" + GetProofByHashPath = "/ct/v1/get-proof-by-hash" + GetSTHConsistencyPath = "/ct/v1/get-sth-consistency" + GetRootsPath = "/ct/v1/get-roots" + GetEntryAndProofPath = "/ct/v1/get-entry-and-proof" + + AddJSONPath = "/ct/v1/add-json" // Experimental addition +) + +// AddChainRequest represents the JSON request body sent to the add-chain and +// add-pre-chain POST methods from sections 4.1 and 4.2. +type AddChainRequest struct { + Chain [][]byte `json:"chain"` +} + +// AddChainResponse represents the JSON response to the add-chain and +// add-pre-chain POST methods. +// An SCT represents a Log's promise to integrate a [pre-]certificate into the +// log within a defined period of time. 
+type AddChainResponse struct { + SCTVersion Version `json:"sct_version"` // SCT structure version + ID []byte `json:"id"` // Log ID + Timestamp uint64 `json:"timestamp"` // Timestamp of issuance + Extensions string `json:"extensions"` // Holder for any CT extensions + Signature []byte `json:"signature"` // Log signature for this SCT +} + +// ToSignedCertificateTimestamp creates a SignedCertificateTimestamp from the +// AddChainResponse. +func (r *AddChainResponse) ToSignedCertificateTimestamp() (*SignedCertificateTimestamp, error) { + sct := SignedCertificateTimestamp{ + SCTVersion: r.SCTVersion, + Timestamp: r.Timestamp, + } + + if len(r.ID) != sha256.Size { + return nil, fmt.Errorf("id is invalid length, expected %d got %d", sha256.Size, len(r.ID)) + } + copy(sct.LogID.KeyID[:], r.ID) + + exts, err := base64.StdEncoding.DecodeString(r.Extensions) + if err != nil { + return nil, fmt.Errorf("invalid base64 data in Extensions (%q): %v", r.Extensions, err) + } + sct.Extensions = CTExtensions(exts) + + var ds DigitallySigned + if rest, err := tls.Unmarshal(r.Signature, &ds); err != nil { + return nil, fmt.Errorf("tls.Unmarshal(): %s", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) + } + sct.Signature = ds + + return &sct, nil +} + +// AddJSONRequest represents the JSON request body sent to the add-json POST method. +// The corresponding response re-uses AddChainResponse. +// This is an experimental addition not covered by RFC6962. +type AddJSONRequest struct { + Data interface{} `json:"data"` +} + +// GetSTHResponse represents the JSON response to the get-sth GET method from section 4.3. +type GetSTHResponse struct { + TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree + Timestamp uint64 `json:"timestamp"` // Time that the tree was created + SHA256RootHash []byte `json:"sha256_root_hash"` // Root hash of the tree + TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH +} + +// ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse. +func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) { + sth := SignedTreeHead{ + TreeSize: r.TreeSize, + Timestamp: r.Timestamp, + } + + if len(r.SHA256RootHash) != sha256.Size { + return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash)) + } + copy(sth.SHA256RootHash[:], r.SHA256RootHash) + + var ds DigitallySigned + if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil { + return nil, fmt.Errorf("tls.Unmarshal(): %s", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) + } + sth.TreeHeadSignature = ds + + return &sth, nil +} + +// GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency +// GET method from section 4.4. (The corresponding GET request has parameters 'first' and +// 'second'.) +type GetSTHConsistencyResponse struct { + Consistency [][]byte `json:"consistency"` +} + +// GetProofByHashResponse represents the JSON response to the get-proof-by-hash GET +// method from section 4.5. (The corresponding GET request has parameters 'hash' +// and 'tree_size'.) +type GetProofByHashResponse struct { + LeafIndex int64 `json:"leaf_index"` // The 0-based index of the end entity corresponding to the "hash" parameter. 
+ AuditPath [][]byte `json:"audit_path"` // An array of base64-encoded Merkle Tree nodes proving the inclusion of the chosen certificate. +} + +// LeafEntry represents a leaf in the Log's Merkle tree, as returned by the get-entries +// GET method from section 4.6. +type LeafEntry struct { + // LeafInput is a TLS-encoded MerkleTreeLeaf + LeafInput []byte `json:"leaf_input"` + // ExtraData holds (unsigned) extra data, normally the cert validation chain. + ExtraData []byte `json:"extra_data"` +} + +// GetEntriesResponse represents the JSON response to the get-entries GET method +// from section 4.6. +type GetEntriesResponse struct { + Entries []LeafEntry `json:"entries"` // the list of returned entries +} + +// GetRootsResponse represents the JSON response to the get-roots GET method from section 4.7. +type GetRootsResponse struct { + Certificates []string `json:"certificates"` +} + +// GetEntryAndProofResponse represents the JSON response to the get-entry-and-proof +// GET method from section 4.8. (The corresponding GET request has parameters 'leaf_index' +// and 'tree_size'.) +type GetEntryAndProofResponse struct { + LeafInput []byte `json:"leaf_input"` // the entry itself + ExtraData []byte `json:"extra_data"` // any chain provided when the entry was added to the log + AuditPath [][]byte `json:"audit_path"` // the corresponding proof +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/README.md b/vendor/github.com/google/certificate-transparency-go/x509/README.md new file mode 100644 index 000000000000..6f22f5f8344f --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/README.md @@ -0,0 +1,7 @@ +# Important Notice + +This is a fork of the `crypto/x509` Go package. The original source can be found on +[GitHub](https://github.com/golang/go). + +Be careful about making local modifications to this code as it will +make maintenance harder in future. diff --git a/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go b/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go new file mode 100644 index 000000000000..4823d594633a --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go @@ -0,0 +1,159 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "encoding/pem" + "errors" + "runtime" +) + +// CertPool is a set of certificates. +type CertPool struct { + bySubjectKeyId map[string][]int + byName map[string][]int + certs []*Certificate +} + +// NewCertPool returns a new, empty CertPool. +func NewCertPool() *CertPool { + return &CertPool{ + bySubjectKeyId: make(map[string][]int), + byName: make(map[string][]int), + } +} + +func (s *CertPool) copy() *CertPool { + p := &CertPool{ + bySubjectKeyId: make(map[string][]int, len(s.bySubjectKeyId)), + byName: make(map[string][]int, len(s.byName)), + certs: make([]*Certificate, len(s.certs)), + } + for k, v := range s.bySubjectKeyId { + indexes := make([]int, len(v)) + copy(indexes, v) + p.bySubjectKeyId[k] = indexes + } + for k, v := range s.byName { + indexes := make([]int, len(v)) + copy(indexes, v) + p.byName[k] = indexes + } + copy(p.certs, s.certs) + return p +} + +// SystemCertPool returns a copy of the system cert pool. +// +// Any mutations to the returned pool are not written to disk and do +// not affect any other pool returned by SystemCertPool. 
+// +// New changes in the system cert pool might not be reflected +// in subsequent calls. +func SystemCertPool() (*CertPool, error) { + if runtime.GOOS == "windows" { + // Issue 16736, 18609: + return nil, errors.New("crypto/x509: system root pool is not available on Windows") + } + + if sysRoots := systemRootsPool(); sysRoots != nil { + return sysRoots.copy(), nil + } + + return loadSystemRoots() +} + +// findPotentialParents returns the indexes of certificates in s which might +// have signed cert. The caller must not modify the returned slice. +func (s *CertPool) findPotentialParents(cert *Certificate) []int { + if s == nil { + return nil + } + + var candidates []int + if len(cert.AuthorityKeyId) > 0 { + candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)] + } + if len(candidates) == 0 { + candidates = s.byName[string(cert.RawIssuer)] + } + return candidates +} + +func (s *CertPool) contains(cert *Certificate) bool { + if s == nil { + return false + } + + candidates := s.byName[string(cert.RawSubject)] + for _, c := range candidates { + if s.certs[c].Equal(cert) { + return true + } + } + + return false +} + +// AddCert adds a certificate to a pool. +func (s *CertPool) AddCert(cert *Certificate) { + if cert == nil { + panic("adding nil Certificate to CertPool") + } + + // Check that the certificate isn't being added twice. + if s.contains(cert) { + return + } + + n := len(s.certs) + s.certs = append(s.certs, cert) + + if len(cert.SubjectKeyId) > 0 { + keyId := string(cert.SubjectKeyId) + s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n) + } + name := string(cert.RawSubject) + s.byName[name] = append(s.byName[name], n) +} + +// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates. +// It appends any certificates found to s and reports whether any certificates +// were successfully parsed. +// +// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set +// of root CAs in a format suitable for this function. +func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) { + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := ParseCertificate(block.Bytes) + if IsFatal(err) { + continue + } + + s.AddCert(cert) + ok = true + } + + return +} + +// Subjects returns a list of the DER-encoded subjects of +// all of the certificates in the pool. +func (s *CertPool) Subjects() [][]byte { + res := make([][]byte, len(s.certs)) + for i, c := range s.certs { + res[i] = c.RawSubject + } + return res +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/curves.go b/vendor/github.com/google/certificate-transparency-go/x509/curves.go new file mode 100644 index 000000000000..0e2778cb353e --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/curves.go @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +// This file holds ECC curves that are not supported by the main Go crypto/elliptic +// library, but which have been observed in certificates in the wild. 
+ +var initonce sync.Once +var p192r1 *elliptic.CurveParams + +func initAllCurves() { + initSECP192R1() +} + +func initSECP192R1() { + // See SEC-2, section 2.2.2 + p192r1 = &elliptic.CurveParams{Name: "P-192"} + p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16) + p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16) + p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16) + p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16) + p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16) + p192r1.BitSize = 192 +} + +func secp192r1() elliptic.Curve { + initonce.Do(initAllCurves) + return p192r1 +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/error.go b/vendor/github.com/google/certificate-transparency-go/x509/error.go new file mode 100644 index 000000000000..40b7ef7d9fe6 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/error.go @@ -0,0 +1,236 @@ +package x509 + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// Error implements the error interface and describes a single error in an X.509 certificate or CRL. +type Error struct { + ID ErrorID + Category ErrCategory + Summary string + Field string + SpecRef string + SpecText string + // Fatal indicates that parsing has been aborted. + Fatal bool +} + +func (err Error) Error() string { + var msg bytes.Buffer + if err.ID != ErrInvalidID { + if err.Fatal { + msg.WriteRune('E') + } else { + msg.WriteRune('W') + } + msg.WriteString(fmt.Sprintf("%03d: ", err.ID)) + } + msg.WriteString(err.Summary) + return msg.String() +} + +// VerboseError creates a more verbose error string, including spec details. +func (err Error) VerboseError() string { + var msg bytes.Buffer + msg.WriteString(err.Error()) + if len(err.Field) > 0 || err.Category != UnknownCategory || len(err.SpecRef) > 0 || len(err.SpecText) > 0 { + msg.WriteString(" (") + needSep := false + if len(err.Field) > 0 { + msg.WriteString(err.Field) + needSep = true + } + if err.Category != UnknownCategory { + if needSep { + msg.WriteString(": ") + } + msg.WriteString(err.Category.String()) + needSep = true + } + if len(err.SpecRef) > 0 { + if needSep { + msg.WriteString(": ") + } + msg.WriteString(err.SpecRef) + needSep = true + } + if len(err.SpecText) > 0 { + if needSep { + if len(err.SpecRef) > 0 { + msg.WriteString(", ") + } else { + msg.WriteString(": ") + } + } + msg.WriteString("'") + msg.WriteString(err.SpecText) + msg.WriteString("'") + } + msg.WriteString(")") + } + + return msg.String() +} + +// ErrCategory indicates the category of an x509.Error. +type ErrCategory int + +// ErrCategory values. 
+const ( + UnknownCategory ErrCategory = iota + // Errors in ASN.1 encoding + InvalidASN1Encoding + InvalidASN1Content + InvalidASN1DER + // Errors in ASN.1 relative to schema + InvalidValueRange + InvalidASN1Type + UnexpectedAdditionalData + // Errors in X.509 + PoorlyFormedCertificate // Fails a SHOULD clause + MalformedCertificate // Fails a MUST clause + PoorlyFormedCRL // Fails a SHOULD clause + MalformedCRL // Fails a MUST clause + // Errors relative to CA/Browser Forum guidelines + BaselineRequirementsFailure + EVRequirementsFailure + // Other errors + InsecureAlgorithm + UnrecognizedValue +) + +func (category ErrCategory) String() string { + switch category { + case InvalidASN1Encoding: + return "Invalid ASN.1 encoding" + case InvalidASN1Content: + return "Invalid ASN.1 content" + case InvalidASN1DER: + return "Invalid ASN.1 distinguished encoding" + case InvalidValueRange: + return "Invalid value for range given in schema" + case InvalidASN1Type: + return "Invalid ASN.1 type for schema" + case UnexpectedAdditionalData: + return "Unexpected additional data present" + case PoorlyFormedCertificate: + return "Certificate does not comply with SHOULD clause in spec" + case MalformedCertificate: + return "Certificate does not comply with MUST clause in spec" + case PoorlyFormedCRL: + return "Certificate Revocation List does not comply with SHOULD clause in spec" + case MalformedCRL: + return "Certificate Revocation List does not comply with MUST clause in spec" + case BaselineRequirementsFailure: + return "Certificate does not comply with CA/BF baseline requirements" + case EVRequirementsFailure: + return "Certificate does not comply with CA/BF EV requirements" + case InsecureAlgorithm: + return "Certificate uses an insecure algorithm" + case UnrecognizedValue: + return "Certificate uses an unrecognized value" + default: + return fmt.Sprintf("Unknown (%d)", category) + } +} + +// ErrorID is an identifier for an x509.Error, to allow filtering. +type ErrorID int + +// Errors implements the error interface and holds a collection of errors found in a certificate or CRL. +type Errors struct { + Errs []Error +} + +// Error converts to a string. +func (e *Errors) Error() string { + return e.combineErrors(Error.Error) +} + +// VerboseError creates a more verbose error string, including spec details. +func (e *Errors) VerboseError() string { + return e.combineErrors(Error.VerboseError) +} + +// Fatal indicates whether e includes a fatal error +func (e *Errors) Fatal() bool { + return (e.FirstFatal() != nil) +} + +// Empty indicates whether e has no errors. +func (e *Errors) Empty() bool { + if e == nil { + return true + } + return len(e.Errs) == 0 +} + +// FirstFatal returns the first fatal error in e, or nil +// if there is no fatal error. +func (e *Errors) FirstFatal() error { + if e == nil { + return nil + } + for _, err := range e.Errs { + if err.Fatal { + return err + } + } + return nil + +} + +// AddID adds the Error identified by the given id to an x509.Errors. 
+func (e *Errors) AddID(id ErrorID, args ...interface{}) { + e.Errs = append(e.Errs, NewError(id, args...)) +} + +func (e Errors) combineErrors(errfn func(Error) string) string { + if len(e.Errs) == 0 { + return "" + } + if len(e.Errs) == 1 { + return errfn((e.Errs)[0]) + } + var msg bytes.Buffer + msg.WriteString("Errors:") + for _, err := range e.Errs { + msg.WriteString("\n ") + msg.WriteString(errfn(err)) + } + return msg.String() +} + +// Filter creates a new Errors object with any entries from the filtered +// list of IDs removed. +func (e Errors) Filter(filtered []ErrorID) Errors { + var results Errors +eloop: + for _, v := range e.Errs { + for _, f := range filtered { + if v.ID == f { + break eloop + } + } + results.Errs = append(results.Errs, v) + } + return results +} + +// ErrorFilter builds a list of error IDs (suitable for use with Errors.Filter) from a comma-separated string. +func ErrorFilter(ignore string) []ErrorID { + var ids []ErrorID + filters := strings.Split(ignore, ",") + for _, f := range filters { + v, err := strconv.Atoi(f) + if err != nil { + continue + } + ids = append(ids, ErrorID(v)) + } + return ids +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/errors.go b/vendor/github.com/google/certificate-transparency-go/x509/errors.go new file mode 100644 index 000000000000..ec2fe06a99fb --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/errors.go @@ -0,0 +1,302 @@ +package x509 + +import "fmt" + +// To preserve error IDs, only append to this list, never insert. +const ( + ErrInvalidID ErrorID = iota + ErrInvalidCertList + ErrTrailingCertList + ErrUnexpectedlyCriticalCertListExtension + ErrUnexpectedlyNonCriticalCertListExtension + ErrInvalidCertListAuthKeyID + ErrTrailingCertListAuthKeyID + ErrInvalidCertListIssuerAltName + ErrInvalidCertListCRLNumber + ErrTrailingCertListCRLNumber + ErrNegativeCertListCRLNumber + ErrInvalidCertListDeltaCRL + ErrTrailingCertListDeltaCRL + ErrNegativeCertListDeltaCRL + ErrInvalidCertListIssuingDP + ErrTrailingCertListIssuingDP + ErrCertListIssuingDPMultipleTypes + ErrCertListIssuingDPInvalidFullName + ErrInvalidCertListFreshestCRL + ErrInvalidCertListAuthInfoAccess + ErrTrailingCertListAuthInfoAccess + ErrUnhandledCriticalCertListExtension + ErrUnexpectedlyCriticalRevokedCertExtension + ErrUnexpectedlyNonCriticalRevokedCertExtension + ErrInvalidRevocationReason + ErrTrailingRevocationReason + ErrInvalidRevocationInvalidityDate + ErrTrailingRevocationInvalidityDate + ErrInvalidRevocationIssuer + ErrUnhandledCriticalRevokedCertExtension + + ErrMaxID +) + +// idToError gives a template x509.Error for each defined ErrorID; where the Summary +// field may hold format specifiers that take field parameters. 
+var idToError map[ErrorID]Error + +var errorInfo = []Error{ + { + ID: ErrInvalidCertList, + Summary: "x509: failed to parse CertificateList: %v", + Field: "CertificateList", + SpecRef: "RFC 5280 s5.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertList, + Summary: "x509: trailing data after CertificateList", + Field: "CertificateList", + SpecRef: "RFC 5280 s5.1", + Category: InvalidASN1Content, + Fatal: true, + }, + + { + ID: ErrUnexpectedlyCriticalCertListExtension, + Summary: "x509: certificate list extension %v marked critical but expected to be non-critical", + Field: "tbsCertList.crlExtensions.*.critical", + SpecRef: "RFC 5280 s5.2", + Category: MalformedCRL, + }, + { + ID: ErrUnexpectedlyNonCriticalCertListExtension, + Summary: "x509: certificate list extension %v marked non-critical but expected to be critical", + Field: "tbsCertList.crlExtensions.*.critical", + SpecRef: "RFC 5280 s5.2", + Category: MalformedCRL, + }, + + { + ID: ErrInvalidCertListAuthKeyID, + Summary: "x509: failed to unmarshal certificate-list authority key-id: %v", + Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier", + SpecRef: "RFC 5280 s5.2.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListAuthKeyID, + Summary: "x509: trailing data after certificate list auth key ID", + Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier", + SpecRef: "RFC 5280 s5.2.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidCertListIssuerAltName, + Summary: "x509: failed to parse CRL issuer alt name: %v", + Field: "tbsCertList.crlExtensions.*.IssuerAltName", + SpecRef: "RFC 5280 s5.2.2", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidCertListCRLNumber, + Summary: "x509: failed to unmarshal certificate-list crl-number: %v", + Field: "tbsCertList.crlExtensions.*.CRLNumber", + SpecRef: "RFC 5280 s5.2.3", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListCRLNumber, + Summary: "x509: trailing data after certificate list crl-number", + Field: "tbsCertList.crlExtensions.*.CRLNumber", + SpecRef: "RFC 5280 s5.2.3", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrNegativeCertListCRLNumber, + Summary: "x509: negative certificate list crl-number: %d", + Field: "tbsCertList.crlExtensions.*.CRLNumber", + SpecRef: "RFC 5280 s5.2.3", + Category: MalformedCRL, + Fatal: true, + }, + { + ID: ErrInvalidCertListDeltaCRL, + Summary: "x509: failed to unmarshal certificate-list delta-crl: %v", + Field: "tbsCertList.crlExtensions.*.BaseCRLNumber", + SpecRef: "RFC 5280 s5.2.4", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListDeltaCRL, + Summary: "x509: trailing data after certificate list delta-crl", + Field: "tbsCertList.crlExtensions.*.BaseCRLNumber", + SpecRef: "RFC 5280 s5.2.4", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrNegativeCertListDeltaCRL, + Summary: "x509: negative certificate list base-crl-number: %d", + Field: "tbsCertList.crlExtensions.*.BaseCRLNumber", + SpecRef: "RFC 5280 s5.2.4", + Category: MalformedCRL, + Fatal: true, + }, + { + ID: ErrInvalidCertListIssuingDP, + Summary: "x509: failed to unmarshal certificate list issuing distribution point: %v", + Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint", + SpecRef: "RFC 5280 s5.2.5", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListIssuingDP, + Summary: "x509: trailing data after certificate list issuing distribution 
point", + Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint", + SpecRef: "RFC 5280 s5.2.5", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrCertListIssuingDPMultipleTypes, + Summary: "x509: multiple cert types set in issuing-distribution-point: user:%v CA:%v attr:%v", + Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint", + SpecRef: "RFC 5280 s5.2.5", + SpecText: "at most one of onlyContainsUserCerts, onlyContainsCACerts, and onlyContainsAttributeCerts may be set to TRUE.", + Category: MalformedCRL, + Fatal: true, + }, + { + ID: ErrCertListIssuingDPInvalidFullName, + Summary: "x509: failed to parse CRL issuing-distribution-point fullName: %v", + Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint.distributionPoint", + SpecRef: "RFC 5280 s5.2.5", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidCertListFreshestCRL, + Summary: "x509: failed to unmarshal certificate list freshestCRL: %v", + Field: "tbsCertList.crlExtensions.*.FreshestCRL", + SpecRef: "RFC 5280 s5.2.6", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidCertListAuthInfoAccess, + Summary: "x509: failed to unmarshal certificate list authority info access: %v", + Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess", + SpecRef: "RFC 5280 s5.2.7", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListAuthInfoAccess, + Summary: "x509: trailing data after certificate list authority info access", + Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess", + SpecRef: "RFC 5280 s5.2.7", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrUnhandledCriticalCertListExtension, + Summary: "x509: unhandled critical extension in certificate list: %v", + Field: "tbsCertList.revokedCertificates.crlExtensions.*", + SpecRef: "RFC 5280 s5.2", + SpecText: "If a CRL contains a critical extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of certificates.", + Category: MalformedCRL, + Fatal: true, + }, + + { + ID: ErrUnexpectedlyCriticalRevokedCertExtension, + Summary: "x509: revoked certificate extension %v marked critical but expected to be non-critical", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical", + SpecRef: "RFC 5280 s5.3", + Category: MalformedCRL, + }, + { + ID: ErrUnexpectedlyNonCriticalRevokedCertExtension, + Summary: "x509: revoked certificate extension %v marked non-critical but expected to be critical", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical", + SpecRef: "RFC 5280 s5.3", + Category: MalformedCRL, + }, + + { + ID: ErrInvalidRevocationReason, + Summary: "x509: failed to parse revocation reason: %v", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason", + SpecRef: "RFC 5280 s5.3.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingRevocationReason, + Summary: "x509: trailing data after revoked certificate reason", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason", + SpecRef: "RFC 5280 s5.3.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidRevocationInvalidityDate, + Summary: "x509: failed to parse revoked certificate invalidity date: %v", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate", + SpecRef: "RFC 5280 s5.3.2", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingRevocationInvalidityDate, + Summary: "x509: trailing data after revoked 
certificate invalidity date", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate", + SpecRef: "RFC 5280 s5.3.2", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidRevocationIssuer, + Summary: "x509: failed to parse revocation issuer %v", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CertificateIssuer", + SpecRef: "RFC 5280 s5.3.3", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrUnhandledCriticalRevokedCertExtension, + Summary: "x509: unhandled critical extension in revoked certificate: %v", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*", + SpecRef: "RFC 5280 s5.3", + SpecText: "If a CRL contains a critical CRL entry extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of any certificates.", + Category: MalformedCRL, + Fatal: true, + }, +} + +func init() { + idToError = make(map[ErrorID]Error, len(errorInfo)) + for _, info := range errorInfo { + idToError[info.ID] = info + } +} + +// NewError builds a new x509.Error based on the template for the given id. +func NewError(id ErrorID, args ...interface{}) Error { + var err Error + if id >= ErrMaxID { + err.ID = id + err.Summary = fmt.Sprintf("Unknown error ID %v: args %+v", id, args) + err.Fatal = true + } else { + err = idToError[id] + err.Summary = fmt.Sprintf(err.Summary, args...) + } + return err +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/names.go b/vendor/github.com/google/certificate-transparency-go/x509/names.go new file mode 100644 index 000000000000..4829edeb04b6 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/names.go @@ -0,0 +1,165 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "fmt" + "net" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +const ( + // GeneralName tag values from RFC 5280, 4.2.1.6 + tagOtherName = 0 + tagRFC822Name = 1 + tagDNSName = 2 + tagX400Address = 3 + tagDirectoryName = 4 + tagEDIPartyName = 5 + tagURI = 6 + tagIPAddress = 7 + tagRegisteredID = 8 +) + +// OtherName describes a name related to a certificate which is not in one +// of the standard name formats. RFC 5280, 4.2.1.6: +// +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// value [0] EXPLICIT ANY DEFINED BY type-id } +type OtherName struct { + TypeID asn1.ObjectIdentifier + Value asn1.RawValue +} + +// GeneralNames holds a collection of names related to a certificate. +type GeneralNames struct { + DNSNames []string + EmailAddresses []string + DirectoryNames []pkix.Name + URIs []string + IPNets []net.IPNet + RegisteredIDs []asn1.ObjectIdentifier + OtherNames []OtherName +} + +// Len returns the total number of names in a GeneralNames object. +func (gn GeneralNames) Len() int { + return (len(gn.DNSNames) + len(gn.EmailAddresses) + len(gn.DirectoryNames) + + len(gn.URIs) + len(gn.IPNets) + len(gn.RegisteredIDs) + len(gn.OtherNames)) +} + +// Empty indicates whether a GeneralNames object is empty. 
+func (gn GeneralNames) Empty() bool { + return gn.Len() == 0 +} + +func parseGeneralNames(value []byte, gname *GeneralNames) error { + // RFC 5280, 4.2.1.6 + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + var rest []byte + if rest, err := asn1.Unmarshal(value, &seq); err != nil { + return fmt.Errorf("x509: failed to parse GeneralNames: %v", err) + } else if len(rest) != 0 { + return fmt.Errorf("x509: trailing data after GeneralNames") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return fmt.Errorf("x509: failed to parse GeneralNames sequence, tag %+v", seq) + } + + rest = seq.Bytes + for len(rest) > 0 { + var err error + rest, err = parseGeneralName(rest, gname, false) + if err != nil { + return fmt.Errorf("x509: failed to parse GeneralName: %v", err) + } + } + return nil +} + +func parseGeneralName(data []byte, gname *GeneralNames, withMask bool) ([]byte, error) { + var v asn1.RawValue + var rest []byte + var err error + rest, err = asn1.Unmarshal(data, &v) + if err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames: %v", err) + } + switch v.Tag { + case tagOtherName: + if !v.IsCompound { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: not compound") + } + var other OtherName + v.FullBytes = append([]byte{}, v.FullBytes...) + v.FullBytes[0] = asn1.TagSequence | 0x20 + _, err = asn1.Unmarshal(v.FullBytes, &other) + if err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: %v", err) + } + gname.OtherNames = append(gname.OtherNames, other) + case tagRFC822Name: + gname.EmailAddresses = append(gname.EmailAddresses, string(v.Bytes)) + case tagDNSName: + dns := string(v.Bytes) + gname.DNSNames = append(gname.DNSNames, dns) + case tagDirectoryName: + var rdnSeq pkix.RDNSequence + if _, err := asn1.Unmarshal(v.Bytes, &rdnSeq); err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.directoryName: %v", err) + } + var dirName pkix.Name + dirName.FillFromRDNSequence(&rdnSeq) + gname.DirectoryNames = append(gname.DirectoryNames, dirName) + case tagURI: + gname.URIs = append(gname.URIs, string(v.Bytes)) + case tagIPAddress: + vlen := len(v.Bytes) + if withMask { + switch vlen { + case (2 * net.IPv4len), (2 * net.IPv6len): + ipNet := net.IPNet{IP: v.Bytes[0 : vlen/2], Mask: v.Bytes[vlen/2:]} + gname.IPNets = append(gname.IPNets, ipNet) + default: + return nil, fmt.Errorf("x509: invalid IP/mask length %d in GeneralNames.iPAddress", vlen) + } + } else { + switch vlen { + case net.IPv4len, net.IPv6len: + ipNet := net.IPNet{IP: v.Bytes} + gname.IPNets = append(gname.IPNets, ipNet) + default: + return nil, fmt.Errorf("x509: invalid IP length %d in GeneralNames.iPAddress", vlen) + } + } + case tagRegisteredID: + var oid asn1.ObjectIdentifier + v.FullBytes = append([]byte{}, v.FullBytes...) 
+ v.FullBytes[0] = asn1.TagOID + _, err = asn1.Unmarshal(v.FullBytes, &oid) + if err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.registeredID: %v", err) + } + gname.RegisteredIDs = append(gname.RegisteredIDs, oid) + default: + return nil, fmt.Errorf("x509: failed to unmarshal GeneralName: unknown tag %d", v.Tag) + } + return rest, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go b/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go new file mode 100644 index 000000000000..93d1e4a922dc --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// RFC 1423 describes the encryption of PEM blocks. The algorithm used to +// generate a key from the password was derived by looking at the OpenSSL +// implementation. + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/md5" + "encoding/hex" + "encoding/pem" + "errors" + "io" + "strings" +) + +type PEMCipher int + +// Possible values for the EncryptPEMBlock encryption algorithm. +const ( + _ PEMCipher = iota + PEMCipherDES + PEMCipher3DES + PEMCipherAES128 + PEMCipherAES192 + PEMCipherAES256 +) + +// rfc1423Algo holds a method for enciphering a PEM block. +type rfc1423Algo struct { + cipher PEMCipher + name string + cipherFunc func(key []byte) (cipher.Block, error) + keySize int + blockSize int +} + +// rfc1423Algos holds a slice of the possible ways to encrypt a PEM +// block. The ivSize numbers were taken from the OpenSSL source. +var rfc1423Algos = []rfc1423Algo{{ + cipher: PEMCipherDES, + name: "DES-CBC", + cipherFunc: des.NewCipher, + keySize: 8, + blockSize: des.BlockSize, +}, { + cipher: PEMCipher3DES, + name: "DES-EDE3-CBC", + cipherFunc: des.NewTripleDESCipher, + keySize: 24, + blockSize: des.BlockSize, +}, { + cipher: PEMCipherAES128, + name: "AES-128-CBC", + cipherFunc: aes.NewCipher, + keySize: 16, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES192, + name: "AES-192-CBC", + cipherFunc: aes.NewCipher, + keySize: 24, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES256, + name: "AES-256-CBC", + cipherFunc: aes.NewCipher, + keySize: 32, + blockSize: aes.BlockSize, +}, +} + +// deriveKey uses a key derivation function to stretch the password into a key +// with the number of bits our cipher requires. This algorithm was derived from +// the OpenSSL source. +func (c rfc1423Algo) deriveKey(password, salt []byte) []byte { + hash := md5.New() + out := make([]byte, c.keySize) + var digest []byte + + for i := 0; i < len(out); i += len(digest) { + hash.Reset() + hash.Write(digest) + hash.Write(password) + hash.Write(salt) + digest = hash.Sum(digest[:0]) + copy(out[i:], digest) + } + return out +} + +// IsEncryptedPEMBlock returns if the PEM block is password encrypted. +func IsEncryptedPEMBlock(b *pem.Block) bool { + _, ok := b.Headers["DEK-Info"] + return ok +} + +// IncorrectPasswordError is returned when an incorrect password is detected. +var IncorrectPasswordError = errors.New("x509: decryption password incorrect") + +// DecryptPEMBlock takes a password encrypted PEM block and the password used to +// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects +// the DEK-Info header to determine the algorithm used for decryption. 
If no +// DEK-Info header is present, an error is returned. If an incorrect password +// is detected an IncorrectPasswordError is returned. Because of deficiencies +// in the encrypted-PEM format, it's not always possible to detect an incorrect +// password. In these cases no error will be returned but the decrypted DER +// bytes will be random noise. +func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) { + dek, ok := b.Headers["DEK-Info"] + if !ok { + return nil, errors.New("x509: no DEK-Info header in block") + } + + idx := strings.Index(dek, ",") + if idx == -1 { + return nil, errors.New("x509: malformed DEK-Info header") + } + + mode, hexIV := dek[:idx], dek[idx+1:] + ciph := cipherByName(mode) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv, err := hex.DecodeString(hexIV) + if err != nil { + return nil, err + } + if len(iv) != ciph.blockSize { + return nil, errors.New("x509: incorrect IV size") + } + + // Based on the OpenSSL implementation. The salt is the first 8 bytes + // of the initialization vector. + key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + + if len(b.Bytes)%block.BlockSize() != 0 { + return nil, errors.New("x509: encrypted PEM data is not a multiple of the block size") + } + + data := make([]byte, len(b.Bytes)) + dec := cipher.NewCBCDecrypter(block, iv) + dec.CryptBlocks(data, b.Bytes) + + // Blocks are padded using a scheme where the last n bytes of padding are all + // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423. + // For example: + // [x y z 2 2] + // [x y 7 7 7 7 7 7 7] + // If we detect a bad padding, we assume it is an invalid password. + dlen := len(data) + if dlen == 0 || dlen%ciph.blockSize != 0 { + return nil, errors.New("x509: invalid padding") + } + last := int(data[dlen-1]) + if dlen < last { + return nil, IncorrectPasswordError + } + if last == 0 || last > ciph.blockSize { + return nil, IncorrectPasswordError + } + for _, val := range data[dlen-last:] { + if int(val) != last { + return nil, IncorrectPasswordError + } + } + return data[:dlen-last], nil +} + +// EncryptPEMBlock returns a PEM block of the specified type holding the +// given DER-encoded data encrypted with the specified algorithm and +// password. +func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) { + ciph := cipherByKey(alg) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv := make([]byte, ciph.blockSize) + if _, err := io.ReadFull(rand, iv); err != nil { + return nil, errors.New("x509: cannot generate IV: " + err.Error()) + } + // The salt is the first 8 bytes of the initialization vector, + // matching the key derivation in DecryptPEMBlock. + key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + enc := cipher.NewCBCEncrypter(block, iv) + pad := ciph.blockSize - len(data)%ciph.blockSize + encrypted := make([]byte, len(data), len(data)+pad) + // We could save this copy by encrypting all the whole blocks in + // the data separately, but it doesn't seem worth the additional + // code. + copy(encrypted, data) + // See RFC 1423, Section 1.1. 
+ for i := 0; i < pad; i++ { + encrypted = append(encrypted, byte(pad)) + } + enc.CryptBlocks(encrypted, encrypted) + + return &pem.Block{ + Type: blockType, + Headers: map[string]string{ + "Proc-Type": "4,ENCRYPTED", + "DEK-Info": ciph.name + "," + hex.EncodeToString(iv), + }, + Bytes: encrypted, + }, nil +} + +func cipherByName(name string) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.name == name { + return alg + } + } + return nil +} + +func cipherByKey(key PEMCipher) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.cipher == key { + return alg + } + } + return nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go b/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go new file mode 100644 index 000000000000..bea05b57fd85 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go @@ -0,0 +1,174 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/rsa" + "errors" + "math/big" + + "github.com/google/certificate-transparency-go/asn1" +) + +// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key. +type pkcs1PrivateKey struct { + Version int + N *big.Int + E int + D *big.Int + P *big.Int + Q *big.Int + // We ignore these values, if present, because rsa will calculate them. + Dp *big.Int `asn1:"optional"` + Dq *big.Int `asn1:"optional"` + Qinv *big.Int `asn1:"optional"` + + AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"` +} + +type pkcs1AdditionalRSAPrime struct { + Prime *big.Int + + // We ignore these values because rsa will calculate them. + Exp *big.Int + Coeff *big.Int +} + +// pkcs1PublicKey reflects the ASN.1 structure of a PKCS#1 public key. +type pkcs1PublicKey struct { + N *big.Int + E int +} + +// ParsePKCS1PrivateKey parses an RSA private key in PKCS#1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY". +func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) { + var priv pkcs1PrivateKey + rest, err := asn1.Unmarshal(der, &priv) + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + if err != nil { + if _, err := asn1.Unmarshal(der, &ecPrivateKey{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParseECPrivateKey instead for this key format)") + } + if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)") + } + return nil, err + } + + if priv.Version > 1 { + return nil, errors.New("x509: unsupported private key version") + } + + if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative value") + } + + key := new(rsa.PrivateKey) + key.PublicKey = rsa.PublicKey{ + E: priv.E, + N: priv.N, + } + + key.D = priv.D + key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes)) + key.Primes[0] = priv.P + key.Primes[1] = priv.Q + for i, a := range priv.AdditionalPrimes { + if a.Prime.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative prime") + } + key.Primes[i+2] = a.Prime + // We ignore the other two values because rsa will calculate + // them as needed. 
+ } + + err = key.Validate() + if err != nil { + return nil, err + } + key.Precompute() + + return key, nil +} + +// MarshalPKCS1PrivateKey converts an RSA private key to PKCS#1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY". +// For a more flexible key format which is not RSA specific, use +// MarshalPKCS8PrivateKey. +func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte { + key.Precompute() + + version := 0 + if len(key.Primes) > 2 { + version = 1 + } + + priv := pkcs1PrivateKey{ + Version: version, + N: key.N, + E: key.PublicKey.E, + D: key.D, + P: key.Primes[0], + Q: key.Primes[1], + Dp: key.Precomputed.Dp, + Dq: key.Precomputed.Dq, + Qinv: key.Precomputed.Qinv, + } + + priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues)) + for i, values := range key.Precomputed.CRTValues { + priv.AdditionalPrimes[i].Prime = key.Primes[2+i] + priv.AdditionalPrimes[i].Exp = values.Exp + priv.AdditionalPrimes[i].Coeff = values.Coeff + } + + b, _ := asn1.Marshal(priv) + return b +} + +// ParsePKCS1PublicKey parses an RSA public key in PKCS#1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY". +func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) { + var pub pkcs1PublicKey + rest, err := asn1.Unmarshal(der, &pub) + if err != nil { + if _, err := asn1.Unmarshal(der, &publicKeyInfo{}); err == nil { + return nil, errors.New("x509: failed to parse public key (use ParsePKIXPublicKey instead for this key format)") + } + return nil, err + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + if pub.N.Sign() <= 0 || pub.E <= 0 { + return nil, errors.New("x509: public key contains zero or negative value") + } + if pub.E > 1<<31-1 { + return nil, errors.New("x509: public key contains large public exponent") + } + + return &rsa.PublicKey{ + E: pub.E, + N: pub.N, + }, nil +} + +// MarshalPKCS1PublicKey converts an RSA public key to PKCS#1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY". +func MarshalPKCS1PublicKey(key *rsa.PublicKey) []byte { + derBytes, _ := asn1.Marshal(pkcs1PublicKey{ + N: key.N, + E: key.E, + }) + return derBytes +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go b/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go new file mode 100644 index 000000000000..a144eb6a5d49 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go @@ -0,0 +1,139 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/x509/pkix" + + // TODO(robpercival): change this to crypto/ed25519 when Go 1.13 is min version + "golang.org/x/crypto/ed25519" +) + +// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See +// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn +// and RFC 5208. +type pkcs8 struct { + Version int + Algo pkix.AlgorithmIdentifier + PrivateKey []byte + // optional attributes omitted. +} + +// ParsePKCS8PrivateKey parses an unencrypted private key in PKCS#8, ASN.1 DER form. +// +// It returns a *rsa.PrivateKey, a *ecdsa.PrivateKey, or a ed25519.PrivateKey. +// More types might be supported in the future. 
+// +// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY". +func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) { + var privKey pkcs8 + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + if _, err := asn1.Unmarshal(der, &ecPrivateKey{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParseECPrivateKey instead for this key format)") + } + if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)") + } + return nil, err + } + switch { + case privKey.Algo.Algorithm.Equal(OIDPublicKeyRSA): + key, err = ParsePKCS1PrivateKey(privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + case privKey.Algo.Algorithm.Equal(OIDPublicKeyECDSA): + bytes := privKey.Algo.Parameters.FullBytes + namedCurveOID := new(asn1.ObjectIdentifier) + if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil { + namedCurveOID = nil + } + key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + case privKey.Algo.Algorithm.Equal(OIDPublicKeyEd25519): + if l := len(privKey.Algo.Parameters.FullBytes); l != 0 { + return nil, errors.New("x509: invalid Ed25519 private key parameters") + } + var curvePrivateKey []byte + if _, err := asn1.Unmarshal(privKey.PrivateKey, &curvePrivateKey); err != nil { + return nil, fmt.Errorf("x509: invalid Ed25519 private key: %v", err) + } + if l := len(curvePrivateKey); l != ed25519.SeedSize { + return nil, fmt.Errorf("x509: invalid Ed25519 private key length: %d", l) + } + return ed25519.NewKeyFromSeed(curvePrivateKey), nil + + default: + return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm) + } +} + +// MarshalPKCS8PrivateKey converts a private key to PKCS#8, ASN.1 DER form. +// +// The following key types are currently supported: *rsa.PrivateKey, *ecdsa.PrivateKey +// and ed25519.PrivateKey. Unsupported key types result in an error. +// +// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY". 
+func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) { + var privKey pkcs8 + + switch k := key.(type) { + case *rsa.PrivateKey: + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: OIDPublicKeyRSA, + Parameters: asn1.NullRawValue, + } + privKey.PrivateKey = MarshalPKCS1PrivateKey(k) + + case *ecdsa.PrivateKey: + oid, ok := OIDFromNamedCurve(k.Curve) + if !ok { + return nil, errors.New("x509: unknown curve while marshaling to PKCS#8") + } + + oidBytes, err := asn1.Marshal(oid) + if err != nil { + return nil, errors.New("x509: failed to marshal curve OID: " + err.Error()) + } + + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: OIDPublicKeyECDSA, + Parameters: asn1.RawValue{ + FullBytes: oidBytes, + }, + } + + if privKey.PrivateKey, err = marshalECPrivateKeyWithOID(k, nil); err != nil { + return nil, errors.New("x509: failed to marshal EC private key while building PKCS#8: " + err.Error()) + } + + case ed25519.PrivateKey: + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: OIDPublicKeyEd25519, + } + curvePrivateKey, err := asn1.Marshal(k.Seed()) + if err != nil { + return nil, fmt.Errorf("x509: failed to marshal private key: %v", err) + } + privKey.PrivateKey = curvePrivateKey + + default: + return nil, fmt.Errorf("x509: unknown key type while marshaling PKCS#8: %T", key) + } + + return asn1.Marshal(privKey) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go b/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go new file mode 100644 index 000000000000..1716f908abce --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go @@ -0,0 +1,287 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkix contains shared, low level structures used for ASN.1 parsing +// and serialization of X.509 certificates, CRL and OCSP. +package pkix + +import ( + "encoding/hex" + "fmt" + "math/big" + "time" + + "github.com/google/certificate-transparency-go/asn1" +) + +// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.1.1.2. +type AlgorithmIdentifier struct { + Raw asn1.RawContent + Algorithm asn1.ObjectIdentifier + Parameters asn1.RawValue `asn1:"optional"` +} + +type RDNSequence []RelativeDistinguishedNameSET + +var attributeTypeNames = map[string]string{ + "2.5.4.6": "C", + "2.5.4.10": "O", + "2.5.4.11": "OU", + "2.5.4.3": "CN", + "2.5.4.5": "SERIALNUMBER", + "2.5.4.7": "L", + "2.5.4.8": "ST", + "2.5.4.9": "STREET", + "2.5.4.17": "POSTALCODE", +} + +// String returns a string representation of the sequence r, +// roughly following the RFC 2253 Distinguished Names syntax. +func (r RDNSequence) String() string { + s := "" + for i := 0; i < len(r); i++ { + rdn := r[len(r)-1-i] + if i > 0 { + s += "," + } + for j, tv := range rdn { + if j > 0 { + s += "+" + } + + oidString := tv.Type.String() + typeName, ok := attributeTypeNames[oidString] + if !ok { + derBytes, err := asn1.Marshal(tv.Value) + if err == nil { + s += oidString + "=#" + hex.EncodeToString(derBytes) + continue // No value escaping necessary. 
+ } + + typeName = oidString + } + + valueString := fmt.Sprint(tv.Value) + escaped := make([]rune, 0, len(valueString)) + + for k, c := range valueString { + escape := false + + switch c { + case ',', '+', '"', '\\', '<', '>', ';': + escape = true + + case ' ': + escape = k == 0 || k == len(valueString)-1 + + case '#': + escape = k == 0 + } + + if escape { + escaped = append(escaped, '\\', c) + } else { + escaped = append(escaped, c) + } + } + + s += typeName + "=" + string(escaped) + } + } + + return s +} + +type RelativeDistinguishedNameSET []AttributeTypeAndValue + +// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in +// RFC 5280, Section 4.1.2.4. +type AttributeTypeAndValue struct { + Type asn1.ObjectIdentifier + Value interface{} +} + +// AttributeTypeAndValueSET represents a set of ASN.1 sequences of +// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10). +type AttributeTypeAndValueSET struct { + Type asn1.ObjectIdentifier + Value [][]AttributeTypeAndValue `asn1:"set"` +} + +// Extension represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.2. +type Extension struct { + Id asn1.ObjectIdentifier + Critical bool `asn1:"optional"` + Value []byte +} + +// Name represents an X.509 distinguished name. This only includes the common +// elements of a DN. When parsing, all elements are stored in Names and +// non-standard elements can be extracted from there. When marshaling, elements +// in ExtraNames are appended and override other values with the same OID. +type Name struct { + Country, Organization, OrganizationalUnit []string + Locality, Province []string + StreetAddress, PostalCode []string + SerialNumber, CommonName string + + Names []AttributeTypeAndValue + ExtraNames []AttributeTypeAndValue +} + +func (n *Name) FillFromRDNSequence(rdns *RDNSequence) { + for _, rdn := range *rdns { + if len(rdn) == 0 { + continue + } + + for _, atv := range rdn { + n.Names = append(n.Names, atv) + value, ok := atv.Value.(string) + if !ok { + continue + } + + t := atv.Type + if len(t) == 4 && t[0] == OIDAttribute[0] && t[1] == OIDAttribute[1] && t[2] == OIDAttribute[2] { + switch t[3] { + case OIDCommonName[3]: + n.CommonName = value + case OIDSerialNumber[3]: + n.SerialNumber = value + case OIDCountry[3]: + n.Country = append(n.Country, value) + case OIDLocality[3]: + n.Locality = append(n.Locality, value) + case OIDProvince[3]: + n.Province = append(n.Province, value) + case OIDStreetAddress[3]: + n.StreetAddress = append(n.StreetAddress, value) + case OIDOrganization[3]: + n.Organization = append(n.Organization, value) + case OIDOrganizationalUnit[3]: + n.OrganizationalUnit = append(n.OrganizationalUnit, value) + case OIDPostalCode[3]: + n.PostalCode = append(n.PostalCode, value) + } + } + } + } +} + +var ( + OIDAttribute = asn1.ObjectIdentifier{2, 5, 4} + OIDCountry = asn1.ObjectIdentifier{2, 5, 4, 6} + OIDOrganization = asn1.ObjectIdentifier{2, 5, 4, 10} + OIDOrganizationalUnit = asn1.ObjectIdentifier{2, 5, 4, 11} + OIDCommonName = asn1.ObjectIdentifier{2, 5, 4, 3} + OIDSerialNumber = asn1.ObjectIdentifier{2, 5, 4, 5} + OIDLocality = asn1.ObjectIdentifier{2, 5, 4, 7} + OIDProvince = asn1.ObjectIdentifier{2, 5, 4, 8} + OIDStreetAddress = asn1.ObjectIdentifier{2, 5, 4, 9} + OIDPostalCode = asn1.ObjectIdentifier{2, 5, 4, 17} + + OIDPseudonym = asn1.ObjectIdentifier{2, 5, 4, 65} + OIDTitle = asn1.ObjectIdentifier{2, 5, 4, 12} + OIDDnQualifier = asn1.ObjectIdentifier{2, 5, 4, 46} + OIDName = asn1.ObjectIdentifier{2, 5, 4, 41} + OIDSurname = 
asn1.ObjectIdentifier{2, 5, 4, 4} + OIDGivenName = asn1.ObjectIdentifier{2, 5, 4, 42} + OIDInitials = asn1.ObjectIdentifier{2, 5, 4, 43} + OIDGenerationQualifier = asn1.ObjectIdentifier{2, 5, 4, 44} +) + +// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence +// and returns the new value. The relativeDistinguishedNameSET contains an +// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and +// search for AttributeTypeAndValue. +func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence { + if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) { + return in + } + + s := make([]AttributeTypeAndValue, len(values)) + for i, value := range values { + s[i].Type = oid + s[i].Value = value + } + + return append(in, s) +} + +func (n Name) ToRDNSequence() (ret RDNSequence) { + ret = n.appendRDNs(ret, n.Country, OIDCountry) + ret = n.appendRDNs(ret, n.Province, OIDProvince) + ret = n.appendRDNs(ret, n.Locality, OIDLocality) + ret = n.appendRDNs(ret, n.StreetAddress, OIDStreetAddress) + ret = n.appendRDNs(ret, n.PostalCode, OIDPostalCode) + ret = n.appendRDNs(ret, n.Organization, OIDOrganization) + ret = n.appendRDNs(ret, n.OrganizationalUnit, OIDOrganizationalUnit) + if len(n.CommonName) > 0 { + ret = n.appendRDNs(ret, []string{n.CommonName}, OIDCommonName) + } + if len(n.SerialNumber) > 0 { + ret = n.appendRDNs(ret, []string{n.SerialNumber}, OIDSerialNumber) + } + for _, atv := range n.ExtraNames { + ret = append(ret, []AttributeTypeAndValue{atv}) + } + + return ret +} + +// String returns the string form of n, roughly following +// the RFC 2253 Distinguished Names syntax. +func (n Name) String() string { + return n.ToRDNSequence().String() +} + +// oidInAttributeTypeAndValue reports whether a type with the given OID exists +// in atv. +func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool { + for _, a := range atv { + if a.Type.Equal(oid) { + return true + } + } + return false +} + +// CertificateList represents the ASN.1 structure of the same name. See RFC +// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the +// signature. +type CertificateList struct { + TBSCertList TBSCertificateList + SignatureAlgorithm AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// HasExpired reports whether certList should have been updated by now. +func (certList *CertificateList) HasExpired(now time.Time) bool { + return !now.Before(certList.TBSCertList.NextUpdate) +} + +// TBSCertificateList represents the ASN.1 structure TBSCertList. See RFC +// 5280, section 5.1. +type TBSCertificateList struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:0"` + Signature AlgorithmIdentifier + Issuer RDNSequence + ThisUpdate time.Time + NextUpdate time.Time `asn1:"optional"` + RevokedCertificates []RevokedCertificate `asn1:"optional"` + Extensions []Extension `asn1:"tag:0,optional,explicit"` +} + +// RevokedCertificate represents the unnamed ASN.1 structure that makes up the +// revokedCertificates member of the TBSCertList structure. See RFC +// 5280, section 5.1. 
+type RevokedCertificate struct { + SerialNumber *big.Int + RevocationTime time.Time + Extensions []Extension `asn1:"optional"` +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go new file mode 100644 index 000000000000..06fd439c1fb3 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package x509 + +import ( + "syscall" + "unsafe" +) + +// For Go versions >= 1.11, the ExtraPolicyPara field in +// syscall.CertChainPolicyPara is of type syscall.Pointer. See: +// https://github.com/golang/go/commit/4869ec00e87ef + +func convertToPolicyParaType(p unsafe.Pointer) syscall.Pointer { + return (syscall.Pointer)(p) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go new file mode 100644 index 000000000000..f13a47adfbe3 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go @@ -0,0 +1,18 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.11 +// +build !go1.11 + +package x509 + +import "unsafe" + +// For Go versions before 1.11, the ExtraPolicyPara field in +// syscall.CertChainPolicyPara was of type uintptr. See: +// https://github.com/golang/go/commit/4869ec00e87ef + +func convertToPolicyParaType(p unsafe.Pointer) uintptr { + return uintptr(p) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/revoked.go b/vendor/github.com/google/certificate-transparency-go/x509/revoked.go new file mode 100644 index 000000000000..e5fa6dd15f5f --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/revoked.go @@ -0,0 +1,365 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "encoding/pem" + "time" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2. +var ( + OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20} + OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27} + OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28} +) + +// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3 +var ( + OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21} + OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24} + OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29} +) + +// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1. +type RevocationReasonCode asn1.Enumerated + +// RevocationReasonCode values. 
+var ( + Unspecified = RevocationReasonCode(0) + KeyCompromise = RevocationReasonCode(1) + CACompromise = RevocationReasonCode(2) + AffiliationChanged = RevocationReasonCode(3) + Superseded = RevocationReasonCode(4) + CessationOfOperation = RevocationReasonCode(5) + CertificateHold = RevocationReasonCode(6) + RemoveFromCRL = RevocationReasonCode(8) + PrivilegeWithdrawn = RevocationReasonCode(9) + AACompromise = RevocationReasonCode(10) +) + +// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13 +type ReasonFlag int + +// ReasonFlag values. +const ( + UnusedFlag ReasonFlag = 1 << iota + KeyCompromiseFlag + CACompromiseFlag + AffiliationChangedFlag + SupersededFlag + CessationOfOperationFlag + CertificateHoldFlag + PrivilegeWithdrawnFlag + AACompromiseFlag +) + +// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1. +// It has the same content as pkix.CertificateList, but the contents include parsed versions +// of any extensions. +type CertificateList struct { + Raw asn1.RawContent + TBSCertList TBSCertList + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// ExpiredAt reports whether now is past the expiry time of certList. +func (certList *CertificateList) ExpiredAt(now time.Time) bool { + return now.After(certList.TBSCertList.NextUpdate) +} + +// Indication of whether extensions need to be critical or non-critical. Extensions that +// can be either are omitted from the map. +var listExtCritical = map[string]bool{ + // From RFC 5280... + OIDExtensionAuthorityKeyId.String(): false, // s5.2.1 + OIDExtensionIssuerAltName.String(): false, // s5.2.2 + OIDExtensionCRLNumber.String(): false, // s5.2.3 + OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4 + OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5 + OIDExtensionFreshestCRL.String(): false, // s5.2.6 + OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7 +} + +var certExtCritical = map[string]bool{ + // From RFC 5280... + OIDExtensionCRLReasons.String(): false, // s5.3.1 + OIDExtensionInvalidityDate.String(): false, // s5.3.2 + OIDExtensionCertificateIssuer.String(): true, // s5.3.3 +} + +// IssuingDistributionPoint represents the ASN.1 structure of the same +// name +type IssuingDistributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + OnlyContainsUserCerts bool `asn1:"optional,tag:1"` + OnlyContainsCACerts bool `asn1:"optional,tag:2"` + OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"` + IndirectCRL bool `asn1:"optional,tag:4"` + OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"` +} + +// TBSCertList represents the ASN.1 structure of the same name from RFC +// 5280, section 5.1. It has the same content as pkix.TBSCertificateList +// but the extensions are included in a parsed format. +type TBSCertList struct { + Raw asn1.RawContent + Version int + Signature pkix.AlgorithmIdentifier + Issuer pkix.RDNSequence + ThisUpdate time.Time + NextUpdate time.Time + RevokedCertificates []*RevokedCertificate + Extensions []pkix.Extension + // Cracked out extensions: + AuthorityKeyID []byte + IssuerAltNames GeneralNames + CRLNumber int + BaseCRLNumber int // -1 if no delta CRL present + IssuingDistributionPoint IssuingDistributionPoint + IssuingDPFullNames GeneralNames + FreshestCRLDistributionPoint []string + OCSPServer []string + IssuingCertificateURL []string +} + +// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given +// bytes. 
It's often the case that PEM encoded CRLs will appear where they +// should be DER encoded, so this function will transparently handle PEM +// encoding as long as there isn't any leading garbage. +func ParseCertificateList(clBytes []byte) (*CertificateList, error) { + if bytes.HasPrefix(clBytes, pemCRLPrefix) { + block, _ := pem.Decode(clBytes) + if block != nil && block.Type == pemType { + clBytes = block.Bytes + } + } + return ParseCertificateListDER(clBytes) +} + +// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes. +// For non-fatal errors, this function returns both an error and a CertificateList +// object. +func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) { + var errs Errors + // First parse the DER into the pkix structures. + pkixList := new(pkix.CertificateList) + if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil { + errs.AddID(ErrInvalidCertList, err) + return nil, &errs + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertList) + return nil, &errs + } + + // Transcribe the revoked certs but crack out extensions. + revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates)) + for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates { + revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs) + if revokedCerts[i] == nil { + return nil, &errs + } + } + + certList := CertificateList{ + Raw: derBytes, + TBSCertList: TBSCertList{ + Raw: pkixList.TBSCertList.Raw, + Version: pkixList.TBSCertList.Version, + Signature: pkixList.TBSCertList.Signature, + Issuer: pkixList.TBSCertList.Issuer, + ThisUpdate: pkixList.TBSCertList.ThisUpdate, + NextUpdate: pkixList.TBSCertList.NextUpdate, + RevokedCertificates: revokedCerts, + Extensions: pkixList.TBSCertList.Extensions, + CRLNumber: -1, + BaseCRLNumber: -1, + }, + SignatureAlgorithm: pkixList.SignatureAlgorithm, + SignatureValue: pkixList.SignatureValue, + } + + // Now crack out extensions. 
+ for _, e := range certList.TBSCertList.Extensions { + if expectCritical, present := listExtCritical[e.Id.String()]; present { + if e.Critical && !expectCritical { + errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id) + } else if !e.Critical && expectCritical { + errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id) + } + } + switch { + case e.Id.Equal(OIDExtensionAuthorityKeyId): + // RFC 5280 s5.2.1 + var a authKeyId + if rest, err := asn1.Unmarshal(e.Value, &a); err != nil { + errs.AddID(ErrInvalidCertListAuthKeyID, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListAuthKeyID) + } + certList.TBSCertList.AuthorityKeyID = a.Id + case e.Id.Equal(OIDExtensionIssuerAltName): + // RFC 5280 s5.2.2 + if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil { + errs.AddID(ErrInvalidCertListIssuerAltName, err) + } + case e.Id.Equal(OIDExtensionCRLNumber): + // RFC 5280 s5.2.3 + if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil { + errs.AddID(ErrInvalidCertListCRLNumber, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListCRLNumber) + } + if certList.TBSCertList.CRLNumber < 0 { + errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber) + } + case e.Id.Equal(OIDExtensionDeltaCRLIndicator): + // RFC 5280 s5.2.4 + if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil { + errs.AddID(ErrInvalidCertListDeltaCRL, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListDeltaCRL) + } + if certList.TBSCertList.BaseCRLNumber < 0 { + errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber) + } + case e.Id.Equal(OIDExtensionIssuingDistributionPoint): + parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs) + case e.Id.Equal(OIDExtensionFreshestCRL): + // RFC 5280 s5.2.6 + if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil { + errs.AddID(ErrInvalidCertListFreshestCRL, err) + return nil, err + } + case e.Id.Equal(OIDExtensionAuthorityInfoAccess): + // RFC 5280 s5.2.7 + var aia []accessDescription + if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil { + errs.AddID(ErrInvalidCertListAuthInfoAccess, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListAuthInfoAccess) + } + + for _, v := range aia { + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != tagURI { + continue + } + switch { + case v.Method.Equal(OIDAuthorityInfoAccessOCSP): + certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes)) + case v.Method.Equal(OIDAuthorityInfoAccessIssuers): + certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes)) + } + // TODO(drysdale): cope with more possibilities + } + default: + if e.Critical { + errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id) + } + } + } + + if errs.Fatal() { + return nil, &errs + } + if errs.Empty() { + return &certList, nil + } + return &certList, &errs +} + +func parseIssuingDistributionPoint(data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) { + // RFC 5280 s5.2.5 + if rest, err := asn1.Unmarshal(data, idp); err != nil { + errs.AddID(ErrInvalidCertListIssuingDP, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListIssuingDP) + } + + typeCount := 0 + if idp.OnlyContainsUserCerts 
{ + typeCount++ + } + if idp.OnlyContainsCACerts { + typeCount++ + } + if idp.OnlyContainsAttributeCerts { + typeCount++ + } + if typeCount > 1 { + errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts) + } + for _, fn := range idp.DistributionPoint.FullName { + if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil { + errs.AddID(ErrCertListIssuingDPInvalidFullName, err) + } + } +} + +// RevokedCertificate represents the unnamed ASN.1 structure that makes up the +// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1. +// It has the same content as pkix.RevokedCertificate but the extensions are +// included in a parsed format. +type RevokedCertificate struct { + pkix.RevokedCertificate + // Cracked out extensions: + RevocationReason RevocationReasonCode + InvalidityDate time.Time + Issuer GeneralNames +} + +func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate { + result := RevokedCertificate{RevokedCertificate: pkixRevoked} + for _, e := range pkixRevoked.Extensions { + if expectCritical, present := certExtCritical[e.Id.String()]; present { + if e.Critical && !expectCritical { + errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id) + } else if !e.Critical && expectCritical { + errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id) + } + } + switch { + case e.Id.Equal(OIDExtensionCRLReasons): + // RFC 5280, s5.3.1 + var reason asn1.Enumerated + if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil { + errs.AddID(ErrInvalidRevocationReason, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingRevocationReason) + } + result.RevocationReason = RevocationReasonCode(reason) + case e.Id.Equal(OIDExtensionInvalidityDate): + // RFC 5280, s5.3.2 + if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil { + errs.AddID(ErrInvalidRevocationInvalidityDate, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingRevocationInvalidityDate) + } + case e.Id.Equal(OIDExtensionCertificateIssuer): + // RFC 5280, s5.3.3 + if err := parseGeneralNames(e.Value, &result.Issuer); err != nil { + errs.AddID(ErrInvalidRevocationIssuer, err) + } + default: + if e.Critical { + errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id) + } + } + } + return &result +} + +// CheckCertificateListSignature checks that the signature in crl is from c. +func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error { + algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm) + return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign()) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root.go b/vendor/github.com/google/certificate-transparency-go/x509/root.go new file mode 100644 index 000000000000..240296247df3 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package x509
+
+import "sync"
+
+var (
+	once           sync.Once
+	systemRoots    *CertPool
+	systemRootsErr error
+)
+
+func systemRootsPool() *CertPool {
+	once.Do(initSystemRoots)
+	return systemRoots
+}
+
+func initSystemRoots() {
+	systemRoots, systemRootsErr = loadSystemRoots()
+	if systemRootsErr != nil {
+		systemRoots = nil
+	}
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go b/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go
new file mode 100644
index 000000000000..6d427739a431
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+	"/var/ssl/certs/ca-bundle.crt",
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go b/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go
new file mode 100644
index 000000000000..8c04bdcdfac8
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || netbsd || openbsd
+// +build dragonfly freebsd netbsd openbsd
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+	"/usr/local/etc/ssl/cert.pem",            // FreeBSD
+	"/etc/ssl/cert.pem",                      // OpenBSD
+	"/usr/local/share/certs/ca-root-nss.crt", // DragonFly
+	"/etc/openssl/certs/ca-certificates.crt", // NetBSD
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
new file mode 100644
index 000000000000..dba99bb8dc15
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
@@ -0,0 +1,315 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo && !arm && !arm64 && !ios
+// +build cgo,!arm,!arm64,!ios
+
+package x509
+
+/*
+#cgo CFLAGS: -mmacosx-version-min=10.10 -D__MAC_OS_X_VERSION_MAX_ALLOWED=101300
+#cgo LDFLAGS: -framework CoreFoundation -framework Security
+
+#include <errno.h>
+#include <sys/sysctl.h>
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <Security/Security.h>
+
+static Boolean isSSLPolicy(SecPolicyRef policyRef) {
+	if (!policyRef) {
+		return false;
+	}
+	CFDictionaryRef properties = SecPolicyCopyProperties(policyRef);
+	if (properties == NULL) {
+		return false;
+	}
+	Boolean isSSL = false;
+	CFTypeRef value = NULL;
+	if (CFDictionaryGetValueIfPresent(properties, kSecPolicyOid, (const void **)&value)) {
+		isSSL = CFEqual(value, kSecPolicyAppleSSL);
+	}
+	CFRelease(properties);
+	return isSSL;
+}
+
+// sslTrustSettingsResult obtains the final kSecTrustSettingsResult value
+// for a certificate in the user or admin domain, combining usage constraints
+// for the SSL SecTrustSettingsPolicy, ignoring SecTrustSettingsKeyUsage and
+// kSecTrustSettingsAllowedError.
+// https://developer.apple.com/documentation/security/1400261-sectrustsettingscopytrustsetting +static SInt32 sslTrustSettingsResult(SecCertificateRef cert) { + CFArrayRef trustSettings = NULL; + OSStatus err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainUser, &trustSettings); + + // According to Apple's SecTrustServer.c, "user trust settings overrule admin trust settings", + // but the rules of the override are unclear. Let's assume admin trust settings are applicable + // if and only if user trust settings fail to load or are NULL. + if (err != errSecSuccess || trustSettings == NULL) { + if (trustSettings != NULL) CFRelease(trustSettings); + err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainAdmin, &trustSettings); + } + + // > no trust settings [...] means "this certificate must be verified to a known trusted certificate” + // (Should this cause a fallback from user to admin domain? It's unclear.) + if (err != errSecSuccess || trustSettings == NULL) { + if (trustSettings != NULL) CFRelease(trustSettings); + return kSecTrustSettingsResultUnspecified; + } + + // > An empty trust settings array means "always trust this certificate” with an + // > overall trust setting for the certificate of kSecTrustSettingsResultTrustRoot. + if (CFArrayGetCount(trustSettings) == 0) { + CFRelease(trustSettings); + return kSecTrustSettingsResultTrustRoot; + } + + // kSecTrustSettingsResult is defined as CFSTR("kSecTrustSettingsResult"), + // but the Go linker's internal linking mode can't handle CFSTR relocations. + // Create our own dynamic string instead and release it below. + CFStringRef _kSecTrustSettingsResult = CFStringCreateWithCString( + NULL, "kSecTrustSettingsResult", kCFStringEncodingUTF8); + CFStringRef _kSecTrustSettingsPolicy = CFStringCreateWithCString( + NULL, "kSecTrustSettingsPolicy", kCFStringEncodingUTF8); + CFStringRef _kSecTrustSettingsPolicyString = CFStringCreateWithCString( + NULL, "kSecTrustSettingsPolicyString", kCFStringEncodingUTF8); + + CFIndex m; SInt32 result = 0; + for (m = 0; m < CFArrayGetCount(trustSettings); m++) { + CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, m); + + // First, check if this trust setting is constrained to a non-SSL policy. + SecPolicyRef policyRef; + if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsPolicy, (const void**)&policyRef)) { + if (!isSSLPolicy(policyRef)) { + continue; + } + } + + if (CFDictionaryContainsKey(tSetting, _kSecTrustSettingsPolicyString)) { + // Restricted to a hostname, not a root. + continue; + } + + CFNumberRef cfNum; + if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsResult, (const void**)&cfNum)) { + CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result); + } else { + // > If this key is not present, a default value of + // > kSecTrustSettingsResultTrustRoot is assumed. + result = kSecTrustSettingsResultTrustRoot; + } + + // If multiple dictionaries match, we are supposed to "OR" them, + // the semantics of which are not clear. Since TrustRoot and TrustAsRoot + // are mutually exclusive, Deny should probably override, and Invalid and + // Unspecified be overridden, approximate this by stopping at the first + // TrustRoot, TrustAsRoot or Deny. + if (result == kSecTrustSettingsResultTrustRoot) { + break; + } else if (result == kSecTrustSettingsResultTrustAsRoot) { + break; + } else if (result == kSecTrustSettingsResultDeny) { + break; + } + } + + // If trust settings are present, but none of them match the policy... 
+ // the docs don't tell us what to do. + // + // "Trust settings for a given use apply if any of the dictionaries in the + // certificate’s trust settings array satisfies the specified use." suggests + // that it's as if there were no trust settings at all, so we should probably + // fallback to the admin trust settings. TODO. + if (result == 0) { + result = kSecTrustSettingsResultUnspecified; + } + + CFRelease(_kSecTrustSettingsPolicy); + CFRelease(_kSecTrustSettingsPolicyString); + CFRelease(_kSecTrustSettingsResult); + CFRelease(trustSettings); + + return result; +} + +// isRootCertificate reports whether Subject and Issuer match. +static Boolean isRootCertificate(SecCertificateRef cert, CFErrorRef *errRef) { + CFDataRef subjectName = SecCertificateCopyNormalizedSubjectContent(cert, errRef); + if (*errRef != NULL) { + return false; + } + CFDataRef issuerName = SecCertificateCopyNormalizedIssuerContent(cert, errRef); + if (*errRef != NULL) { + CFRelease(subjectName); + return false; + } + Boolean equal = CFEqual(subjectName, issuerName); + CFRelease(subjectName); + CFRelease(issuerName); + return equal; +} + +// CopyPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates +// for the kSecTrustSettingsPolicy SSL. +// +// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root +// certificates of the system. On failure, the function returns -1. +// Additionally, it fills untrustedPemRoots with certs that must be removed from pemRoots. +// +// Note: The CFDataRef returned in pemRoots and untrustedPemRoots must +// be released (using CFRelease) after we've consumed its content. +static int CopyPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots, bool debugDarwinRoots) { + int i; + + if (debugDarwinRoots) { + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultInvalid = %d\n", kSecTrustSettingsResultInvalid); + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultTrustRoot = %d\n", kSecTrustSettingsResultTrustRoot); + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultTrustAsRoot = %d\n", kSecTrustSettingsResultTrustAsRoot); + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultDeny = %d\n", kSecTrustSettingsResultDeny); + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultUnspecified = %d\n", kSecTrustSettingsResultUnspecified); + } + + // Get certificates from all domains, not just System, this lets + // the user add CAs to their "login" keychain, and Admins to add + // to the "System" keychain + SecTrustSettingsDomain domains[] = { kSecTrustSettingsDomainSystem, + kSecTrustSettingsDomainAdmin, kSecTrustSettingsDomainUser }; + + int numDomains = sizeof(domains)/sizeof(SecTrustSettingsDomain); + if (pemRoots == NULL || untrustedPemRoots == NULL) { + return -1; + } + + CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0); + CFMutableDataRef combinedUntrustedData = CFDataCreateMutable(kCFAllocatorDefault, 0); + for (i = 0; i < numDomains; i++) { + int j; + CFArrayRef certs = NULL; + OSStatus err = SecTrustSettingsCopyCertificates(domains[i], &certs); + if (err != noErr) { + continue; + } + + CFIndex numCerts = CFArrayGetCount(certs); + for (j = 0; j < numCerts; j++) { + SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, j); + if (cert == NULL) { + continue; + } + + SInt32 result; + if (domains[i] == kSecTrustSettingsDomainSystem) { + // Certs found in the system domain are always trusted. 
If the user + // configures "Never Trust" on such a cert, it will also be found in the + // admin or user domain, causing it to be added to untrustedPemRoots. The + // Go code will then clean this up. + result = kSecTrustSettingsResultTrustRoot; + } else { + result = sslTrustSettingsResult(cert); + if (debugDarwinRoots) { + CFErrorRef errRef = NULL; + CFStringRef summary = SecCertificateCopyShortDescription(NULL, cert, &errRef); + if (errRef != NULL) { + fprintf(stderr, "crypto/x509: SecCertificateCopyShortDescription failed\n"); + CFRelease(errRef); + continue; + } + + CFIndex length = CFStringGetLength(summary); + CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1; + char *buffer = malloc(maxSize); + if (CFStringGetCString(summary, buffer, maxSize, kCFStringEncodingUTF8)) { + fprintf(stderr, "crypto/x509: %s returned %d\n", buffer, (int)result); + } + free(buffer); + CFRelease(summary); + } + } + + CFMutableDataRef appendTo; + // > Note the distinction between the results kSecTrustSettingsResultTrustRoot + // > and kSecTrustSettingsResultTrustAsRoot: The former can only be applied to + // > root (self-signed) certificates; the latter can only be applied to + // > non-root certificates. + if (result == kSecTrustSettingsResultTrustRoot) { + CFErrorRef errRef = NULL; + if (!isRootCertificate(cert, &errRef) || errRef != NULL) { + if (errRef != NULL) CFRelease(errRef); + continue; + } + + appendTo = combinedData; + } else if (result == kSecTrustSettingsResultTrustAsRoot) { + CFErrorRef errRef = NULL; + if (isRootCertificate(cert, &errRef) || errRef != NULL) { + if (errRef != NULL) CFRelease(errRef); + continue; + } + + appendTo = combinedData; + } else if (result == kSecTrustSettingsResultDeny) { + appendTo = combinedUntrustedData; + } else if (result == kSecTrustSettingsResultUnspecified) { + // Certificates with unspecified trust should probably be added to a pool of + // intermediates for chain building, or checked for transitive trust and + // added to the root pool (which is an imprecise approximation because it + // cuts chains short) but we don't support either at the moment. TODO. 
+ continue; + } else { + continue; + } + + CFDataRef data = NULL; + err = SecItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data); + if (err != noErr) { + continue; + } + if (data != NULL) { + CFDataAppendBytes(appendTo, CFDataGetBytePtr(data), CFDataGetLength(data)); + CFRelease(data); + } + } + CFRelease(certs); + } + *pemRoots = combinedData; + *untrustedPemRoots = combinedUntrustedData; + return 0; +} +*/ +import "C" +import ( + "errors" + "unsafe" +) + +func loadSystemRoots() (*CertPool, error) { + var data, untrustedData C.CFDataRef + err := C.CopyPEMRootsCTX509(&data, &untrustedData, C.bool(debugDarwinRoots)) + if err == -1 { + return nil, errors.New("crypto/x509: failed to load darwin system roots with cgo") + } + defer C.CFRelease(C.CFTypeRef(data)) + defer C.CFRelease(C.CFTypeRef(untrustedData)) + + buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data))) + roots := NewCertPool() + roots.AppendCertsFromPEM(buf) + + if C.CFDataGetLength(untrustedData) == 0 { + return roots, nil + } + + buf = C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(untrustedData)), C.int(C.CFDataGetLength(untrustedData))) + untrustedRoots := NewCertPool() + untrustedRoots.AppendCertsFromPEM(buf) + + trustedRoots := NewCertPool() + for _, c := range roots.certs { + if !untrustedRoots.contains(c) { + trustedRoots.AddCert(c) + } + } + return trustedRoots, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go new file mode 100644 index 000000000000..4330ae97a448 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go @@ -0,0 +1,288 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run root_darwin_arm_gen.go -output root_darwin_armx.go + +package x509 + +import ( + "bufio" + "bytes" + "crypto/sha1" + "encoding/pem" + "fmt" + "io" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + "sync" +) + +var debugDarwinRoots = strings.Contains(os.Getenv("GODEBUG"), "x509roots=1") + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +// This code is only used when compiling without cgo. +// It is here, instead of root_nocgo_darwin.go, so that tests can check it +// even if the tests are run with cgo enabled. +// The linker will not include these unused functions in binaries built with cgo enabled. + +// execSecurityRoots finds the macOS list of trusted root certificates +// using only command-line tools. This is our fallback path when cgo isn't available. +// +// The strategy is as follows: +// +// 1. Run "security trust-settings-export" and "security +// trust-settings-export -d" to discover the set of certs with some +// user-tweaked trust policy. We're too lazy to parse the XML +// (Issue 26830) to understand what the trust +// policy actually is. We just learn that there is _some_ policy. +// +// 2. Run "security find-certificate" to dump the list of system root +// CAs in PEM format. +// +// 3. For each dumped cert, conditionally verify it with "security +// verify-cert" if that cert was in the set discovered in Step 1. +// Without the Step 1 optimization, running "security verify-cert" +// 150-200 times takes 3.5 seconds. 
With the optimization, the +// whole process takes about 180 milliseconds with 1 untrusted root +// CA. (Compared to 110ms in the cgo path) +func execSecurityRoots() (*CertPool, error) { + hasPolicy, err := getCertsWithTrustPolicy() + if err != nil { + return nil, err + } + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: %d certs have a trust policy\n", len(hasPolicy)) + } + + keychains := []string{"/Library/Keychains/System.keychain"} + + // Note that this results in trusting roots from $HOME/... (the environment + // variable), which might not be expected. + u, err := user.Current() + if err != nil { + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: can't get user home directory: %v\n", err) + } + } else { + keychains = append(keychains, + filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain"), + + // Fresh installs of Sierra use a slightly different path for the login keychain + filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain-db"), + ) + } + + type rootCandidate struct { + c *Certificate + system bool + } + + var ( + mu sync.Mutex + roots = NewCertPool() + numVerified int // number of execs of 'security verify-cert', for debug stats + wg sync.WaitGroup + verifyCh = make(chan rootCandidate) + ) + + // Using 4 goroutines to pipe into verify-cert seems to be + // about the best we can do. The verify-cert binary seems to + // just RPC to another server with coarse locking anyway, so + // running 16 at a time for instance doesn't help at all. Due + // to the "if hasPolicy" check below, though, we will rarely + // (or never) call verify-cert on stock macOS systems, though. + // The hope is that we only call verify-cert when the user has + // tweaked their trust policy. These 4 goroutines are only + // defensive in the pathological case of many trust edits. + for i := 0; i < 4; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for cert := range verifyCh { + sha1CapHex := fmt.Sprintf("%X", sha1.Sum(cert.c.Raw)) + + var valid bool + verifyChecks := 0 + if hasPolicy[sha1CapHex] { + verifyChecks++ + valid = verifyCertWithSystem(cert.c) + } else { + // Certificates not in SystemRootCertificates without user + // or admin trust settings are not trusted. + valid = cert.system + } + + mu.Lock() + numVerified += verifyChecks + if valid { + roots.AddCert(cert.c) + } + mu.Unlock() + } + }() + } + err = forEachCertInKeychains(keychains, func(cert *Certificate) { + verifyCh <- rootCandidate{c: cert, system: false} + }) + if err != nil { + close(verifyCh) + return nil, err + } + err = forEachCertInKeychains([]string{ + "/System/Library/Keychains/SystemRootCertificates.keychain", + }, func(cert *Certificate) { + verifyCh <- rootCandidate{c: cert, system: true} + }) + if err != nil { + close(verifyCh) + return nil, err + } + close(verifyCh) + wg.Wait() + + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: ran security verify-cert %d times\n", numVerified) + } + + return roots, nil +} + +func forEachCertInKeychains(paths []string, f func(*Certificate)) error { + args := append([]string{"find-certificate", "-a", "-p"}, paths...) + cmd := exec.Command("/usr/bin/security", args...) 
+ data, err := cmd.Output() + if err != nil { + return err + } + for len(data) > 0 { + var block *pem.Block + block, data = pem.Decode(data) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + cert, err := ParseCertificate(block.Bytes) + if err != nil { + continue + } + f(cert) + } + return nil +} + +func verifyCertWithSystem(cert *Certificate) bool { + data := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", Bytes: cert.Raw, + }) + + f, err := os.CreateTemp("", "cert") + if err != nil { + fmt.Fprintf(os.Stderr, "can't create temporary file for cert: %v", err) + return false + } + defer os.Remove(f.Name()) + if _, err := f.Write(data); err != nil { + fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err) + return false + } + if err := f.Close(); err != nil { + fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err) + return false + } + cmd := exec.Command("/usr/bin/security", "verify-cert", "-p", "ssl", "-c", f.Name(), "-l", "-L") + var stderr bytes.Buffer + if debugDarwinRoots { + cmd.Stderr = &stderr + } + if err := cmd.Run(); err != nil { + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: verify-cert rejected %s: %q\n", cert.Subject, bytes.TrimSpace(stderr.Bytes())) + } + return false + } + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: verify-cert approved %s\n", cert.Subject) + } + return true +} + +// getCertsWithTrustPolicy returns the set of certs that have a +// possibly-altered trust policy. The keys of the map are capitalized +// sha1 hex of the raw cert. +// They are the certs that should be checked against `security +// verify-cert` to see whether the user altered the default trust +// settings. This code is only used for cgo-disabled builds. +func getCertsWithTrustPolicy() (map[string]bool, error) { + set := map[string]bool{} + td, err := os.MkdirTemp("", "x509trustpolicy") + if err != nil { + return nil, err + } + defer os.RemoveAll(td) + run := func(file string, args ...string) error { + file = filepath.Join(td, file) + args = append(args, file) + cmd := exec.Command("/usr/bin/security", args...) + var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + // If there are no trust settings, the + // `security trust-settings-export` command + // fails with: + // exit status 1, SecTrustSettingsCreateExternalRepresentation: No Trust Settings were found. + // Rather than match on English substrings that are probably + // localized on macOS, just interpret any failure to mean that + // there are no trust settings. + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: exec %q: %v, %s\n", cmd.Args, err, stderr.Bytes()) + } + return nil + } + + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + // Gather all the runs of 40 capitalized hex characters. 
+ br := bufio.NewReader(f) + var hexBuf bytes.Buffer + for { + b, err := br.ReadByte() + isHex := ('A' <= b && b <= 'F') || ('0' <= b && b <= '9') + if isHex { + hexBuf.WriteByte(b) + } else { + if hexBuf.Len() == 40 { + set[hexBuf.String()] = true + } + hexBuf.Reset() + } + if err == io.EOF { + break + } + if err != nil { + return err + } + } + + return nil + } + if err := run("user", "trust-settings-export"); err != nil { + return nil, fmt.Errorf("dump-trust-settings (user): %v", err) + } + if err := run("admin", "trust-settings-export", "-d"); err != nil { + return nil, fmt.Errorf("dump-trust-settings (admin): %v", err) + } + return set, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go new file mode 100644 index 000000000000..5c93349b0bc8 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go @@ -0,0 +1,4314 @@ +// Code generated by root_darwin_arm_gen --output root_darwin_armx.go; DO NOT EDIT. + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo && darwin && (arm || arm64 || ios) +// +build cgo +// +build darwin +// +build arm arm64 ios + +package x509 + +func loadSystemRoots() (*CertPool, error) { + p := NewCertPool() + p.AppendCertsFromPEM([]byte(systemRootsPEM)) + return p, nil +} + +const systemRootsPEM = ` +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB 
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 +b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw +MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD +VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul +CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n +tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl +dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch +PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC ++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O +BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk +ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X +7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz +43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl +pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA +WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h 
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFVTCCBD2gAwIBAgIEO/OB0DANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQGEwJj +aDEOMAwGA1UEChMFYWRtaW4xETAPBgNVBAsTCFNlcnZpY2VzMSIwIAYDVQQLExlD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRYwFAYDVQQDEw1BZG1pbi1Sb290LUNB +MB4XDTAxMTExNTA4NTEwN1oXDTIxMTExMDA3NTEwN1owbDELMAkGA1UEBhMCY2gx +DjAMBgNVBAoTBWFkbWluMREwDwYDVQQLEwhTZXJ2aWNlczEiMCAGA1UECxMZQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdGllczEWMBQGA1UEAxMNQWRtaW4tUm9vdC1DQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMvgr0QUIv5qF0nyXZ3PXAJi +C4C5Wr+oVTN7oxIkXkxvO0GJToM9n7OVJjSmzBL0zJ2HXj0MDRcvhSY+KiZZc6Go +vDvr5Ua481l7ILFeQAFtumeza+vvxeL5Nd0Maga2miiacLNAKXbAcUYRa0Ov5VZB +++YcOYNNt/aisWbJqA2y8He+NsEgJzK5zNdayvYXQTZN+7tVgWOck16Da3+4FXdy +fH1NCWtZlebtMKtERtkVAaVbiWW24CjZKAiVfggjsiLo3yVMPGj3budLx5D9hEEm +vlyDOtcjebca+AcZglppWMX/iHIrx7740y0zd6cWEqiLIcZCrnpkr/KzwO135GkC +AwEAAaOCAf0wggH5MA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIASBkTCBjjCBiwYI +YIV0AREDAQAwfzArBggrBgEFBQcCAjAfGh1UaGlzIGlzIHRoZSBBZG1pbi1Sb290 +LUNBIENQUzBQBggrBgEFBQcCARZEaHR0cDovL3d3dy5pbmZvcm1hdGlrLmFkbWlu +LmNoL1BLSS9saW5rcy9DUFNfMl8xNl83NTZfMV8xN18zXzFfMC5wZGYwfwYDVR0f +BHgwdjB0oHKgcKRuMGwxFjAUBgNVBAMTDUFkbWluLVJvb3QtQ0ExIjAgBgNVBAsT +GUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxETAPBgNVBAsTCFNlcnZpY2VzMQ4w +DAYDVQQKEwVhZG1pbjELMAkGA1UEBhMCY2gwHQYDVR0OBBYEFIKf+iNzIPGXi7JM +Tb5CxX9mzWToMIGZBgNVHSMEgZEwgY6AFIKf+iNzIPGXi7JMTb5CxX9mzWTooXCk +bjBsMQswCQYDVQQGEwJjaDEOMAwGA1UEChMFYWRtaW4xETAPBgNVBAsTCFNlcnZp +Y2VzMSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRYwFAYDVQQD +Ew1BZG1pbi1Sb290LUNBggQ784HQMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0B +AQUFAAOCAQEAeE96XCYRpy6umkPKXDWCRn7INo96ZrWpMggcDORuofHIwdTkgOeM +vWOxDN/yuT7CC3FAaUajbPRbDw0hRMcqKz0aC8CgwcyIyhw/rFK29mfNTG3EviP9 +QSsEbnelFnjpm1wjz4EaBiFjatwpUbI6+Zv3XbEt9QQXBn+c6DeFLe4xvC4B+MTr +a440xTk59pSYux8OHhEvqIwHCkiijGqZhTS3KmGFeBopaR+dJVBRBMoXwzk4B3Hn +0Zib1dEYFZa84vPJZyvxCbLOnPRDJgH6V2uQqbG+6DXVaf/wORVOvF/wzzv0viM/ +RWbEtJZdvo8N3sdtCULzifnxP/V0T9+4ZQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP 
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG 
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ +u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIIGDCCBgCgAwIBAgIGAT8vMXfmMA0GCSqGSIb3DQEBCwUAMIIBCjELMAkGA1UE +BhMCRVMxEjAQBgNVBAgMCUJhcmNlbG9uYTFYMFYGA1UEBwxPQmFyY2Vsb25hIChz +ZWUgY3VycmVudCBhZGRyZXNzIGF0IGh0dHA6Ly93d3cuYW5mLmVzL2VzL2FkZHJl +c3MtZGlyZWNjaW9uLmh0bWwgKTEnMCUGA1UECgweQU5GIEF1dG9yaWRhZCBkZSBD +ZXJ0aWZpY2FjaW9uMRcwFQYDVQQLDA5BTkYgQ2xhc2UgMSBDQTEaMBgGCSqGSIb3 +DQEJARYLaW5mb0BhbmYuZXMxEjAQBgNVBAUTCUc2MzI4NzUxMDEbMBkGA1UEAwwS +QU5GIEdsb2JhbCBSb290IENBMB4XDTEzMDYxMDE3NDUzOFoXDTMzMDYwNTE3NDUz +OFowggEKMQswCQYDVQQGEwJFUzESMBAGA1UECAwJQmFyY2Vsb25hMVgwVgYDVQQH +DE9CYXJjZWxvbmEgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgaHR0cDovL3d3dy5h +bmYuZXMvZXMvYWRkcmVzcy1kaXJlY2Npb24uaHRtbCApMScwJQYDVQQKDB5BTkYg +QXV0b3JpZGFkIGRlIENlcnRpZmljYWNpb24xFzAVBgNVBAsMDkFORiBDbGFzZSAx +IENBMRowGAYJKoZIhvcNAQkBFgtpbmZvQGFuZi5lczESMBAGA1UEBRMJRzYzMjg3 +NTEwMRswGQYDVQQDDBJBTkYgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDHPi9xy4wynbcUbWjorVUgQKeUAVh937J7P37XmsfH +ZLOBZKIIlhhCtRwnDlg7x+BUvtJOTkIbEGMujDygUQ2s3HDYr5I41hTyM2Pl0cq2 +EuSGEbPIHb3dEX8NAguFexM0jqNjrreN3hM2/+TOkAxSdDJP2aMurlySC5zwl47K +ZLHtcVrkZnkDa0o5iN24hJT4vBDT4t2q9khQ+qb1D8KgCOb02r1PxWXu3vfd6Ha2 +mkdB97iGuEh5gO2n4yOmFS5goFlVA2UdPbbhJsb8oKVKDd+YdCKGQDCkQyG4AjmC +YiNm3UPG/qtftTH5cWri67DlLtm6fyUFOMmO6NSh0RtR745pL8GyWJUanyq/Q4bF +HQB21E+WtTsCaqjGaoFcrBunMypmCd+jUZXl27TYENRFbrwNdAh7m2UztcIyb+Sg +VJFyfvVsBQNvnp7GPimVxXZNc4VpxEXObRuPWQN1oZN/90PcZVqTia/SHzEyTryL +ckhiLG3jZiaFZ7pTZ5I9wti9Pn+4kOHvE3Y/4nEnUo4mTxPX9pOlinF+VCiybtV2 +u1KSlc+YaIM7VmuyndDZCJRXm3v0/qTE7t5A5fArZl9lvibigMbWB8fpD+c1GpGH +Eo8NRY0lkaM+DkIqQoaziIsz3IKJrfdKaq9bQMSlIfameKBZ8fNYTBZrH9KZAIhz +YwIDAQABo4IBfjCCAXowHQYDVR0OBBYEFIf6nt9SdnXsSUogb1twlo+d77sXMB8G +A1UdIwQYMBaAFIf6nt9SdnXsSUogb1twlo+d77sXMA8GA1UdEwEB/wQFMAMBAf8w +DgYDVR0PAQH/BAQDAgEGMIIBFQYDVR0RBIIBDDCCAQiCEWh0dHA6Ly93d3cuYW5m +LmVzgQtpbmZvQGFuZi5lc6SB5TCB4jE0MDIGA1UECQwrR3JhbiBWaWEgZGUgbGVz +IENvcnRzIENhdGFsYW5lcy4gOTk2LiAwODAxODESMBAGA1UEBwwJQmFyY2Vsb25h +MScwJQYDVQQKDB5BTkYgQXV0b3JpZGFkIGRlIENlcnRpZmljYWNpb24xEjAQBgNV +BAUTCUc2MzI4NzUxMDFZMFcGA1UECwxQSW5zY3JpdGEgZW4gZWwgTWluaXN0ZXJp +byBkZWwgSW50ZXJpb3IgZGUgRXNwYcOxYSBjb24gZWwgbnVtZXJvIG5hY2lvbmFs +IDE3MS40NDMwDQYJKoZIhvcNAQELBQADggIBAIgR9tFTZ9BCYg+HViMxOfF0MHN2 +Pe/eC128ARdS+GH8A4thtbqiH/SOYbWofO/0zssHhNKa5iQEj45lCAb8BANpWJMD +nWkPr6jq2+50a6d0MMgSS2l1rvjSF+3nIrEuicshHXSTi3q/vBLKr7uGKMVFaM68 
+XAropIwk6ndlA0JseARSPsbetv7ALESMIZAxlHV1TcctYHd0bB3c/Jz+PLszJQqs +Cg/kBPo2D111OXZkIY8W/fJuG9veR783khAK2gUnC0zLLCNsYzEbdGt8zUmBsAsM +cGxqGm6B6vDXd65OxWqw13xdq/24+5R8Ng1PF9tvfjZkUFBF30CxjWur7P90WiKI +G7IGfr6BE1NgXlhEQQu4F+HizB1ypEPzGWltecXQ4yOzO+H0WfFTjLTYX6VSveyW +DQV18ixF8M4tHP/SwNE+yyv2b2JJ3/3RpxjtFlLk+opJ574x0gD/dMJuWTH0JqVY +3PbRfE1jIxFpk164Qz/Xp7H7w7f6xh+tQCkBs3PUYmnGIZcPwq44Q6JHlCNsKx4K +hxfggTvRCk4w79cUID45c2qDsRCqTPoOo/cbOpcfVhbH9LdMORpmuLwNogRZEUSE +fWpqR9q+0kcQf4zGSWIURIyDrogdpDgoHDxktqgMgc+qA4ZE2WQl1D8hmev53A46 +lUSrWUiWfDXtK3ux +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIIAeDltYNno+AwDQYJKoZIhvcNAQEMBQAwZzEbMBkGA1UE +AwwSQXBwbGUgUm9vdCBDQSAtIEcyMSYwJAYDVQQLDB1BcHBsZSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTETMBEGA1UECgwKQXBwbGUgSW5jLjELMAkGA1UEBhMCVVMw +HhcNMTQwNDMwMTgxMDA5WhcNMzkwNDMwMTgxMDA5WjBnMRswGQYDVQQDDBJBcHBs +ZSBSb290IENBIC0gRzIxJjAkBgNVBAsMHUFwcGxlIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBANgREkhI2imKScUcx+xuM23+TfvgHN6s +XuI2pyT5f1BrTM65MFQn5bPW7SXmMLYFN14UIhHF6Kob0vuy0gmVOKTvKkmMXT5x +ZgM4+xb1hYjkWpIMBDLyyED7Ul+f9sDx47pFoFDVEovy3d6RhiPw9bZyLgHaC/Yu +OQhfGaFjQQscp5TBhsRTL3b2CtcM0YM/GlMZ81fVJ3/8E7j4ko380yhDPLVoACVd +J2LT3VXdRCCQgzWTxb+4Gftr49wIQuavbfqeQMpOhYV4SbHXw8EwOTKrfl+q04tv +ny0aIWhwZ7Oj8ZhBbZF8+NfbqOdfIRqMM78xdLe40fTgIvS/cjTf94FNcX1RoeKz +8NMoFnNvzcytN31O661A4T+B/fc9Cj6i8b0xlilZ3MIZgIxbdMYs0xBTJh0UT8TU +gWY8h2czJxQI6bR3hDRSj4n4aJgXv8O7qhOTH11UL6jHfPsNFL4VPSQ08prcdUFm +IrQB1guvkJ4M6mL4m1k8COKWNORj3rw31OsMiANDC1CvoDTdUE0V+1ok2Az6DGOe +HwOx4e7hqkP0ZmUoNwIx7wHHHtHMn23KVDpA287PT0aLSmWaasZobNfMmRtHsHLD +d4/E92GcdB/O/WuhwpyUgquUoue9G7q5cDmVF8Up8zlYNPXEpMZ7YLlmQ1A/bmH8 +DvmGqmAMQ0uVAgMBAAGjQjBAMB0GA1UdDgQWBBTEmRNsGAPCe8CjoA1/coB6HHcm +jTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQwF +AAOCAgEAUabz4vS4PZO/Lc4Pu1vhVRROTtHlznldgX/+tvCHM/jvlOV+3Gp5pxy+ +8JS3ptEwnMgNCnWefZKVfhidfsJxaXwU6s+DDuQUQp50DhDNqxq6EWGBeNjxtUVA +eKuowM77fWM3aPbn+6/Gw0vsHzYmE1SGlHKy6gLti23kDKaQwFd1z4xCfVzmMX3z +ybKSaUYOiPjjLUKyOKimGY3xn83uamW8GrAlvacp/fQ+onVJv57byfenHmOZ4VxG +/5IFjPoeIPmGlFYl5bRXOJ3riGQUIUkhOb9iZqmxospvPyFgxYnURTbImHy99v6Z +SYA7LNKmp4gDBDEZt7Y6YUX6yfIjyGNzv1aJMbDZfGKnexWoiIqrOEDCzBL/FePw +N983csvMmOa/orz6JopxVtfnJBtIRD6e/J/JzBrsQzwBvDR4yGn1xuZW7AYJNpDr +FEobXsmII9oDMJELuDY++ee1KG++P+w8j2Ud5cAeh6Squpj9kuNsJnfdBrRkBof0 +Tta6SqoWqPQFZ2aWuuJVecMsXUmPgEkrihLHdoBR37q9ZV0+N0djMenl9MU/S60E +inpxLK8JQzcPqOMyT/RFtm2XNuyE9QoB6he7hY1Ck3DDUOUUi78/w0EP3SIEIwiK +um1xRKtzCTrJ+VKACd+66eYWyi4uTLLT3OUEVLLUNIAytbwPF+E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICQzCCAcmgAwIBAgIILcX8iNLFS5UwCgYIKoZIzj0EAwMwZzEbMBkGA1UEAwwS +QXBwbGUgUm9vdCBDQSAtIEczMSYwJAYDVQQLDB1BcHBsZSBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTETMBEGA1UECgwKQXBwbGUgSW5jLjELMAkGA1UEBhMCVVMwHhcN +MTQwNDMwMTgxOTA2WhcNMzkwNDMwMTgxOTA2WjBnMRswGQYDVQQDDBJBcHBsZSBS +b290IENBIC0gRzMxJjAkBgNVBAsMHUFwcGxlIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABJjpLz1AcqTtkyJygRMc3RCV8cWjTnHcFBbZDuWmBSp3ZHtf +TjjTuxxEtX/1H7YyYl3J6YRbTzBPEVoA/VhYDKX1DyxNB0cTddqXl5dvMVztK517 +IDvYuVTZXpmkOlEKMaNCMEAwHQYDVR0OBBYEFLuw3qFYM4iapIqZ3r6966/ayySr +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2gA +MGUCMQCD6cHEFl4aXTQY2e3v9GwOAEZLuN+yRhHFD/3meoyhpmvOwgPUnPWTxnS4 +at+qIxUCMG1mihDK1A3UT82NQz60imOlM27jbdoXt2QfyFMm+YhidDkLF1vLUagM +6BgD56KyKA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEuzCCA6OgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQGEwJVUzET 
+MBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwHhcNMDYwNDI1MjE0 +MDM2WhcNMzUwMjA5MjE0MDM2WjBiMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBw +bGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx +FjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDkkakJH5HbHkdQ6wXtXnmELes2oldMVeyLGYne+Uts9QerIjAC6Bg+ ++FAJ039BqJj50cpmnCRrEdCju+QbKsMflZ56DKRHi1vUFjczy8QPTc4UadHJGXL1 +XQ7Vf1+b8iUDulWPTV0N8WQ1IxVLFVkds5T39pyez1C6wVhQZ48ItCD3y6wsIG9w +tj8BMIy3Q88PnT3zK0koGsj+zrW5DtleHNbLPbU6rfQPDgCSC7EhFi501TwN22IW +q6NxkkdTVcGvL0Gz+PvjcM3mo0xFfh9Ma1CWQYnEdGILEINBhzOKgbEwWOxaBDKM +aLOPHd5lc/9nXmW8Sdh2nzMUZaF3lMktAgMBAAGjggF6MIIBdjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUK9BpR5R2Cf70a40uQKb3 +R01/CF4wHwYDVR0jBBgwFoAUK9BpR5R2Cf70a40uQKb3R01/CF4wggERBgNVHSAE +ggEIMIIBBDCCAQAGCSqGSIb3Y2QFATCB8jAqBggrBgEFBQcCARYeaHR0cHM6Ly93 +d3cuYXBwbGUuY29tL2FwcGxlY2EvMIHDBggrBgEFBQcCAjCBthqBs1JlbGlhbmNl +IG9uIHRoaXMgY2VydGlmaWNhdGUgYnkgYW55IHBhcnR5IGFzc3VtZXMgYWNjZXB0 +YW5jZSBvZiB0aGUgdGhlbiBhcHBsaWNhYmxlIHN0YW5kYXJkIHRlcm1zIGFuZCBj +b25kaXRpb25zIG9mIHVzZSwgY2VydGlmaWNhdGUgcG9saWN5IGFuZCBjZXJ0aWZp +Y2F0aW9uIHByYWN0aWNlIHN0YXRlbWVudHMuMA0GCSqGSIb3DQEBBQUAA4IBAQBc +NplMLXi37Yyb3PN3m/J20ncwT8EfhYOFG5k9RzfyqZtAjizUsZAS2L70c5vu0mQP +y3lPNNiiPvl4/2vIB+x9OYOLUyDTOMSxv5pPCmv/K/xZpwUJfBdAVhEedNO3iyM7 +R6PVbyTi69G3cN8PReEnyvFteO3ntRcXqNx+IjXKJdXZD9Zr1KIkIxH3oayPc4Fg +xhtbCS+SsvhESPBgOJ4V9T0mZyCKM2r3DYLP3uujL/lTaltkwGMzd/c6ByxW69oP +IQ7aunMZT7XZNn/Bh1XZp5m5MkL72NVxnn6hUrcbvZNCJBIqxw8dtk2cXmPIS4AX +UKqK1drk/NAJBzewdXUh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFujCCBKKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhjELMAkGA1UEBhMCVVMx +HTAbBgNVBAoTFEFwcGxlIENvbXB1dGVyLCBJbmMuMS0wKwYDVQQLEyRBcHBsZSBD +b21wdXRlciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxKTAnBgNVBAMTIEFwcGxlIFJv +b3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTA1MDIxMDAwMTgxNFoXDTI1MDIx +MDAwMTgxNFowgYYxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRBcHBsZSBDb21wdXRl +ciwgSW5jLjEtMCsGA1UECxMkQXBwbGUgQ29tcHV0ZXIgQ2VydGlmaWNhdGUgQXV0 +aG9yaXR5MSkwJwYDVQQDEyBBcHBsZSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOSRqQkfkdseR1DrBe1e +eYQt6zaiV0xV7IsZid75S2z1B6siMALoGD74UAnTf0GomPnRymacJGsR0KO75Bsq +wx+VnnoMpEeLW9QWNzPLxA9NzhRp0ckZcvVdDtV/X5vyJQO6VY9NXQ3xZDUjFUsV +WR2zlPf2nJ7PULrBWFBnjwi0IPfLrCwgb3C2PwEwjLdDzw+dPfMrSSgayP7OtbkO +2V4c1ss9tTqt9A8OAJILsSEWLnTVPA3bYharo3GSR1NVwa8vQbP4++NwzeajTEV+ +H0xrUJZBicR0YgsQg0GHM4qBsTBY7FoEMoxos48d3mVz/2deZbxJ2HafMxRloXeU +yS0CAwEAAaOCAi8wggIrMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjAfBgNVHSMEGDAWgBQr0GlH +lHYJ/vRrjS5ApvdHTX8IXjCCASkGA1UdIASCASAwggEcMIIBGAYJKoZIhvdjZAUB +MIIBCTBBBggrBgEFBQcCARY1aHR0cHM6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmlj +YXRlYXV0aG9yaXR5L3Rlcm1zLmh0bWwwgcMGCCsGAQUFBwICMIG2GoGzUmVsaWFu +Y2Ugb24gdGhpcyBjZXJ0aWZpY2F0ZSBieSBhbnkgcGFydHkgYXNzdW1lcyBhY2Nl +cHRhbmNlIG9mIHRoZSB0aGVuIGFwcGxpY2FibGUgc3RhbmRhcmQgdGVybXMgYW5k +IGNvbmRpdGlvbnMgb2YgdXNlLCBjZXJ0aWZpY2F0ZSBwb2xpY3kgYW5kIGNlcnRp +ZmljYXRpb24gcHJhY3RpY2Ugc3RhdGVtZW50cy4wRAYDVR0fBD0wOzA5oDegNYYz +aHR0cHM6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5L3Jvb3Qu +Y3JsMFUGCCsGAQUFBwEBBEkwRzBFBggrBgEFBQcwAoY5aHR0cHM6Ly93d3cuYXBw +bGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5L2Nhc2lnbmVycy5odG1sMA0GCSqG +SIb3DQEBBQUAA4IBAQCd2i0oWC99dgS5BNM+zrdmY06PL9T+S61yvaM5xlJNBZhS +9YlRASR5vhoy9+VEi0tEBzmC1lrKtCBe2a4VXR2MHTK/ODFiSF3H4ZCx+CRA+F9Y +m1FdV53B5f88zHIhbsTp6aF31ywXJsM/65roCwO66bNKcuszCVut5mIxauivL9Wv 
+Hld2j383LS4CXN1jyfJxuCZA3xWNdUQ/eb3mHZnhQyw+rW++uaT+DjUZUWOxw961 +kj5ReAFziqQjyqSI8R5cH0EWLX6VCqrpiUGYGxrdyyC/R14MJsVVNU3GMIuZZxTH +CR+6R8faAQmHJEKVvRNgGQrv6n8Obs3BREM6StXj +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgILMTI1MzcyODI4MjgwDQYJKoZIhvcNAQELBQAwWDELMAkG +A1UEBhMCSlAxHDAaBgNVBAoTE0phcGFuZXNlIEdvdmVybm1lbnQxDTALBgNVBAsT +BEdQS0kxHDAaBgNVBAMTE0FwcGxpY2F0aW9uQ0EyIFJvb3QwHhcNMTMwMzEyMTUw +MDAwWhcNMzMwMzEyMTUwMDAwWjBYMQswCQYDVQQGEwJKUDEcMBoGA1UEChMTSmFw +YW5lc2UgR292ZXJubWVudDENMAsGA1UECxMER1BLSTEcMBoGA1UEAxMTQXBwbGlj +YXRpb25DQTIgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKaq +rSVl1gAR1uh6dqr05rRL88zDUrSNrKZPtZJxb0a11a2LEiIXJc5F6BR6hZrkIxCo ++rFnUOVtR+BqiRPjrq418fRCxQX3TZd+PCj8sCaRHoweOBqW3FhEl2LjMsjRFUFN +dZh4vqtoqV7tR76kuo6hApfek3SZbWe0BSXulMjtqqS6MmxCEeu+yxcGkOGThchk +KM4fR8fAXWDudjbcMztR63vPctgPeKgZggiQPhqYjY60zxU2pm7dt+JNQCBT2XYq +0HisifBPizJtROouurCp64ndt295D6uBbrjmiykLWa+2SQ1RLKn9nShjZrhwlXOa +2Po7M7xCQhsyrLEy+z0CAwEAAaOBwTCBvjAdBgNVHQ4EFgQUVqesqgIdsqw9kA6g +by5Bxnbne9owDgYDVR0PAQH/BAQDAgEGMHwGA1UdEQR1MHOkcTBvMQswCQYDVQQG +EwJKUDEYMBYGA1UECgwP5pel5pys5Zu95pS/5bqcMRswGQYDVQQLDBLmlL/lupzo +qo3oqLzln7rnm6QxKTAnBgNVBAMMIOOCouODl+ODquOCseODvOOCt+ODp+ODs0NB +MiBSb290MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAH+aCXWs +B9FydC53VzDCBJzUgKaD56WgG5/+q/OAvdVKo6GPtkxgEefK4WCB10jBIFmlYTKL +nZ6X02aD2mUuWD7b5S+lzYxzplG+WCigeVxpL0PfY7KJR8q73rk0EWOgDiUX5Yf0 +HbCwpc9BqHTG6FPVQvSCLVMJEWgmcZR1E02qdog8dLHW40xPYsNJTE5t8XB+w3+m +Bcx4m+mB26jIx1ye/JKSLaaX8ji1bnOVDMA/zqaUMLX6BbfeniCq/BNkyYq6ZO/i +Y+TYmK5rtT6mVbgzPixy+ywRAPtbFi+E0hOe+gXFwctyTiLdhMpLvNIthhoEdlkf +SUJiOxMfFui61/0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG 
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIJmzCCB4OgAwIBAgIBATANBgkqhkiG9w0BAQwFADCCAR4xPjA8BgNVBAMTNUF1 +dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s +YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz +dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0 +aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh +IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ +KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyMjE4MDgy +MVoXDTMwMTIxNzIzNTk1OVowggEeMT4wPAYDVQQDEzVBdXRvcmlkYWQgZGUgQ2Vy +dGlmaWNhY2lvbiBSYWl6IGRlbCBFc3RhZG8gVmVuZXpvbGFubzELMAkGA1UEBhMC +VkUxEDAOBgNVBAcTB0NhcmFjYXMxGTAXBgNVBAgTEERpc3RyaXRvIENhcGl0YWwx +NjA0BgNVBAoTLVNpc3RlbWEgTmFjaW9uYWwgZGUgQ2VydGlmaWNhY2lvbiBFbGVj +dHJvbmljYTFDMEEGA1UECxM6U3VwZXJpbnRlbmRlbmNpYSBkZSBTZXJ2aWNpb3Mg +ZGUgQ2VydGlmaWNhY2lvbiBFbGVjdHJvbmljYTElMCMGCSqGSIb3DQEJARYWYWNy +YWl6QHN1c2NlcnRlLmdvYi52ZTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBAME77xNS8ZlW47RsBeEaaRZhJoZ4rw785UAFCuPZOAVMqNS1wMYqzy95q6Gk +UO81ER/ugiQX/KMcq/4HBn83fwdYWxPZfwBfK7BP2p/JsFgzYeFP0BXOLmvoJIzl +Jb6FW+1MPwGBjuaZGFImWZsSmGUclb51mRYMZETh9/J5CLThR1exStxHQptwSzra +zNFpkQY/zmj7+YZNA9yDoroVFv6sybYOZ7OxNDo7zkSLo45I7gMwtxqWZ8VkJZkC +8+p0dX6mkhUT0QAV64Zc9HsZiH/oLhEkXjhrgZ28cF73MXIqLx1fyM4kPH1yOJi/ +R72nMwL7D+Sd6mZgI035TxuHXc2/uOwXfKrrTjaJDz8Jp6DdessOkxIgkKXRjP+F +K3ze3n4NUIRGhGRtyvEjK95/2g02t6PeYiYVGur6ruS49n0RAaSS0/LJb6XzaAAe +0mmO2evnEqxIKwy2mZRNPfAVW1l3wCnWiUwryBU6OsbFcFFrQm+00wOicXvOTHBM +aiCVAVZTb9RSLyi+LJ1llzJZO3pq3IRiiBj38Nooo+2ZNbMEciSgmig7YXaUcmud +SVQvLSL+Yw+SqawyezwZuASbp7d/0rutQ59d81zlbMt3J7yB567rT2IqIydQ8qBW +k+fmXzghX+/FidYsh/aK+zZ7Wy68kKHuzEw1Vqkat5DGs+VzAgMBAAGjggLeMIIC +2jASBgNVHRMBAf8ECDAGAQH/AgECMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52 +ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0wMB0GA1UdDgQWBBStuyIdxuDS +Aaj9dlBSk+2YwU2u0zCCAVAGA1UdIwSCAUcwggFDgBStuyIdxuDSAaj9dlBSk+2Y 
+wU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRpZmlj +YWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAw +DgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYD +VQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25p +Y2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEgZGUgU2VydmljaW9zIGRlIENl +cnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG9w0BCQEWFmFjcmFpekBz +dXNjZXJ0ZS5nb2IudmWCAQEwDgYDVR0PAQH/BAQDAgEGMDcGA1UdEQQwMC6CD3N1 +c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0wMFQGA1Ud +HwRNMEswJKAioCCGHmhodHA6Ly93d3cuc3VzY2VydGUuZ29iLnZlL2xjcjAjoCGg +H4YdbGRhcDovL2FjcmFpei5zdXNjZXJ0ZS5nb2IudmUwNwYIKwYBBQUHAQEEKzAp +MCcGCCsGAQUFBzABhhtoaHRwOi8vb2NzcC5zdXNjZXJ0ZS5nb2IudmUwQAYDVR0g +BDkwNzA1BgVghl4BAjAsMCoGCCsGAQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRl +LmdvYi52ZS9kcGMwDQYJKoZIhvcNAQEMBQADggIBAK4qy/zmZ9zBwfW3yOYtLcBT +Oy4szJyPz7/RhNH3bPVH7HbDTGpi6JZ4YXdXMBeJE5qBF4a590Kgj8Rlnltt+Rbo +OFQOU1UDqKuTdBsA//Zry5899fmn8jBUkg4nh09jhHHbLlaUScdz704Zz2+UVg7i +s/r3Legxap60KzmdrmTAE9VKte1TQRgavQwVX5/2mO/J+SCas//UngI+h8SyOucq +mjudYEgBrZaodUsagUfn/+AzFNrGLy+al+5nZeHb8JnCfLHWS0M9ZyhgoeO/czyn +99+5G93VWNv4zfc4KiavHZKrkn8F9pg0ycIZh+OwPT/RE2zq4gTazBMlP3ACIe/p +olkNaOEa8KvgzW96sjBZpMW49zFmyINYkcj+uaNCJrVGsXgdBmkuRGJNWFZ9r0cG +woIaxViFBypsz045r1ESfYPlfDOavBhZ/giR/Xocm9CHkPRY2BApMMR0DUCyGETg +Ql+L3kfdTKzuDjUp2DM9FqysQmaM81YDZufWkMhlZPfHwC7KbNougoLroa5Umeos +bqAXWmk46SwIdWRPLLqbUpDTKooynZKpSYIkkotdgJoVZUUCY+RCO8jsVPEU6ece +SxztNUm5UOta1OJPMwSAKRHOo3ilVb9c6lAixDdvV8MeNbqe6asM1mpCHWbJ/0rg +5Ls9Cxx8hracyp0ev7b0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIIKv++n6Lw6YcwDQYJKoZIhvcNAQEFBQAwKDELMAkGA1UE +BhMCQkUxGTAXBgNVBAMTEEJlbGdpdW0gUm9vdCBDQTIwHhcNMDcxMDA0MTAwMDAw +WhcNMjExMjE1MDgwMDAwWjAoMQswCQYDVQQGEwJCRTEZMBcGA1UEAxMQQmVsZ2l1 +bSBSb290IENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMZzQh6S +/3UPi790hqc/7bIYLS2X+an7mEoj39WN4IzGMhwWLQdC1i22bi+n9fzGhYJdld61 +IgDMqFNAn68KNaJ6x+HK92AQZw6nUHMXU5WfIp8MXW+2QbyM69odRr2nlL/zGsvU ++40OHjPIltfsjFPekx40HopQcSZYtF3CiInaYNKJIT/e1wEYNm7hLHADBGXvmAYr +XR5i3FVr/mZkIV/4L+HXmymvb82fqgxG0YjFnaKVn6w/Fa7yYd/vw2uaItgscf1Y +HewApDgglVrH1Tdjuk+bqv5WRi5j2Qsj1Yr6tSPwiRuhFA0m2kHwOI8w7QUmecFL +TqG4flVSOmlGhHUCAwEAAaOBuzCBuDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ 
+BAUwAwEB/zBCBgNVHSAEOzA5MDcGBWA4CQEBMC4wLAYIKwYBBQUHAgEWIGh0dHA6 +Ly9yZXBvc2l0b3J5LmVpZC5iZWxnaXVtLmJlMB0GA1UdDgQWBBSFiuv0xbu+DlkD +lN7WgAEV4xCcOTARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUhYrr9MW7 +vg5ZA5Te1oABFeMQnDkwDQYJKoZIhvcNAQEFBQADggEBAFHYhd27V2/MoGy1oyCc +UwnzSgEMdL8rs5qauhjyC4isHLMzr87lEwEnkoRYmhC598wUkmt0FoqW6FHvv/pK +JaeJtmMrXZRY0c8RcrYeuTlBFk0pvDVTC9rejg7NqZV3JcqUWumyaa7YwBO+mPyW +nIR/VRPmPIfjvCCkpDZoa01gZhz5v6yAlGYuuUGK02XThIAC71AdXkbc98m6tTR8 +KvPG2F9fVJ3bTc0R5/0UAoNmXsimABKgX77OFP67H6dh96tK8QYUn8pJQsKpvO2F +sauBQeYNxUJpU4c5nUwfAA4+Bw11V0SoU7Q2dmSZ3G7rPUZuFF1eR1ONeE3gJ7uO +hXY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D 
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy +MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk +D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o +OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A +fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe +IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n +oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK +/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj +rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD +3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE +7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC +yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd +qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI +hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA +SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo +HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB +emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC +AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb +7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x +DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk +F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF +a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT +Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I 
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk +BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4 +Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl +cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0 +aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY +F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N +8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe +rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K +/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu 
+7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC +28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6 +lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E +nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB +0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09 +5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj +WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN +jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s +ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM +OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q +619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn +2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj +o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v +nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG +5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq +pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb +dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0 +BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 +F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 +2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw +F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP 
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM +MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD +QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM +MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD +QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E +jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo +ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI +ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu +Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg +AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7 +HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA +uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa +TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg +xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q +CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x +O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs +6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ 
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS 
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy +P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz +IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz +MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj +dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw +EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp +MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 +28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq +VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q +DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR +5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL +ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a +Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl +UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s ++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 +Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx +hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV +HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 ++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN +YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t +L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy +ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt +IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV +HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w +DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW +PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF +5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 +glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH +FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 +pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD +xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG +tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq +jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De +fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ +d0jQ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg +b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa +MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB 
+ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw +IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B +AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb +unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d +BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq +7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3 +0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX +roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG +A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j +aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p +26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA +BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud +EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN +BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB +AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd +p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi +1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc +XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0 +eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu +tGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIQX/h7KCtU3I1CoxW1aMmt/zANBgkqhkiG9w0BAQUFADA1 +MRYwFAYDVQQKEw1DaXNjbyBTeXN0ZW1zMRswGQYDVQQDExJDaXNjbyBSb290IENB +IDIwNDgwHhcNMDQwNTE0MjAxNzEyWhcNMjkwNTE0MjAyNTQyWjA1MRYwFAYDVQQK +Ew1DaXNjbyBTeXN0ZW1zMRswGQYDVQQDExJDaXNjbyBSb290IENBIDIwNDgwggEg +MA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCwmrmrp68Kd6ficba0ZmKUeIhH +xmJVhEAyv8CrLqUccda8bnuoqrpu0hWISEWdovyD0My5jOAmaHBKeN8hF570YQXJ +FcjPFto1YYmUQ6iEqDGYeJu5Tm8sUxJszR2tKyS7McQr/4NEb7Y9JHcJ6r8qqB9q +VvYgDxFUl4F1pyXOWWqCZe+36ufijXWLbvLdT6ZeYpzPEApk0E5tzivMW/VgpSdH +jWn0f84bcN5wGyDWbs2mAag8EtKpP6BrXruOIIt6keO1aO6g58QBdKhTCytKmg9l +Eg6CTY5j/e/rmxrbU6YTYK/CfdfHbBcl1HP7R2RQgYCUTOG/rksc35LtLgXfAgED +o1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUJ/PI +FR5umgIJFq0roIlgX9p7L6owEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEF +BQADggEBAJ2dhISjQal8dwy3U8pORFBi71R803UXHOjgxkhLtv5MOhmBVrBW7hmW +Yqpao2TB9k5UM8Z3/sUcuuVdJcr18JOagxEu5sv4dEX+5wW4q+ffy0vhN4TauYuX +cB7w4ovXsNgOnbFp1iqRe6lJT37mjpXYgyc81WhJDtSd9i7rp77rMKSsH0T8lasz +Bvt9YAretIpjsJyp8qS5UwGH0GikJ3+r/+n6yUA4iGe0OcaEb1fJU9u6ju7AQ7L4 +CYNu/2bPPu8Xs1gYJQk0XuPL1hS27PKSb3TkL4Eq1ZKR4OCXPDJoBYVL0fdX4lId +kxpUnwVwwEpxYB5DC2Ae/qPOgRnhCzU= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz +cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 +HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ 
+AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR +FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB +kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgIQKTZHquOKrIZKI1byyrdhrzANBgkqhkiG9w0BAQUFADBO +MQswCQYDVQQGEwJ1czEYMBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQ0wCwYDVQQL +EwRGQkNBMRYwFAYDVQQDEw1Db21tb24gUG9saWN5MB4XDTA3MTAxNTE1NTgwMFoX +DTI3MTAxNTE2MDgwMFowTjELMAkGA1UEBhMCdXMxGDAWBgNVBAoTD1UuUy4gR292 +ZXJubWVudDENMAsGA1UECxMERkJDQTEWMBQGA1UEAxMNQ29tbW9uIFBvbGljeTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJeNvTMn5K1b+3i9L0dHbsd4 +6ZOcpN7JHP0vGzk4rEcXwH53KQA7Ax9oD81Npe53uCxiazH2+nIJfTApBnznfKM9 +hBiKHa4skqgf6F5PjY7rPxr4nApnnbBnTfAu0DDew5SwoM8uCjR/VAnTNr2kSVdS +c+md/uRIeUYbW40y5KVIZPMiDZKdCBW/YDyD90ciJSKtKXG3d+8XyaK2lF7IMJCk +FEhcVlcLQUwF1CpMP64Sm1kRdXAHImktLNMxzJJ+zM2kfpRHqpwJCPZLr1LoakCR +xVW9QLHIbVeGlRfmH3O+Ry4+i0wXubklHKVSFzYIWcBCvgortFZRPBtVyYyQd+sC +AwEAAaN7MHkwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFC9Yl9ipBZilVh/72at17wI8NjTHMBIGCSsGAQQBgjcVAQQFAgMBAAEwIwYJ +KwYBBAGCNxUCBBYEFHa3YJbdFFYprHWF03BjwbxHhhyLMA0GCSqGSIb3DQEBBQUA +A4IBAQBgrvNIFkBypgiIybxHLCRLXaCRc+1leJDwZ5B6pb8KrbYq+Zln34PFdx80 +CTj5fp5B4Ehg/uKqXYeI6oj9XEWyyWrafaStsU+/HA2fHprA1RRzOCuKeEBuMPdi +4c2Z/FFpZ2wR3bgQo2jeJqVW/TZsN5hs++58PGxrcD/3SDcJjwtCga1GRrgLgwb0 +Gzigf0/NC++DiYeXHIowZ9z9VKEDfgHLhUyxCynDvux84T8PCVI8L6eaSP436REG +WOE2QYrEtr+O3c5Ks7wawM36GpnScZv6z7zyxFSjiDV2zBssRm8MtNHDYXaSdBHq +S4CNHIkRi+xb/xfJSPzn4AYR4oRe +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw 
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIQFBOWgxRVjOp7Y+X8NId3RDANBgkqhkiG9w0BAQUFADA0 +MRMwEQYDVQQDEwpDb21TaWduIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQG +EwJJTDAeFw0wNDAzMjQxMTMyMThaFw0yOTAzMTkxNTAyMThaMDQxEzARBgNVBAMT +CkNvbVNpZ24gQ0ExEDAOBgNVBAoTB0NvbVNpZ24xCzAJBgNVBAYTAklMMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8ORUaSvTx49qROR+WCf4C9DklBKK +8Rs4OC8fMZwG1Cyn3gsqrhqg455qv588x26i+YtkbDqthVVRVKU4VbirgwTyP2Q2 +98CNQ0NqZtH3FyrV7zb6MBBC11PN+fozc0yz6YQgitZBJzXkOPqUm7h65HkfM/sb +2CEJKHxNGGleZIp6GZPKfuzzcuc3B1hZKKxC+cX/zT/npfo4sdAMx9lSGlPWgcxC +ejVb7Us6eva1jsz/D3zkYDaHL63woSV9/9JLEYhwVKZBqGdTUkJe5DSe5L6j7Kpi +Xd3DTKaCQeQzC6zJMw9kglcq/QytNuEMrkvF7zuZ2SOzW120V+x0cAwqTwIDAQAB +o4GgMIGdMAwGA1UdEwQFMAMBAf8wPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDovL2Zl 
+ZGlyLmNvbXNpZ24uY28uaWwvY3JsL0NvbVNpZ25DQS5jcmwwDgYDVR0PAQH/BAQD +AgGGMB8GA1UdIwQYMBaAFEsBmz5WGmU2dst7l6qSBe4y5ygxMB0GA1UdDgQWBBRL +AZs+VhplNnbLe5eqkgXuMucoMTANBgkqhkiG9w0BAQUFAAOCAQEA0Nmlfv4pYEWd +foPPbrxHbvUanlR2QnG0PFg/LUAlQvaBnPGJEMgOqnhPOAlXsDzACPw1jvFIUY0M +cXS6hMTXcpuEfDhOZAYnKuGntewImbQKDdSFc8gS4TXt8QUxHXOZDOuWyt3T5oWq +8Ir7dcHyCTxlZWTzTNity4hp8+SDtwy9F1qWF8pb/627HOkthIDYIb6FUtnUdLlp +hbpN7Sgy6/lhSuTENh4Z3G+EER+V9YMoGKgzkkMn3V0TBEVPh9VGzT2ouvDzuFYk +Res3x+F2T3I5GN9+dHLHcy056mDmrRGiVod7w2ia/viMcKjfZTL0pECMocJEAw6U +AGegcQCCSA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGATCCA+mgAwIBAgIRAI9hcRW6eVgXjH0ROqzW264wDQYJKoZIhvcNAQELBQAw +RTEfMB0GA1UEAxMWQ29tU2lnbiBHbG9iYWwgUm9vdCBDQTEVMBMGA1UEChMMQ29t +U2lnbiBMdGQuMQswCQYDVQQGEwJJTDAeFw0xMTA3MTgxMDI0NTRaFw0zNjA3MTYx +MDI0NTVaMEUxHzAdBgNVBAMTFkNvbVNpZ24gR2xvYmFsIFJvb3QgQ0ExFTATBgNV +BAoTDENvbVNpZ24gTHRkLjELMAkGA1UEBhMCSUwwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCyKClzKh3rm6n1nvigmV/VU1D4hSwYW2ro3VqpzpPo0Ph3 +3LguqjXd5juDwN4mpxTpD99d7Xu5X6KGTlMVtfN+bTbA4t3x7DU0Zqn0BE5XuOgs +3GLH41Vmr5wox1bShVpM+IsjcN4E/hMnDtt/Bkb5s33xCG+ohz5dlq0gA9qfr/g4 +O9lkHZXTCeYrmVzd/il4x79CqNvGkdL3um+OKYl8rg1dPtD8UsytMaDgBAopKR+W +igc16QJzCbvcinlETlrzP/Ny76BWPnAQgaYBULax/Q5thVU+N3sEOKp6uviTdD+X +O6i96gARU4H0xxPFI75PK/YdHrHjfjQevXl4J37FJfPMSHAbgPBhHC+qn/014DOx +46fEGXcdw2BFeIIIwbj2GH70VyJWmuk/xLMCHHpJ/nIF8w25BQtkPpkwESL6esaU +b1CyB4Vgjyf16/0nRiCAKAyC/DY/Yh+rDWtXK8c6QkXD2XamrVJo43DVNFqGZzbf +5bsUXqiVDOz71AxqqK+p4ek9374xPNMJ2rB5MLPAPycwI0bUuLHhLy6nAIFHLhut +TNI+6Y/soYpi5JSaEjcY7pxI8WIkUAzr2r+6UoT0vAdyOt7nt1y8844a7szo/aKf +woziHl2O1w6ZXUC30K+ptXVaOiW79pBDcbLZ9ZdbONhS7Ea3iH4HJNwktrBJLQID +AQABo4HrMIHoMA8GA1UdEwEB/wQFMAMBAf8wgYQGA1UdHwR9MHswPKA6oDiGNmh0 +dHA6Ly9mZWRpci5jb21zaWduLmNvLmlsL2NybC9jb21zaWduZ2xvYmFscm9vdGNh +LmNybDA7oDmgN4Y1aHR0cDovL2NybDEuY29tc2lnbi5jby5pbC9jcmwvY29tc2ln +bmdsb2JhbHJvb3RjYS5jcmwwDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBQCRZPY +DUhirGm6rgZbPvuqJpFQsTAfBgNVHSMEGDAWgBQCRZPYDUhirGm6rgZbPvuqJpFQ +sTANBgkqhkiG9w0BAQsFAAOCAgEAk1V5V9701xsfy4mfX+tP9Ln5e9h3N+QMwUfj +kr+k3e8iXOqADjTpUHeBkEee5tJq09ZLp/43F5tZ2eHdYq2ZEX7iWHCnOQet6Yw9 +SU1TahsrGDA6JJD9sdPFnNZooGsU1520e0zNB0dNWwxrWAmu4RsBxvEpWCJbvzQL +dOfyX85RWwli81OiVMBc5XvJ1mxsIIqli45oRynKtsWP7E+b0ISJ1n+XFLdQo/Nm +WA/5sDfT0F5YPzWdZymudMbXitimxC+n4oQE4mbQ4Zm718Iwg3pP9gMMcSc7Qc1J +kJHPH9O7gVubkKHuSYj9T3Ym6c6egL1pb4pz/uT7cT26Fiopc/jdqbe2EAfoJZkv +hlp/zdzOoXTWjiKNA5zmgWnZn943FuE9KMRyKtyi/ezJXCh8ypnqLIKxeFfZl69C +BwJsPXUTuqj8Fic0s3aZmmr7C4jXycP+Q8V+akMEIoHAxcd960b4wVWKqOcI/kZS +Q0cYqWOY1LNjznRt9lweWEfwDBL3FhrHOmD4++1N3FkkM4W+Q1b2WOL24clDMj+i +2n9Iw0lc1llHMSMvA5D0vpsXZpOgcCVahfXczQKi9wQ3oZyonJeWx4/rXdMtagAB +VBYGFuMEUEQtybI+eIbnp5peO2WAAblQI4eTy/jMVowe5tfMEXovV3sz9ULgmGb3 +DscLP1I= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAw +PDEbMBkGA1UEAxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWdu +MQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwx +GzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBDQTEQMA4GA1UEChMHQ29tU2lnbjEL +MAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGtWhf +HZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs49oh +gHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sW +v+bznkqH7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ue +Mv5WJDmyVIRD9YTC2LxBkMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr +9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d19guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt +6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUwAwEB/zBEBgNVHR8EPTA7 
+MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29tU2lnblNl +Y3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58 +ADsAj8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkq +hkiG9w0BAQUFAAOCAQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7p +iL1DRYHjZiM/EoZNGeQFsOY3wo3aBijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtC +dsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtpFhpFfTMDZflScZAmlaxMDPWL +kz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP51qJThRv4zdL +hfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF 
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp 
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ 
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If 
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx +ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w +MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD +VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx +FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu +ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7 +gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH +fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a +ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT +ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk +c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto +dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt +aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI +hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk +QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/ +h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR +rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 +9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b 
+xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIRANAeRlAAACmMAAAAAgAAAAIwDQYJKoZIhvcNAQEFBQAw +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYNDAeFw0wMDA5MTMwNjIyNTBaFw0yMDA5MTMwNjIyNTBa +MD8xJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0dXJlIFRydXN0IENvLjEXMBUGA1UE +AxMORFNUIFJvb3QgQ0EgWDQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCthX3OFEYY8gSeIYur0O4ypOT68HnDrjLfIutL5PZHRwQGjzCPb9PFo/ihboJ8 +RvfGhBAqpQCo47zwYEhpWm1jB+L/OE/dBBiyn98krfU2NiBKSom2J58RBeAwHGEy +cO+lewyjVvbDDLUy4CheY059vfMjPAftCRXjqSZIolQb9FdPcAoa90mFwB7rKniE +J7vppdrUScSS0+eBrHSUPLdvwyn4RGp+lSwbWYcbg5EpSpE0GRJdchic0YDjvIoC +YHpe7Rkj93PYRTQyU4bhC88ck8tMqbvRYqMRqR+vobbkrj5LLCOQCHV5WEoxWh+0 +E2SpIFe7RkV++MmpIAc0h1tZAgMBAAGjMjAwMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFPCD6nPIP1ubWzdf9UyPWvf0hki9MA0GCSqGSIb3DQEBBQUAA4IBAQCE +G85wl5eEWd7adH6XW/ikGN5salvpq/Fix6yVTzE6CrhlP5LBdkf6kx1bSPL18M45 +g0rw2zA/MWOhJ3+S6U+BE0zPGCuu8YQaZibR7snm3HiHUaZNMu5c8D0x0bcMxDjY +AVVcHCoNiL53Q4PLW27nbY6wwG0ffFKmgV3blxrYWfuUDgGpyPwHwkfVFvz9qjaV +mf12VJffL6W8omBPtgteb6UaT/k1oJ7YI0ldGf+ngpVbRhD+LC3cUtT6GO/BEPZu +8YTV/hbiDH5v3khVqMIeKT6o8IuXGG7F6a6vKwP1F1FwTXf4UC/ivhme7vdUH7B/ +Vv4AEbT8dNfEeFxrkDbh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY 
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK +vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIE5zCCA8+gAwIBAgIBADANBgkqhkiG9w0BAQUFADCBjTELMAkGA1UEBhMCQ0Ex +EDAOBgNVBAgTB09udGFyaW8xEDAOBgNVBAcTB1Rvcm9udG8xHTAbBgNVBAoTFEVj +aG93b3J4IENvcnBvcmF0aW9uMR8wHQYDVQQLExZDZXJ0aWZpY2F0aW9uIFNlcnZp +Y2VzMRowGAYDVQQDExFFY2hvd29yeCBSb290IENBMjAeFw0wNTEwMDYxMDQ5MTNa +Fw0zMDEwMDcxMDQ5MTNaMIGNMQswCQYDVQQGEwJDQTEQMA4GA1UECBMHT250YXJp +bzEQMA4GA1UEBxMHVG9yb250bzEdMBsGA1UEChMURWNob3dvcnggQ29ycG9yYXRp +b24xHzAdBgNVBAsTFkNlcnRpZmljYXRpb24gU2VydmljZXMxGjAYBgNVBAMTEUVj +aG93b3J4IFJvb3QgQ0EyMIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEA +utU/5BkV15UBf+s+JQruKQxr77s3rjp/RpOtmhHILIiO5gsEWP8MMrfrVEiidjI6 +Qh6ans0KAWc2Dw0/j4qKAQzOSyAZgjcdypNTBZ7muv212DA2Pu41rXqwMrlBrVi/ +KTghfdLlNRu6JrC5y8HarrnRFSKF1Thbzz921kLDRoCi+FVs5eVuK5LvIfkhNAqA +byrTgO3T9zfZgk8upmEkANPDL1+8y7dGPB/d6lk0I5mv8PESKX02TlvwgRSIiTHR +k8++iOPLBWlGp7ZfqTEXkPUZhgrQQvxcrwCUo6mk8TqgxCDP5FgPoHFiPLef5szP +ZLBJDWp7GLyE1PmkQI6WiwIBA6OCAVAwggFMMA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBQ74YEboKs/OyGC1eISrq5QqxSlEzCBugYDVR0j +BIGyMIGvgBQ74YEboKs/OyGC1eISrq5QqxSlE6GBk6SBkDCBjTELMAkGA1UEBhMC +Q0ExEDAOBgNVBAgTB09udGFyaW8xEDAOBgNVBAcTB1Rvcm9udG8xHTAbBgNVBAoT +FEVjaG93b3J4IENvcnBvcmF0aW9uMR8wHQYDVQQLExZDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzMRowGAYDVQQDExFFY2hvd29yeCBSb290IENBMoIBADBQBgNVHSAESTBH +MEUGCysGAQQB+REKAQMBMDYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuZWNob3dv +cnguY29tL2NhL3Jvb3QyL2Nwcy5wZGYwDQYJKoZIhvcNAQEFBQADggEBAG+nrPi/ +0RpfEzrj02C6JGPUar4nbjIhcY6N7DWNeqBoUulBSIH/PYGNHYx7/lnJefiixPGE +7TQ5xPgElxb9bK8zoAApO7U33OubqZ7M7DlHnFeCoOoIAZnG1kuwKwD5CXKB2a74 +HzcqNnFW0IsBFCYqrVh/rQgJOzDA8POGbH0DeD0xjwBBooAolkKT+7ZItJF1Pb56 +QpDL9G+16F7GkmnKlAIYT3QTS3yFGYChnJcd+6txUPhKi9sSOOmAIaKHnkH9Scz+ +A2cSi4A3wUYXVatuVNHpRb2lygfH3SuCX9MU8Ure3zBlSU1LALtMqI4JmcQmQpIq +zIzvO2jHyu9PQqo= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy +MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl +ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw +WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV 
+v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV 
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy +MjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA +vtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G +CSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA +WUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo +oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ +h7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18 +f3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN +B/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy +vUxFnmG6v4SBkgPR0ml8xQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX 
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEYDCCA0igAwIBAgICATAwDQYJKoZIhvcNAQELBQAwWTELMAkGA1UEBhMCVVMx +GDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDENMAsGA1UECxMERlBLSTEhMB8GA1UE +AxMYRmVkZXJhbCBDb21tb24gUG9saWN5IENBMB4XDTEwMTIwMTE2NDUyN1oXDTMw +MTIwMTE2NDUyN1owWTELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJu +bWVudDENMAsGA1UECxMERlBLSTEhMB8GA1UEAxMYRmVkZXJhbCBDb21tb24gUG9s +aWN5IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2HX7NRY0WkG/ +Wq9cMAQUHK14RLXqJup1YcfNNnn4fNi9KVFmWSHjeavUeL6wLbCh1bI1FiPQzB6+ +Duir3MPJ1hLXp3JoGDG4FyKyPn66CG3G/dFYLGmgA/Aqo/Y/ISU937cyxY4nsyOl +4FKzXZbpsLjFxZ+7xaBugkC7xScFNknWJidpDDSPzyd6KgqjQV+NHQOGgxXgVcHF +mCye7Bpy3EjBPvmE0oSCwRvDdDa3ucc2Mnr4MrbQNq4iGDGMUHMhnv6DOzCIJOPp +wX7e7ZjHH5IQip9bYi+dpLzVhW86/clTpyBLqtsgqyFOHQ1O5piF5asRR12dP8Qj 
+wOMUBm7+nQIDAQABo4IBMDCCASwwDwYDVR0TAQH/BAUwAwEB/zCB6QYIKwYBBQUH +AQsEgdwwgdkwPwYIKwYBBQUHMAWGM2h0dHA6Ly9odHRwLmZwa2kuZ292L2ZjcGNh +L2NhQ2VydHNJc3N1ZWRCeWZjcGNhLnA3YzCBlQYIKwYBBQUHMAWGgYhsZGFwOi8v +bGRhcC5mcGtpLmdvdi9jbj1GZWRlcmFsJTIwQ29tbW9uJTIwUG9saWN5JTIwQ0Es +b3U9RlBLSSxvPVUuUy4lMjBHb3Zlcm5tZW50LGM9VVM/Y0FDZXJ0aWZpY2F0ZTti +aW5hcnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUrQx6dVzl85jEeZgOrCj9l/TnAvwwDQYJKoZIhvcNAQELBQAD +ggEBAI9z2uF/gLGH9uwsz9GEYx728Yi3mvIRte9UrYpuGDco71wb5O9Qt2wmGCMi +TR0mRyDpCZzicGJxqxHPkYnos/UqoEfAFMtOQsHdDA4b8Idb7OV316rgVNdF9IU+ +7LQd3nyKf1tNnJaK0KIyn9psMQz4pO9+c+iR3Ah6cFqgr2KBWfgAdKLI3VTKQVZH +venAT+0g3eOlCd+uKML80cgX2BLHb94u6b2akfI8WpQukSKAiaGMWMyDeiYZdQKl +Dn0KJnNR6obLB6jI/WNaNZvSr79PMUjBhHDbNXuaGQ/lj/RqDG8z2esccKIN47lQ +A2EC/0rskqTcLe4qNJMHtyznGI8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL +MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj +KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 +MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw +NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV +BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH +MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL +So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal +tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG +CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT +qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz +rD6ogRLQy7rQkgu2npaqBA+K +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB +mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT +MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ +BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg 
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 +BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz ++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm +hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn +5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W +JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL +DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC +huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB +AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB +zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN +kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH +SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G +spki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG +HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r 
+6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog +zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk +cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo +YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9 +MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy +NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G +A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA +A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0 +Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s +QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV +eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795 +B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh +z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T +AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i +ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w +TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH +MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD +VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE +VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B +AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM +bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi +ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG +VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c +ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/ +AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT 
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX +kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO 
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw 
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFSzCCAzOgAwIBAgIRALZLiAfiI+7IXBKtpg4GofIwDQYJKoZIhvcNAQELBQAw +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xMjA5MjgwODU4NTFaFw0zNzEyMzExNTU5NTla +MD8xCzAJBgNVBAYTAlRXMTAwLgYDVQQKDCdHb3Zlcm5tZW50IFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQC2/5c8gb4BWCQnr44BK9ZykjAyG1+bfNTUf+ihYHMwVxAA+lCWJP5Q5ow6ldFX +eYTVZ1MMKoI+GFy4MCYa1l7GLbIEUQ7v3wxjR+vEEghRK5lxXtVpe+FdyXcdIOxW +juVhYC386RyA3/pqg7sFtR4jEpyCygrzFB0g5AaPQySZn7YKk1pzGxY5vgW28Yyl +ZJKPBeRcdvc5w88tvQ7Yy6gOMZvJRg9nU0MEj8iyyIOAX7ryD6uBNaIgIZfOD4k0 +eA/PH07p+4woPN405+2f0mb1xcoxeNLOUNFggmOd4Ez3B66DNJ1JSUPUfr0t4urH +cWWACOQ2nnlwCjyHKenkkpTqBpIpJ3jmrdc96QoLXvTg1oadLXLLi2RW5vSueKWg +OTNYPNyoj420ai39iHPplVBzBN8RiD5C1gJ0+yzEb7xs1uCAb9GGpTJXA9ZN9E4K +mSJ2fkpAgvjJ5E7LUy3Hsbbi08J1J265DnGyNPy/HE7CPfg26QrMWJqhGIZO4uGq +s3NZbl6dtMIIr69c/aQCb/+4DbvVq9dunxpPkUDwH0ZVbaCSw4nNt7H/HLPLo5wK +4/7NqrwB7N1UypHdTxOHpPaY7/1J1lcqPKZc9mA3v9g+fk5oKiMyOr5u5CI9ByTP +isubXVGzMNJxbc5Gim18SjNE2hIvNkvy6fFRCW3bapcOFwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBTVZx3gnHosnMvFmOcdByYqhux0zTAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAJA75cJTQijq9TFOjj2Rnk0J +89ixUuZPrAwxIbvx6pnMg/y2KOTshAcOD06Xu29oRo8OURWV+Do7H1+CDgxxDryR +T64zLiNB9CZrTxOH+nj2LsIPkQWXqmrBap+8hJ4IKifd2ocXhuGzyl3tOKkpboTe +Rmv8JxlQpRJ6jH1i/NrnzLyfSa8GuCcn8on3Fj0Y5r3e9YwSkZ/jBI3+BxQaWqw5 +ghvxOBnhY+OvbLamURfr+kvriyL2l/4QOl+UoEtTcT9a4RD4co+WgN2NApgAYT2N +vC2xR8zaXeEgp4wxXPHj2rkKhkfIoT0Hozymc26Uke1uJDr5yTDRB6iBfSZ9fYTf +hsmL5a4NHr6JSFEVg5iWL0rrczTXdM3Jb9DCuiv2mv6Z3WAUjhv5nDk8f0OJU+jl +wqu+Iq0nOJt3KLejY2OngeepaUXrjnhWzAWEx/uttjB8YwWfLYwkf0uLkvw4Hp+g +pVezbp3YZLhwmmBScMip0P/GnO0QYV7Ngw5u6E0CQUridgR51lQ/ipgyFKDdLZzn +uoJxo4ZVKZnSKdt1OvfbQ/+2W/u3fjWAjg1srnm3Ni2XUqGwB5wH5Ss2zQOXlL0t +DjQG/MAWifw3VOTWzz0TBPKR2ck2Lj7FWtClTILD/y58Jnb38/1FoqVuVa4uzM8s +iTTa9g3nkagQ6hed8vbs +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl +cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ 
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFHjCCBAagAwIBAgIEAKA3oDANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMC +Q1oxOjA4BgNVBAMMMUkuQ0EgLSBRdWFsaWZpZWQgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHksIDA5LzIwMDkxLTArBgNVBAoMJFBydm7DrSBjZXJ0aWZpa2HEjW7DrSBh +dXRvcml0YSwgYS5zLjE9MDsGA1UECww0SS5DQSAtIEFjY3JlZGl0ZWQgUHJvdmlk +ZXIgb2YgQ2VydGlmaWNhdGlvbiBTZXJ2aWNlczAeFw0wOTA5MDEwMDAwMDBaFw0x +OTA5MDEwMDAwMDBaMIG3MQswCQYDVQQGEwJDWjE6MDgGA1UEAwwxSS5DQSAtIFF1 +YWxpZmllZCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSwgMDkvMjAwOTEtMCsGA1UE +CgwkUHJ2bsOtIGNlcnRpZmlrYcSNbsOtIGF1dG9yaXRhLCBhLnMuMT0wOwYDVQQL +DDRJLkNBIC0gQWNjcmVkaXRlZCBQcm92aWRlciBvZiBDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtTaEy0KC8M9l +4lSaWHMs4+sVV1LwzyJYiIQNeCrv1HHm/YpGIdY/Z640ceankjQvIX7m23BK4OSC +6KO8kZYA3zopOz6GFCOKV2PvLukbc+c2imF6kLHEv6qNA8WxhPbR3xKwlHDwB2yh +Wzo7V3QVgDRG83sugqQntKYC3LnlTGbJpNP+Az72gpO9AHUn/IBhFk4ksc8lYS2L +9GCy9CsmdKSBP78p9w8Lx7vDLqkDgt1/zBrcUWmSSb7AE/BPEeMryQV1IdI6nlGn +BhWkXOYf6GSdayJw86btuxC7viDKNrbp44HjQRaSxnp6O3eto1x4DfiYdw/YbJFe +7EjkxSQBywIDAQABo4IBLjCCASowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwgecGA1UdIASB3zCB3DCB2QYEVR0gADCB0DCBzQYIKwYBBQUHAgIwgcAa +gb1UZW50byBjZXJ0aWZpa2F0IGplIHZ5ZGFuIGpha28ga3ZhbGlmaWtvdmFueSBz +eXN0ZW1vdnkgY2VydGlmaWthdCBwb2RsZSB6YWtvbmEgYy4gMjI3LzIwMDAgU2Iu +IHYgcGxhdG5lbSB6bmVuaS9UaGlzIGlzIHF1YWxpZmllZCBzeXN0ZW0gY2VydGlm +aWNhdGUgYWNjb3JkaW5nIHRvIEN6ZWNoIEFjdCBOby4gMjI3LzIwMDAgQ29sbC4w +HQYDVR0OBBYEFHnL0CPpOmdwkXRP01Hi4CD94Sj7MA0GCSqGSIb3DQEBCwUAA4IB 
+AQB9laU214hYaBHPZftbDS/2dIGLWdmdSbj1OZbJ8LIPBMxYjPoEMqzAR74tw96T +i6aWRa5WdOWaS6I/qibEKFZhJAVXX5mkx2ewGFLJ+0Go+eTxnjLOnhVF2V2s+57b +m8c8j6/bS6Ij6DspcHEYpfjjh64hE2r0aSpZDjGzKFM6YpqsCJN8qYe2X1qmGMLQ +wvNdjG+nPzCJOOuUEypIWt555ZDLXqS5F7ZjBjlfyDZjEfS2Es9Idok8alf563Mi +9/o+Ba46wMYOkk3P1IlU0RqCajdbliioACKDztAqubONU1guZVzV8tuMASVzbJeL +/GAB7ECTwe1RuKrLYtglMKI9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN 
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXzCCA0egAwIBAgIBATANBgkqhkiG9w0BAQUFADCB0DELMAkGA1UEBhMCRVMx +SDBGBgNVBAoTP0laRU5QRSBTLkEuIC0gQ0lGIEEtMDEzMzcyNjAtUk1lcmMuVml0 +b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFCMEAGA1UEBxM5QXZkYSBkZWwgTWVk +aXRlcnJhbmVvIEV0b3JiaWRlYSAzIC0gMDEwMTAgVml0b3JpYS1HYXN0ZWl6MRMw +EQYDVQQDEwpJemVucGUuY29tMR4wHAYJKoZIhvcNAQkBFg9JbmZvQGl6ZW5wZS5j +b20wHhcNMDMwMTMwMjMwMDAwWhcNMTgwMTMwMjMwMDAwWjCB0DELMAkGA1UEBhMC +RVMxSDBGBgNVBAoTP0laRU5QRSBTLkEuIC0gQ0lGIEEtMDEzMzcyNjAtUk1lcmMu +Vml0b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFCMEAGA1UEBxM5QXZkYSBkZWwg +TWVkaXRlcnJhbmVvIEV0b3JiaWRlYSAzIC0gMDEwMTAgVml0b3JpYS1HYXN0ZWl6 +MRMwEQYDVQQDEwpJemVucGUuY29tMR4wHAYJKoZIhvcNAQkBFg9JbmZvQGl6ZW5w +ZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1btoCXXhp3xIW +D+Bxl8nUCxkyiazWfpt0e68t+Qt9+lZjKZSdEw2Omj4qvr+ovRmDXO3iWpWVOWDl 
+3JHJjAzFCe8ZEBNDH+QNYwZHmPBaMYFOYFdbAFVHWvys152C308hcFJ6xWWGmjvl +2eMiEl9P2nR2LWue368DCu+ak7j3gjAXaCOdP1a7Bfr+RW3X2SC5R4Xyp8iHlL5J +PHJD/WBkLrezwzQPdACw8m9EG7q9kUwlNpL32mROujS3ZkT6mQTzJieLiE3X04s0 +uIUqVkk5MhjcHFf7al0N5CzjtTcnXYJKN2Z9EDVskk4olAdGi46eSoZXbjUOP5gk +Ej6wVZAXAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBTqVk/sPIOhFIh4gbIrBSLAB0FbQjANBgkqhkiG9w0BAQUFAAOC +AQEAYp7mEzzhw6o5Hf5+T5kcI+t4BJyiIWy7vHlLs/G8dLYXO81aN/Mzg928eMTR +TxxYZL8dd9uwsJ50TVfX6L0R4Dyw6wikh3fHRrat9ufXi63j5K91Ysr7aXqnF38d +iAgHYkrwC3kuxHBb9C0KBz6h8Q45/KCyN7d37wWAq38yyhPDlaOvyoE6bdUuK5hT +m5EYA5JmPyrhQ1moDOyueWBAjxzMEMj+OAY1H90cLv6wszsqerxRrdTOHBdv7MjB +EIpvEEQkXUxVXAzFuuT6m2t91Lfnwfl/IvljHaVC7DlyyhRYHD6D4Rx+4QKp4tWL +vpw6LkI+gKNJ/YdMCsRZQzEEFA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF8DCCA9igAwIBAgIPBuhGJy8fCo/RhFzjafbVMA0GCSqGSIb3DQEBBQUAMDgx +CzAJBgNVBAYTAkVTMRQwEgYDVQQKDAtJWkVOUEUgUy5BLjETMBEGA1UEAwwKSXpl +bnBlLmNvbTAeFw0wNzEyMTMxMzA4MjdaFw0zNzEyMTMwODI3MjVaMDgxCzAJBgNV +BAYTAkVTMRQwEgYDVQQKDAtJWkVOUEUgUy5BLjETMBEGA1UEAwwKSXplbnBlLmNv +bTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMnTesoPHqynhugWZWqx +whtFMnGV2f4QW8yv56V5AY+Jw8ryVXH3d753lPNypCxE2J6SmxQ6oeckkAoKVo7F +2CaU4dlI4S0+2gpy3aOZFdqBoof0e24md4lYrdbrDLJBenNubdt6eEHpCIgSfocu +ZhFjbFT7PJ1ywLwu/8K33Q124zrX97RovqL144FuwUZvXY3gTcZUVYkaMzEKsVe5 +o4qYw+w7NMWVQWl+dcI8IMVhulFHoCCQk6GQS/NOfIVFVJrRBSZBsLVNHTO+xAPI +JXzBcNs79AktVCdIrC/hxKw+yMuSTFM5NyPs0wH54AlETU1kwOENWocivK0bo/4m +tRXzp/yEGensoYi0RGmEg/OJ0XQGqcwL1sLeJ4VQJsoXuMl6h1YsGgEebL4TrRCs +tST1OJGh1kva8bvS3ke18byB9llrzxlT6Y0Vy0rLqW9E5RtBz+GGp8rQap+8TI0G +M1qiheWQNaBiXBZO8OOi+gMatCxxs1gs3nsL2xoP694hHwZ3BgOwye+Z/MC5TwuG +KP7Suerj2qXDR2kS4Nvw9hmL7Xtw1wLW7YcYKCwEJEx35EiKGsY7mtQPyvp10gFA +Wo15v4vPS8+qFsGV5K1Mij4XkdSxYuWC5YAEpAN+jb/af6IPl08M0w3719Hlcn4c +yHf/W5oPt64FRuXxqBbsR6QXAgMBAAGjgfYwgfMwgbAGA1UdEQSBqDCBpYEPaW5m +b0BpemVucGUuY29tpIGRMIGOMUcwRQYDVQQKDD5JWkVOUEUgUy5BLiAtIENJRiBB +MDEzMzcyNjAtUk1lcmMuVml0b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFDMEEG +A1UECQw6QXZkYSBkZWwgTWVkaXRlcnJhbmVvIEV0b3JiaWRlYSAxNCAtIDAxMDEw +IFZpdG9yaWEtR2FzdGVpejAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUHRxlDqjyJXu0kc/ksbHmvVV0bAUwDQYJKoZIhvcNAQEFBQAD +ggIBAMeBRm8hGE+gBe/n1bqXUKJg7aWSFBpSm/nxiEqg3Hh10dUflU7F57dp5iL0 ++CmoKom+z892j+Mxc50m0xwbRxYpB2iEitL7sRskPtKYGCwkjq/2e+pEFhsqxPqg +l+nqbFik73WrAGLRne0TNtsiC7bw0fRue0aHwp28vb5CO7dz0JoqPLRbEhYArxk5 +ja2DUBzIgU+9Ag89njWW7u/kwgN8KRwCfr00J16vU9adF79XbOnQgxCvv11N75B7 +XSus7Op9ACYXzAJcY9cZGKfsK8eKPlgOiofmg59OsjQerFQJTx0CCzl+gQgVuaBp +E8gyK+OtbBPWg50jLbJtooiGfqgNASYJQNntKE6MkyQP2/EeTXp6WuKlWPHcj1+Z +ggwuz7LdmMySlD/5CbOlliVbN/UShUHiGUzGigjB3Bh6Dx4/glmimj4/+eAJn/3B +kUtdyXvWton83x18hqrNA/ILUpLxYm9/h+qrdslsUMIZgq+qHfUgKGgu1fxkN0/P +pUTEvnK0jHS0bKf68r10OEMr3q/53NjgnZ/cPcqlY0S/kqJPTIAcuxrDmkoEVU3K +7iYLHL8CxWTTnn7S05EcS6L1HOUXHA0MUqORH5zwIe0ClG+poEnK6EOMxPQ02nwi +o8ZmPrgbBYhdurz3vOXcFD2nhqi2WVIhA16L4wTtSyoeo09Q +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX 
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgIBBDANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJLUjEN +MAsGA1UECgwES0lTQTEuMCwGA1UECwwlS29yZWEgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkgQ2VudHJhbDEWMBQGA1UEAwwNS0lTQSBSb290Q0EgMTAeFw0wNTA4MjQw +ODA1NDZaFw0yNTA4MjQwODA1NDZaMGQxCzAJBgNVBAYTAktSMQ0wCwYDVQQKDARL +SVNBMS4wLAYDVQQLDCVLb3JlYSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBDZW50 +cmFsMRYwFAYDVQQDDA1LSVNBIFJvb3RDQSAxMIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAvATk+hM58DSWIGtsaLv623f/J/es7C/n/fB/bW+MKs0lCVsk +9KFo/CjsySXirO3eyDOE9bClCTqnsUdIxcxPjHmc+QZXfd3uOPbPFLKc6tPAXXdi +8EcNuRpAU1xkcK8IWsD3z3X5bI1kKB4g/rcbGdNaZoNy4rCbvdMlFQ0yb2Q3lIVG +yHK+d9VuHygvx2nt54OJM1jT3qC/QOhDUO7cTWu8peqmyGGO9cNkrwYV3CmLP3WM +vHFE2/yttRcdbYmDz8Yzvb9Fov4Kn6MRXw+5H5wawkbMnChmn3AmPC7fqoD+jMUE +CSVPzZNHPDfqAmeS/vwiJFys0izgXAEzisEZ2wIBA6MyMDAwHQYDVR0OBBYEFL+2 +J9gDWnZlTGEBQVYx5Yt7OtnMMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEF +BQADggEBABOvUQveimpb5poKyLGQSk6hAp3MiNKrZr097LuxQpVqslxa/6FjZJap +aBV/JV6K+KRzwYCKhQoOUugy50X4TmWAkZl0Q+VFnUkq8JSV3enhMNITbslOsXfl +BM+tWh6UCVrXPAgcrnrpFDLBRa3SJkhyrKhB2vAhhzle3/xk/2F0KpzZm4tfwjeT +2KM3LzuTa7IbB6d/CVDv0zq+IWuKkDsnSlFOa56ch534eJAx7REnxqhZvvwYC/uO +fi5C4e3nCSG9uRPFVmf0JqZCQ5BEVLRxm3bkGhKsGigA35vB1fjbXKP4krG9tNT5 +UNkAAk/bg9ART6RCVmE6fhMy04Qfybo= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc 
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ 
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV 
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- 
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg 
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR +dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ 
+WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU +xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p +dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk +fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R +cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT 
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz +MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N +IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11 +bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE +RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO +zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5 +bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF +MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1 +VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC +OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW +tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ +q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb +EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+ +Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O +VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj 
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV +BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ +3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS 
+v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX +DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291 +qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp +uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU +Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE +pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp +5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M +UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN +GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy +5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv +6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK +eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6 +B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/ +BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC +Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW +IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy +KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 
+Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt +XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd +INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw +0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO 
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 +OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG +A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ +JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD +vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo +D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ +Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW +RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK +HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN +nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM +0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i +UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 +Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg +TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL +BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX +UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl +6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK +9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ +HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI 
+wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY +XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l +IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo +hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr +so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi +U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j +ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js +LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM +BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy +dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh +cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh +YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg +dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp +bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ +YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT +TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ +9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 +jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW +FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz +ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 +ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L +EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu +L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC +O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V +um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh +NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi 
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul +F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC +ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w +ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk +aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 +YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg +c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 +d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG +CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF +wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS +Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst +0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc +pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl +CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF +P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK +1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm +KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ +8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm +fyWl8kgAwKQB2j8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBk +MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0 +YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg +Q0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4MTgyMjA2MjBaMGQxCzAJBgNVBAYT +AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp +Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9 +m2BtRsiMMW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdih +FvkcxC7mlSpnzNApbjyFNDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/ +TilftKaNXXsLmREDA/7n29uj/x2lzZAeAR81sH8A25Bvxn570e56eqeqDFdvpG3F +EzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkCb6dJtDZd0KTeByy2dbco +kdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn7uHbHaBu +HYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNF +vJbNcA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo +19AOeCMgkckkKmUpWyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjC +L3UcPX7ape8eYIVpQtPM+GP+HkM5haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJW +bjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNYMUJDLXT5xp6mig/p/r+D5kNX 
+JLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw +FDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzc +K6FptWfUjNP9MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzf +ky9NfEBWMXrrpA9gzXrzvsMnjgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7Ik +Vh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQMbFamIp1TpBcahQq4FJHgmDmHtqB +sfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4HVtA4oJVwIHaM190e +3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtlvrsR +ls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ip +mXeascClOS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HH +b6D0jqTsNFFbjCYDcKF31QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksf +rK/7DZBaZmBwXarNeNQk7shBoJMBkpxqnvy5JMWzFYJ+vq6VK+uxwNrjAWALXmms +hFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCyx/yP2FS1k2Kdzs9Z+z0Y +zirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMWNY6E0F/6 +MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBk +MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0 +YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg +Q0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2MjUwNzM4MTRaMGQxCzAJBgNVBAYT +AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp +Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvEr +jw0DzpPMLgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r +0rk0X2s682Q2zsKwzxNoysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f +2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJwDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVP +ACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpHWrumnf2U5NGKpV+GY3aF +y6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1aSgJA/MTA +tukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL +6yxSNLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0 +uPoTXGiTOmekl9AbmbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrAL +acywlKinh/LTSlDcX3KwFnUey7QYYpqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velh +k6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3qPyZ7iVNTA6z00yPhOgpD/0Q +VAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw +FDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqh +b97iEoHF8TwuMA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4R +fbgZPnm3qKhyN2abGu2sEzsOv2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv +/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ82YqZh6NM4OKb3xuqFp1mrjX2lhI +REeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLzo9v/tdhZsnPdTSpx +srpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcsa0vv +aGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciAT +woCqISxxOQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99n +Bjx8Oto0QuFmtEYE3saWmA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5W +t6NlUe07qxS/TFED6F+KBZvuim6c779o+sjaC+NCydAXFJy3SuCvkychVSa1ZC+N +8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TCrvJcwhbtkj6EPnNgiLx2 +9CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX5OfNeOI5 +wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAw +ZzELMAkGA1UEBhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdp +dGFsIENlcnRpZmljYXRlIFNlcnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290 +IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcNMzEwNjI1MDg0NTA4WjBnMQswCQYD +VQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2Vy +dGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYgQ0Eg 
+MjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7Bx +UglgRCgzo3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD +1ycfMQ4jFrclyxy0uYAyXhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPH +oCE2G3pXKSinLr9xJZDzRINpUKTk4RtiGZQJo/PDvO/0vezbE53PnUgJUmfANykR +HvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8LiqG12W0OfvrSdsyaGOx9/ +5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaHZa0zKcQv +idm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHL +OdAGalNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaC +NYGu+HuB5ur+rPQam3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f +46Fq9mDU5zXNysRojddxyNMkM3OxbPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCB +UWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDixzgHcgplwLa7JSnaFp6LNYth +7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/BAQDAgGGMB0G +A1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWB +bj2ITY1x0kbBbkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6x +XCX5145v9Ydkn+0UjrgEjihLj6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98T +PLr+flaYC/NUn81ETm484T4VvwYmneTwkLbUwp4wLh/vx3rEUMfqe9pQy3omywC0 +Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7XwgiG/W9mR4U9s70 +WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH59yL +Gn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm +7JFe3VE/23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4S +nr8PyQUQ3nqjsTzyP6WqJ3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VN +vBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyAHmBR3NdUIR7KYndP+tiPsys6DXhyyWhB +WkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/giuMod89a2GQ+fYWVq6nTI +fI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuWl8PVP3wb +I+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFejCCA2KgAwIBAgIJAN7E8kTzHab8MA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJDAiBgNVBAMTG1N3aXNzU2ln +biBHb2xkIFJvb3QgQ0EgLSBHMzAeFw0wOTA4MDQxMzMxNDdaFw0zNzA4MDQxMzMx +NDdaMEoxCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJDAiBgNV +BAMTG1N3aXNzU2lnbiBHb2xkIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAMPon8hlWp1nG8FFl7S0h0NbYWCAnvJ/XvlnRN1E+qu1 +q3f/KhlMzm/Ej0Gf4OLNcuDR1FJhQQkKvwpw++CDaWEpytsimlul5t0XlbBvhI46 +PmRaQfsbWPz9Kz6ypOasyYK8zvaV+Jd37Sb2WK6eJ+IPg+zFNljIe8/Vh6GphxoT +Z2EBbaZpnOKQ8StoZfPosHz8gj3erdgKAAlEeROc8P5udXvCvLNZAQt8xdUt8L// +bVfSSYHrtLNQrFv5CxUVjGn/ozkB7fzc3CeXjnuL1Wqm1uAdX80Bkeb1Ipi6LgkY +OG8TqIHS+yE35y20YueBkLDGeVm3Z3X+vo87+jbsr63ST3Q2AeVXqyMEzEpel89+ +xu+MzJUjaY3LOMcZ9taKABQeND1v2gwLw7qX/BFLUmE+vzNnUxC/eBsJwke6Hq9Y +9XWBf71W8etW19lpDAfpNzGwEhwy71bZvnorfL3TPbxqM006PFAQhyfHegpnU9t/ +gJvoniP6+Qg6i6GONFpIM19k05eGBxl9iJTOKnzFat+vvKmfzTqmurtU+X+P388O +WsStmryzOndzg0yTPJBotXxQlRHIgl6UcdBBGPvJxmXszom2ziKzEVs/4J0+Gxho +DaoDoWdZv2udvPjyZS+aQTpF2F7QNmxvOx5jtI6YTBPbIQ6fe+3qoKpxw+ujoNIl +AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBRclwZGNKvfMMV8xQ1VcWYwtWCPnjAfBgNVHSMEGDAWgBRclwZGNKvfMMV8 +xQ1VcWYwtWCPnjANBgkqhkiG9w0BAQsFAAOCAgEAd0tN3uqFSqssJ9ZFx/FfIMFb +YO0Hy6Iz3DbPx5TxBsfV2s/NrYQ+/xJIf0HopWZXMMQd5KcaLy1Cwe9Gc7LV9Vr9 +Dnpr0sgxow1IlldlY1UYwPzkisyYhlurDIonN/ojaFlcJtehwcK5Tiz/KV7mlAu+ +zXJPleiP9ve4Pl7Oz54RyawDKUiKqbamNLmsQP/EtnM3scd/qVHbSypHX0AkB4gG +tySz+3/3sIsz+r8jdaNc/qplGsK+8X2BdwOBsY3XlQ16PEKYt4+pfVDh31IGmqBS +VHiDB2FSCTdeipynxlHRXGPRhNzC29L6Wxg2fWa81CiXL3WWHIQHrIuOUxG+JCGq +Z/LBrYic07B4Z3j101gDIApdIPG152XMDiDj1d/mLxkrhWjBBCbPj+0FU6HdBw7r +QSbHtKksW+NpPWbAYhvAqobAN8MxBIZwOb5rXyFAQaB/5dkPOEtwX0n4hbgrLqof +k0FD+PuydDwfS1dbt9RRoZJKzr4Qou7YFCJ7uUG9jemIqdGPAxpg/z+HiaCZJyJm +sD5onnKIUTidEz5FbQXlRrVz7UOGsRQKHrzaDb8eJFxmjw6+of3G62m8Q3nXA3b5 +3IeZuJjEzX9tEPkQvixC/pwpTYNrCr21jsRIiv0hB6aAfR+b6au9gmFECnEnX22b +kJ6u/zYks2gD1pWMa3M= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFwTCCA6mgAwIBAgIITrIAZwwDXU8wDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEjMCEGA1UEAxMaU3dpc3NTaWdu +IFBsYXRpbnVtIENBIC0gRzIwHhcNMDYxMDI1MDgzNjAwWhcNMzYxMDI1MDgzNjAw +WjBJMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMSMwIQYDVQQD +ExpTd2lzc1NpZ24gUGxhdGludW0gQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMrfogLi2vj8Bxax3mCq3pZcZB/HL37PZ/pEQtZ2Y5Wu669y +IIpFR4ZieIbWIDkm9K6j/SPnpZy1IiEZtzeTIsBQnIJ71NUERFzLtMKfkr4k2Htn +IuJpX+UFeNSH2XFwMyVTtIc7KZAoNppVRDBopIOXfw0enHb/FZ1glwCNioUD7IC+ +6ixuEFGSzH7VozPY1kneWCqv9hbrS3uQMpe5up1Y8fhXSQQeol0GcN1x2/ndi5ob +jM89o03Oy3z2u5yg+gnOI2Ky6Q0f4nIoj5+saCB9bzuohTEJfwvH6GXp43gOCWcw +izSC+13gzJ2BbWLuCB4ELE6b7P6pT1/9aXjvCR+htL/68++QHkwFix7qepF6w9fl ++zC8bBsQWJj3Gl/QKTIDE0ZNYWqFTFJ0LwYfexHihJfGmfNtf9dng34TaNhxKFrY +zt3oEBSa/m0jh26OWnA81Y0JAKeqvLAxN23IhBQeW71FYyBrS3SMvds6DsHPWhaP +pZjydomyExI7C3d3rLvlPClKknLKYRorXkzig3R3+jVIeoVNjZpTxN94ypeRSCtF +KwH3HBqi7Ri6Cr2D+m+8jVeTO9TUps4e8aCxzqv9KyiaTxvXw3LbpMS/XUz13XuW +ae5ogObnmLo2t/5u7Su9IPhlGdpVCX4l3P5hYnL5fhgC72O00Puv5TtjjGePAgMB +AAGjgawwgakwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFFCvzAeHFUdvOMW0ZdHelarp35zMMB8GA1UdIwQYMBaAFFCvzAeHFUdvOMW0 +ZdHelarp35zMMEYGA1UdIAQ/MD0wOwYJYIV0AVkBAQEBMC4wLAYIKwYBBQUHAgEW 
+IGh0dHA6Ly9yZXBvc2l0b3J5LnN3aXNzc2lnbi5jb20vMA0GCSqGSIb3DQEBBQUA +A4ICAQAIhab1Fgz8RBrBY+D5VUYI/HAcQiiWjrfFwUF1TglxeeVtlspLpYhg0DB0 +uMoI3LQwnkAHFmtllXcBrqS3NQuB2nEVqXQXOHtYyvkv+8Bldo1bAbl93oI9ZLi+ +FHSjClTTLJUYFzX1UWs/j6KWYTl4a0vlpqD4U99REJNi54Av4tHgvI42Rncz7Lj7 +jposiU0xEQ8mngS7twSNC/K5/FqdOxa3L8iYq/6KUFkuozv8KV2LwUvJ4ooTHbG/ +u0IdUt1O2BReEMYxB+9xJ/cbOQncguqLs5WGXv312l0xpuAxtpTmREl0xRbl9x8D +YSjFyMsSoEJL+WuICI20MhjzdZ/EfwBPBZWcoxcCw7NTm6ogOSkrZvqdr16zktK1 +puEa+S1BaYEUtLS17Yk9zvupnTVCRLEcFHOBzyoBNZox1S2PbYTfgE1X4z/FhHXa +icYwu+uPyyIIoK6q8QNsOktNCaUOcsZWayFCTiMlFGiudgp8DAdwZPmaL/YFOSbG +DI8Zf0NebvRbFS/bYV3mZy8/CJT5YLSYMdp08YSTcU1f+2BY0fvEwW2JorsgH51x +kcsymxM9Pn2SUjWskpSi0xjCfMfqr3YFFt1nJ8J+HAciIfNAChs0B0QTwoRqjt8Z +Wr9/6x3iGjjRXK9HkmuAtTClyY3YqzGBH9/CZjfTk6mFhnll0g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFgTCCA2mgAwIBAgIIIj+pFyDegZQwDQYJKoZIhvcNAQELBQAwTjELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEoMCYGA1UEAxMfU3dpc3NTaWdu +IFBsYXRpbnVtIFJvb3QgQ0EgLSBHMzAeFw0wOTA4MDQxMzM0MDRaFw0zNzA4MDQx +MzM0MDRaME4xCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxKDAm +BgNVBAMTH1N3aXNzU2lnbiBQbGF0aW51bSBSb290IENBIC0gRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCUoO8TG59EIBvNxaoiu9nyUj56Wlh35o2h +K8ncpPPksxOUAGKbHPJDUEOBfq8wNkmsGIkMGEW4PsdUbePYmllriholqba1Dbd9 +I/BffagHqfc+hi7IAU3c5jbtHeU3B2kSS+OD0QQcJPAfcHHnGe1zSG6VKxW2VuYC +31bpm/rqpu7gwsO64MzGyHvXbzqVmzqPvlss0qmgOD7WiOGxYhOO3KswZ82oaqZj +K4Kwy8c9Tu1y9n2rMk5lAusPmXT4HBoojA5FAJMsFJ9txxue9orce3jjtJRHHU0F +bYR6kFSynot1woDfhzk/n/tIVAeNoCn1+WBfWnLou5ugQuAIADSjFTwT49YaawKy +lCGjnUG8KmtOMzumlDj8PccrM7MuKwZ0rJsQb8VORfddoVYDLA1fer0e3h13kGva +pS2KTOnfQfTnS+x9lUKfTKkJD0OIPz2T5yv0ekjaaMTdEoAxGl0kVCamJCGzTK3a +Fwg2AlfGnIZwyXXJnnxh2HjmuegUafkcECgSXUt1ULo80GdwVVVWS/s9HNjbeU2X +37ie2xcs1TUHuFCp9473Vv96Z0NPINnKZtY4YEvulDHWDaJIm/80aZTGNfWWiO+q +ZsyBputMU/8ydKe2nZhXtLomqfEzM2J+OrADEVf/3G8RI60+xgrQzFS3LcKTHeXC +pozH2O9T9wIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUVio/kFj0F1oUstcIG4VbVGpUGigwHwYDVR0jBBgwFoAUVio/ +kFj0F1oUstcIG4VbVGpUGigwDQYJKoZIhvcNAQELBQADggIBAGztiudDqHknm7jP +hz5kOBiMEUKShjfgWMMb7gQu94TsgxBoDH94LZzCl442ThbYDuprSK1Pnl0NzA2p +PhiFfsxomTk11tifhsEy+01lsyIUS8iFZtoX/3GRrJxWV95xLFZCv/jNDvCi0//S +IhX70HgKfuGwWs6ON9upnueVz2PyLA3S+m/zyNX7ALf3NWcQ03tS7BAy+L/dXsmm +gqTxsL8dLt0l5L1N8DWpkQFH+BAClFvrPusNutUdYyylLqvn4x6j7kuqX7FmAbSC +WvlGS8fx+N8svv113ZY4mjc6bqXmMhVus5DAOYp0pZWgvg0uiXnNKVaOw15XUcQF +bwRVj4HpTL1ZRssqvE3JHfLGTwXkyAQN925P2sM6nNLC9enGJHoUPhxCMKgCRTGp +/FCp3NyGOA9bkz9/CE5qDSc6EHlWwxW4PgaG9tlwZ691eoviWMzGdU8yVcVsFAko +O/KV5GreLCgHraB9Byjd1Fqj6aZ8E4yZC1J429nR3z5aQ3Z/RmBTws3ndkd8Vc20 +OWQQW5VLNV1EgyTV4C4kDMGAbmkAgAZ3CmaCEAxRbzeJV9vzTOW4ue4jZpdgt1Ld +2Zb7uoo7oE3OXvBETJDMIU8bOphrjjGD+YMIUssZwTVr7qEVW4g/bazyNJJTpjAq +E9fmhqhd2ULSx52peovL3+6iMcLl +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt 
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c +wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFfjCCA2agAwIBAgIJAKqIsFoLsXabMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJjAkBgNVBAMTHVN3aXNzU2ln +biBTaWx2ZXIgUm9vdCBDQSAtIEczMB4XDTA5MDgwNDEzMTkxNFoXDTM3MDgwNDEz +MTkxNFowTDELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEmMCQG +A1UEAxMdU3dpc3NTaWduIFNpbHZlciBSb290IENBIC0gRzMwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQC+h5sF5nF8Um9t7Dep6bPczF9/01DqIZsE8D2/ +vo7JpRQWMhDPmfzscK1INmckDBcy1inlSjmxN+umeAxsbxnKTvdR2hro+iE4bJWc +L9aLzDsCm78mmxFFtrg0Wh2mVEhSyJ14cc5ISsyneIPcaKtmHncH0zYYCNfUbWD4 +8HnTMzYJkmO3BJr1p5baRa90GvyC46hbDjo/UleYfrycjMHAslrfxH7+DKZUdoN+ +ut3nKvRKNk+HZS6lujmNWWEp89OOJHCMU5sRpUcHsnUFXA2E2UTZzckmRFduAn2V +AdSrJIbuPXD7V/qwKRTQnfLFl8sJyvHyPefYS5bpiC+eR1GKVGWYSNIS5FR3DAfm +vluc8d0Dfo2E/L7JYtX8yTroibVfwgVSYfCcPuwuTYxykY7IQ8GiKF71gCTc4i+H +O1MA5cvwsnyNeRmgiM14+MWKWnflBqzdSt7mcG6+r771sasOCLDboD+Uxb4Subx7 +J3m1MildrsUgI5IDe1Q5sIkiVG0S48N46jpA/aSTrOktiDzbpkdmTN/YF+0W3hrW +10Fmvx2A8aTgZBEpXgwnBWLr5cQEYtHEnwxqVdZYOJxmD537q1SAmZzsSdaCn9pF +1j9TBgO3/R/shn104KS06DK2qgcj+O8kQZ5jMHj0VN2O8Fo4jhJ/eMdvAlYhM864 +uK1pVQIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUoYxFkwoSYwunV18ySn3hIee3PmYwHwYDVR0jBBgwFoAUoYxFkwoS +YwunV18ySn3hIee3PmYwDQYJKoZIhvcNAQELBQADggIBAIeuYW1IOCrGHNxKLoR4 +ScAjKkW4NU3RBfq5BTPEZL3brVQWKrA+DVoo2qYagHMMxEFvr7g0tnfUW44dC4tG +kES1s+5JGInBSzSzhzV0op5FZ+1FcWa2uaElc9fCrIj70h2na9rAWubYWWQ0l2Ug +MTMDT86tCZ6u6cI+GHW0MyUSuwXsULpxQOK93ohGBSGEi6MrHuswMIm/EfVcRPiR +i0tZRQswDcoMT29jvgT+we3gh/7IzVa/5dyOetTWKU6A26ubP45lByL3RM2WHy3H +9Qm2mHD/ONxQFRGEO3+p8NgkVMgXjCsTSdaZf0XRD46/aXI3Uwf05q79Wz55uQbN +uIF4tE2g0DW65K7/00m8Ne1jxrP846thWgW2C+T/qSq+31ROwktcaNqjMqLJTVcY +UzRZPGaZ1zwCeKdMcdC/2/HEPOcB5gTyRPZIJjAzybEBGesC8cwh+joCMBedyF+A +P90lrAKb4xfevcqSFNJSgVPm6vwwZzKpYvaTFxUHMV4PG2n19Km3fC2z7YREMkco +BzuGaUWpxzaWkHJ02BKmcyPRTrm2ejrEKaFQBhG52fQmbmIIEiAW8AFXF9QFNmeX +61H5/zMkDAUPVr/vPRxSjoreaQ9aH/DVAzFEs5LG6nWorrvHYAOImP/HBIRSkIbh +tJOpUC/o69I2rDBgp9ADE7UK +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICqDCCAi2gAwIBAgIQIW4zpcvTiKRvKQe0JzzE2DAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD 
+VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAxIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTExMDA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAxIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATXZrUb266zYO5G6ohjdTsqlG3zXxL24w+etgoUU0hS +yNw6s8tIICYSTvqJhNTfkeQpfSgB2dsYQ2mhH7XThhbcx39nI9/fMTGDAzVwsUu3 +yBe7UcvclBfb6gk7dhLeqrWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRlwI0l9Qy6l3eQP54u4Fr1ztXh5DAKBggqhkjOPQQD +AwNpADBmAjEApa7jRlP4mDbjIvouKEkN7jB+M/PsP3FezFWJeJmssv3cHFwzjim5 +axfIEWi13IMHAjEAnMhE2mnCNsNUGRCFAtqdR+9B52wmnQk9922Q0QVEL7C8g5No +8gxFSTm/mQQc0xCg +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9jCCAt6gAwIBAgIQJDJ18h0v0gkz97RqytDzmDANBgkqhkiG9w0BAQsFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl +YyBDbGFzcyAxIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTExMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAx +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHOddJZKmZgiJM6kXZBxbje/SD +6Jlz+muxNuCad6BAwoGNAcfMjL2Pffd543pMA03Z+/2HOCgs3ZqLVAjbZ/sbjP4o +ki++t7JIp4Gh2F6Iw8w5QEFa0dzl2hCfL9oBTf0uRnz5LicKaTfukaMbasxEvxvH +w9QRslBglwm9LiL1QYRmn81ApqkAgMEflZKf3vNI79sdd2H8f9/ulqRy0LY+/3gn +r8uSFWkI22MQ4uaXrG7crPaizh5HmbmJtxLmodTNWRFnw2+F2EJOKL5ZVVkElauP +N4C/DfD8HzpkMViBeNfiNfYgPym4jxZuPkjctUwH4fIa6n4KedaovetdhitNAgMB +AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBQzQejIORIVk0jyljIuWvXalF9TYDANBgkqhkiG9w0BAQsFAAOCAQEAFeNzV7EX +tl9JaUSm9l56Z6zS3nVJq/4lVcc6yUQVEG6/MWvL2QeTfxyFYwDjMhLgzMv7OWyP +4lPiPEAz2aSMR+atWPuJr+PehilWNCxFuBL6RIluLRQlKCQBZdbqUqwFblYSCT3Q +dPTXvQbKqDqNVkL6jXI+dPEDct+HG14OelWWLDi3mIXNTTNEyZSPWjEwN0ujOhKz +5zbRIWhLLTjmU64cJVYIVgNnhJ3Gw84kYsdMNs+wBkS39V8C3dlU6S+QTnrIToNA +DJqXPDe/v+z28LSFdyjBC8hnghAXOKK3Buqbvzr46SMHv3TgmDgVVXjucgBcGaP0 +0jPg/73RVDkpDw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICqDCCAi2gAwIBAgIQNBdlEkA7t1aALYDLeVWmHjAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD +VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTExMDA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAyIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATR2UqOTA2ESlG6fO/TzPo6mrWnYxM9AeBJPvrBR8mS +szrX/m+c95o6D/UOCgrDP8jnEhSO1dVtmCyzcTIK6yq99tdqIAtnRZzSsr9TImYJ +XdsR8/EFM1ij4rjPfM2Cm72jQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBQ9MvM6qQyQhPmijGkGYVQvh3L+BTAKBggqhkjOPQQD +AwNpADBmAjEAyKapr0F/tckRQhZoaUxcuCcYtpjxwH+QbYfTjEYX8D5P/OqwCMR6 +S7wIL8fip29lAjEA1lnehs5fDspU1cbQFQ78i5Ry1I4AWFPPfrFLDeVQhuuea9// +KabYR9mglhjb8kWz +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9jCCAt6gAwIBAgIQZIKe/DcedF38l/+XyLH/QTANBgkqhkiG9w0BAQsFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl 
+YyBDbGFzcyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTExMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAy +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNzOkFyGOFyz9AYxe9GPo15gRn +V2WYKaRPyVyPDzTS+NqoE2KquB5QZ3iwFkygOakVeq7t0qLA8JA3KRgmXOgNPLZs +ST/B4NzZS7YUGQum05bh1gnjGSYc+R9lS/kaQxwAg9bQqkmi1NvmYji6UBRDbfkx ++FYW2TgCkc/rbN27OU6Z4TBnRfHU8I3D3/7yOAchfQBeVkSz5GC9kSucq1sEcg+y +KNlyqwUgQiWpWwNqIBDMMfAr2jUs0Pual07wgksr2F82owstr2MNHSV/oW5cYqGN +KD6h/Bwg+AEvulWaEbAZ0shQeWsOagXXqgQ2sqPy4V93p3ec5R7c6d9qwWVdAgMB +AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSHjCCVyJhK0daABkqQNETfHE2/sDANBgkqhkiG9w0BAQsFAAOCAQEAgY6ypWaW +tyGltu9vI1pf24HFQqV4wWn99DzX+VxrcHIa/FqXTQCAiIiCisNxDY7FiZss7Y0L +0nJU9X3UXENX6fOupQIR9nYrgVfdfdp0MP1UR/bgFm6mtApI5ud1Bw8pGTnOefS2 +bMVfmdUfS/rfbSw8DVSAcPCIC4DPxmiiuB1w2XaM/O6lyc+tHc+ZJVdaYkXLFmu9 +Sc2lo4xpeSWuuExsi0BmSxY/zwIa3eFsawdhanYVKZl/G92IgMG/tY9zxaaWI4Sm +KIYkM2oBLldzJbZev4/mHWGoQClnHYebHX+bn5nNMdZUvmK7OaxoEkiRIKXLsd3+ +b/xa5IJVWa8xqQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICpzCCAi2gAwIBAgIQTHm1miicdjFk9YlE0JEC3jAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD +VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTIxMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAzIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAARXz+qzOU0/oSHgbi84csaHl/OFC0fnD1HI0fSZm8pZ +Zf9M+eoLtyXV0vbsMS0yYhLXdoan+jjJZdT+c+KEOfhMSWIT3brViKBfPchPsD+P +oVAR5JNGrcNfy/GkapVW6MCjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBQknbzScfcdwiW+IvGJpSwVOzQeXjAKBggqhkjOPQQD +AwNoADBlAjEAuWZoZdsF0Dh9DvPIdWG40CjEsUozUVj78jwQyK5HeHbKZiQXhj5Q +Vm6lLZmIuL0kAjAD6qfnqDzqnWLGX1TamPR3vU+PGJyRXEdrQE0QHbPhicoLIsga +xcX+i93B3294n5E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF9jCCA96gAwIBAgIQZWNxhdNvRcaPfzH5CYeSgjANBgkqhkiG9w0BAQwFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl +YyBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTIxMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC3DrL6TbyachX7d1vb/UMPywv3 +YC6zK34Mu1PyzE5l8xm7/zUd99Opu0Attd141Kb5N+qFBXttt+YTSwZ8+3ZjjyAd +LTgrBIXy6LDRX01KIclq2JTqHgJQpqqQB6BHIepm+QSg5oPwxPVeluInTWHDs8GM +IrZmoQDRVin77cF/JMo9+lqUsITDx7pDHP1kDvEo+0dZ8ibhMblE+avd+76+LDfj +rAsY0/wBovGkCjWCR0yrvYpe3xOF/CDMSFmvr0FvyyPNypOn3dVfyGQ7/wEDoApP +LW49hL6vyDKyUymQFfewBZoKPPa5BpDJpeFdoDuw/qi2v/WJKFckOiGGceTciotB +VeweMCRZ0cBZuHivqlp03iWAMJjtMERvIXAc2xJTDtamKGaTLB/MTzwbgcW59nhv +0DI6CHLbaw5GF4WU87zvvPekXo7p6bVk5bdLRRIsTDe3YEMKTXEGAJQmNXQfu3o5 +XE475rgD4seTi4QsJUlF3X8jlGAfy+nN9quX92Hn+39igcjcCjBcGHzmzu/Hbh6H +fLPpysh7avRo/IOlDFa0urKNSgrHl5fFiDAVPRAIVBVycmczM/R8t84AJ1NlziTx +WmTnNi/yLgLCl99y6AIeoPc9tftoYAP6M6nmEm0G4amoXU48/tnnAGWsthlNe4N/ 
+NEfq4RhtsYsceavnnQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUOXEIAD7eyIbnkP/k/SEPziQZFvYwDQYJKoZIhvcN +AQEMBQADggIBAFBriE1gSM5a4yLOZ3yEp80c/ekMA4w2rwqHDmquV64B0Da78v25 +c8FftaiuTKL6ScsHRhY2vePIVzh+OOS/JTNgxtw3nGO7XpgeGrKC8K6mdxGAREeh +KcXwszrOmPC47NMOgAZ3IzBM/3lkYyJbd5NDS3Wz2ztuO0rd8ciutTeKlYg6EGhw +OLlbcH7VQ8n8X0/l5ns27vAg7UdXEyYQXhQGDXt2B8LGLRb0rqdsD7yID08sAraj +1yLmmUc12I2lT4ESOhF9s8wLdfMecKMbA+r6mujmLjY5zJnOOj8Mt674Q5mwk25v +qtkPajGRu5zTtCj7g0x6c4JQZ9IOrO1gxbJdNZjPh34eWR0kvFa62qRa2MzmvB4Q +jxuMjvPB27e+1LBbZY8WaPNWxSoZFk0PuGWHbSSDuGLc4EdhGoh7zk5//dzGDVqa +pPO1TPbdMaboHREhMzAEYX0c4D5PjT+1ixIAWn2poQDUg+twuxj4pNIcgS23CBHI +Jnu21OUPA0Zy1CVAHr5JXW2T8VyyO3VUaTqg7kwiuqya4gitRWMFSlI1dsQ09V4H +Mq3cfCbRW4+t5OaqG3Wf61206MCpFXxOSgdy30bJ1JGSdVaw4e43NmUoxRXIK3bM +bW8Zg/T92hXiQeczeUaDV/nxpbZt07zXU+fucW14qZen7iCcGRVyFT0E +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDcTCCAlmgAwIBAgIVAOYJ/nrqAGiM4CS07SAbH+9StETRMA0GCSqGSIb3DQEB +BQUAMFAxCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9LcmFqb3dhIEl6YmEgUm96bGlj +emVuaW93YSBTLkEuMRcwFQYDVQQDDA5TWkFGSVIgUk9PVCBDQTAeFw0xMTEyMDYx +MTEwNTdaFw0zMTEyMDYxMTEwNTdaMFAxCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRcwFQYDVQQDDA5TWkFGSVIg +Uk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKxHL49ZMTml +6g3wpYwrvQKkvc0Kc6oJ5sxfgmp1qZfluwbv88BdocHSiXlY8NzrVYzuWBp7J/9K +ULMAoWoTIzOQ6C9TNm4YbA9A1jdX1wYNL5Akylf8W5L/I4BXhT9KnlI6x+a7BVAm +nr/Ttl+utT/Asms2fRfEsF2vZPMxH4UFqOAhFjxTkmJWf2Cu4nvRQJHcttB+cEAo +ag/hERt/+tzo4URz6x6r19toYmxx4FjjBkUhWQw1X21re//Hof2+0YgiwYT84zLb +eqDqCOMOXxvH480yGDkh/QoazWX3U75HQExT/iJlwnu7I1V6HXztKIwCBjsxffbH +3jOshCJtywcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFFOSo33/gnbwM9TrkmdHYTMbaDsqMA0GCSqGSIb3DQEBBQUA +A4IBAQA5UFWd5EL/pBviIMm1zD2JLUCpp0mJG7JkwznIOzawhGmFFaxGoxAhQBEg +haP+E0KR66oAwVC6xe32QUVSHfWqWndzbODzLB8yj7WAR0cDM45ZngSBPBuFE3Wu +GLJX9g100ETfIX+4YBR/4NR/uvTnpnd9ete7Whl0ZfY94yuu4xQqB5QFv+P7IXXV +lTOjkjuGXEcyQAjQzbFaT9vIABSbeCXWBbjvOXukJy6WgAiclzGNSYprre8Ryydd +fmjW9HIGwsIO03EldivvqEYL1Hv1w/Pur+6FUEOaL68PEIUovfgwIB2BAw+vZDuw +cH0mX548PojGyg434cDjkSXa3mHF +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- +-----BEGIN 
CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp +IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi 
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw +MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig +YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v +dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ +BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 +papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K +DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 +KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox +XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB +rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV +BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa +Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl +LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u +MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm +gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 +YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf +b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 +9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S +zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk +OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV +HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA +2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW +oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c +KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM +m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu +MdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX 
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGHDCCBASgAwIBAgIES45gAzANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJE +SzESMBAGA1UEChMJVFJVU1QyNDA4MSIwIAYDVQQDExlUUlVTVDI0MDggT0NFUyBQ +cmltYXJ5IENBMB4XDTEwMDMwMzEyNDEzNFoXDTM3MTIwMzEzMTEzNFowRTELMAkG +A1UEBhMCREsxEjAQBgNVBAoTCVRSVVNUMjQwODEiMCAGA1UEAxMZVFJVU1QyNDA4 +IE9DRVMgUHJpbWFyeSBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJlJodr3U1Fa+v8HnyACHV81/wLevLS0KUk58VIABl6Wfs3LLNoj5soVAZv4LBi5 +gs7E8CZ9w0F2CopW8vzM8i5HLKE4eedPdnaFqHiBZ0q5aaaQArW+qKJx1rT/AaXt +alMB63/yvJcYlXS2lpexk5H/zDBUXeEQyvfmK+slAySWT6wKxIPDwVapauFY9QaG ++VBhCa5jBstWS7A5gQfEvYqn6csZ3jW472kW6OFNz6ftBcTwufomGJBMkonf4ZLr +6t0AdRi9jflBPz3MNNRGxyjIuAmFqGocYFA/OODBRjvSHB2DygqQ8k+9tlpvzMRr +kU7jq3RKL+83G1dJ3/LTjCLz4ryEMIC/OJ/gNZfE0qXddpPtzflIPtUFVffXdbFV +1t6XZFhJ+wBHQCpJobq/BjqLWUA86upsDbfwnePtmIPRCemeXkY0qabC+2Qmd2Fe +xyZphwTyMnbqy6FG1tB65dYf3mOqStmLa3RcHn9+2dwNfUkh0tjO2FXD7drWcU0O +I9DW8oAypiPhm/QCjMU6j6t+0pzqJ/S0tdAo+BeiXK5hwk6aR+sRb608QfBbRAs3 +U/q8jSPByenggac2BtTN6cl+AA1Mfcgl8iXWNFVGegzd/VS9vINClJCe3FNVoUnR +YCKkj+x0fqxvBLopOkJkmuZw/yhgMxljUi2qYYGn90OzAgMBAAGjggESMIIBDjAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjARBgNVHSAECjAIMAYGBFUd +IAAwgZcGA1UdHwSBjzCBjDAsoCqgKIYmaHR0cDovL2NybC5vY2VzLnRydXN0MjQw +OC5jb20vb2Nlcy5jcmwwXKBaoFikVjBUMQswCQYDVQQGEwJESzESMBAGA1UEChMJ +VFJVU1QyNDA4MSIwIAYDVQQDExlUUlVTVDI0MDggT0NFUyBQcmltYXJ5IENBMQ0w +CwYDVQQDEwRDUkwxMB8GA1UdIwQYMBaAFPZt+LFIs0FDAduGROUYBbdezAY3MB0G +A1UdDgQWBBT2bfixSLNBQwHbhkTlGAW3XswGNzANBgkqhkiG9w0BAQsFAAOCAgEA +VPAQGrT7dIjD3/sIbQW86f9CBPu0c7JKN6oUoRUtKqgJ2KCdcB5ANhCoyznHpu3m +/dUfVUI5hc31CaPgZyY37hch1q4/c9INcELGZVE/FWfehkH+acpdNr7j8UoRZlkN +15b/0UUBfGeiiJG/ugo4llfoPrp8bUmXEGggK3wyqIPcJatPtHwlb6ympfC2b/Ld +v/0IdIOzIOm+A89Q0utx+1cOBq72OHy8gpGb6MfncVFMoL2fjP652Ypgtr8qN9Ka +/XOazktiIf+2Pzp7hLi92hRc9QMYexrV/nnFSQoWdU8TqULFUoZ3zTEC3F/g2yj+ +FhbrgXHGo5/A4O74X+lpbY2XV47aSuw+DzcPt/EhMj2of7SA55WSgbjPMbmNX0rb +oenSIte2HRFW5Tr2W+qqkc/StixgkKdyzGLoFx/xeTWdJkZKwyjqge2wJqws2upY +EiThhC497+/mTiSuXd69eVUwKyqYp9SD2rTtNmF6TCghRM/dNsJOl+osxDVGcwvt +WIVFF/Onlu5fu1NHXdqNEfzldKDUvCfii3L2iATTZyHwU9CALE+2eIA+PIaLgnM1 +1oCfUnYBkQurTrihvzz9PryCVkLxiqRmBVvUz+D4N5G/wvvKDS6t6cPCS+hqM482 +cbBsn0R9fFLO4El62S9eH1tqOzO20OAOK65yJIsOpSE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ +BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW +zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE 
+f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS +MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp 
+bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw +VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy +YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy +dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2 +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe +Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx +GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls +aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU +QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh +xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0 +aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr +IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h +gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK +O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO +fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw +lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID +AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP +NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t +wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM +7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh +gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n +oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs +yZyQ2uypQjyttgI= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc +UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx +c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS +S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg +SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx +OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry +b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC +VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE +sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F +ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY +KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG ++7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG +HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P +IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M +733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk +Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW +AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5 +mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa +XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ +qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBCDANBgkqhkiG9w0BAQUFADA6MQswCQYDVQQGEwJDTjER +MA8GA1UEChMIVW5pVHJ1c3QxGDAWBgNVBAMTD1VDQSBHbG9iYWwgUm9vdDAeFw0w +ODAxMDEwMDAwMDBaFw0zNzEyMzEwMDAwMDBaMDoxCzAJBgNVBAYTAkNOMREwDwYD +VQQKEwhVbmlUcnVzdDEYMBYGA1UEAxMPVUNBIEdsb2JhbCBSb290MIICIjANBgkq 
+hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2rPlBlA/9nP3xDK/RqUlYjOHsGj+p9+I +A2N9Apb964fJ7uIIu527u+RBj8cwiQ9tJMAEbBSUgU2gDXRm8/CFr/hkGd656YGT +0CiFmUdCSiw8OCdKzP/5bBnXtfPvm65bNAbXj6ITBpyKhELVs6OQaG2BkO5NhOxM +cE4t3iQ5zhkAQ5N4+QiGHUPR9HK8BcBn+sBR0smFBySuOR56zUHSNqth6iur8CBV +mTxtLRwuLnWW2HKX4AzKaXPudSsVCeCObbvaE/9GqOgADKwHLx25urnRoPeZnnRc +GQVmMc8+KlL+b5/zub35wYH1N9ouTIElXfbZlJrTNYsgKDdfUet9Ysepk9H50DTL +qScmLCiQkjtVY7cXDlRzq6987DqrcDOsIfsiJrOGrCOp139tywgg8q9A9f9ER3Hd +J90TKKHqdjn5EKCgTUCkJ7JZFStsLSS3JGN490MYeg9NEePorIdCjedYcaSrbqLA +l3y74xNLytu7awj5abQEctXDRrl36v+6++nwOgw19o8PrgaEFt2UVdTvyie3AzzF +HCYq9TyopZWbhvGKiWf4xwxmse1Bv4KmAGg6IjTuHuvlb4l0T2qqaqhXZ1LUIGHB +zlPL/SR/XybfoQhplqCe/klD4tPq2sTxiDEhbhzhzfN1DiBEFsx9c3Q1RSw7gdQg +7LYJjD5IskkCAwEAAaOBojCBnzALBgNVHQ8EBAMCAQYwDAYDVR0TBAUwAwEB/zBj +BgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMDBggrBgEFBQcD +BAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcDBwYIKwYBBQUHAwgGCCsGAQUF +BwMJMB0GA1UdDgQWBBTZw9P4gJJnzF3SOqLXcaK0xDiALTANBgkqhkiG9w0BAQUF +AAOCAgEA0Ih5ygiq9ws0oE4Jwul+NUiJcIQjL1HDKy9e21NrW3UIKlS6Mg7VxnGF +sZdJgPaE0PC6t3GUyHlrpsVE6EKirSUtVy/m1jEp+hmJVCl+t35HNmktbjK81HXa +QnO4TuWDQHOyXd/URHOmYgvbqm4FjMh/Rk85hZCdvBtUKayl1/7lWFZXbSyZoUkh +1WHGjGHhdSTBAd0tGzbDLxLMC9Z4i3WA6UG5iLHKPKkWxk4V43I29tSgQYWvimVw +TbVEEFDs7d9t5tnGwBLxSzovc+k8qe4bqi81pZufTcU0hF8mFGmzI7GJchT46U1R +IgP/SobEHOh7eQrbRyWBfvw0hKxZuFhD5D1DCVR0wtD92e9uWfdyYJl2b/Unp7uD +pEqB7CmB9HdL4UISVdSGKhK28FWbAS7d9qjjGcPORy/AeGEYWsdl/J1GW1fcfA67 +loMQfFUYCQSu0feLKj6g5lDWMDbX54s4U+xJRODPpN/xU3uLWrb2EZBL1nXz/gLz +Ka/wI3J9FO2pXd96gZ6bkiL8HvgBRUGXx2sBYb4zaPKgZYRmvOAqpGjTcezHCN6j +w8k2SjTxF+KAryAhk5Qe5hXTVGLxtTgv48y5ZwSpuuXu+RBuyy5+E6+SFP7zJ3N7 +OPxzbbm5iPZujAv1/P8JDrMtXnt145Ik4ubhWD5LKAN1axibRww= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDhDCCAmygAwIBAgIBCTANBgkqhkiG9w0BAQUFADAzMQswCQYDVQQGEwJDTjER +MA8GA1UEChMIVW5pVHJ1c3QxETAPBgNVBAMTCFVDQSBSb290MB4XDTA0MDEwMTAw +MDAwMFoXDTI5MTIzMTAwMDAwMFowMzELMAkGA1UEBhMCQ04xETAPBgNVBAoTCFVu +aVRydXN0MREwDwYDVQQDEwhVQ0EgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBALNdB8qGJn1r4vs4CQ7MgsJqGgCiFV/W6dQBt1YDAVmP9ThpJHbC +XivF9iu/r/tB/Q9a/KvXg3BNMJjRnrJ2u5LWu+kQKGkoNkTo8SzXWHwk1n8COvCB +a2FgP/Qz3m3l6ihST/ypHWN8C7rqrsRoRuTej8GnsrZYWm0dLNmMOreIy4XU9+gD +Xv2yTVDo1h//rgI/i0+WITyb1yXJHT/7mLFZ5PCpO6+zzYUs4mBGzG+OoOvwNMXx +QhhgrhLtRnUc5dipllq+3lrWeGeWW5N3UPJuG96WUUqm1ktDdSFmjXfsAoR2XEQQ +th1hbOSjIH23jboPkXXHjd+8AmCoKai9PUMCAwEAAaOBojCBnzALBgNVHQ8EBAMC +AQYwDAYDVR0TBAUwAwEB/zBjBgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIG +CCsGAQUFBwMDBggrBgEFBQcDBAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcD +BwYIKwYBBQUHAwgGCCsGAQUFBwMJMB0GA1UdDgQWBBTbHzXza0z/QjFkm827Wh4d +SBC37jANBgkqhkiG9w0BAQUFAAOCAQEAOGy3iPGt+lg3dNHocN6cJ1nL5BXXoMNg +14iABMUwTD3UGusGXllH5rxmy+AI/Og17GJ9ysDawXiv5UZv+4mCI4/211NmVaDe +JRI7cTYWVRJ2+z34VFsxugAG+H1V5ad2g6pcSpemKijfvcZsCyOVjjN/Hl5AHxNU +LJzltQ7dFyiuawHTUin1Ih+QOfTcYmjwPIZH7LgFRbu3DJaUxmfLI3HQjnQi1kHr +A6i26r7EARK1s11AdgYg1GS4KUYGis4fk5oQ7vuqWrTcL9Ury/bXBYSYBZELhPc9 ++tb5evosFeo2gkO3t7jj83EB7UNDogVFwygFBzXjAaU4HoDU18PZ3g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm 
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB +kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw +IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD +VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu +dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 +E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ +D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK +4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq +lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW +bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB 
+o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT +MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js +LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr +BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB +AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj +j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH +KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv +2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 +mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEojCCA4qgAwIBAgIQRL4Mi1AAJLQR0zYlJWfJiTANBgkqhkiG9w0BAQUFADCB +rjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xNjA0BgNVBAMTLVVUTi1VU0VSRmlyc3Qt +Q2xpZW50IEF1dGhlbnRpY2F0aW9uIGFuZCBFbWFpbDAeFw05OTA3MDkxNzI4NTBa +Fw0xOTA3MDkxNzM2NThaMIGuMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAV +BgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5l +dHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRydXN0LmNvbTE2MDQGA1UE +AxMtVVROLVVTRVJGaXJzdC1DbGllbnQgQXV0aGVudGljYXRpb24gYW5kIEVtYWls +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjmFpPJ9q0E7YkY3rs3B +YHW8OWX5ShpHornMSMxqmNVNNRm5pELlzkniii8efNIxB8dOtINknS4p1aJkxIW9 +hVE1eaROaJB7HHqkkqgX8pgV8pPMyaQylbsMTzC9mKALi+VuG6JG+ni8om+rWV6l +L8/K2m2qL+usobNqqrcuZzWLeeEeaYji5kbNoKXqvgvOdjp6Dpvq/NonWz1zHyLm +SGHGTPNpsaguG7bUMSAsvIKKjqQOpdeJQ/wWWq8dcdcRWdq6hw2v+vPhwvCkxWeM +1tZUOt4KpLoDd7NlyP0e03RiqhjKaJMeoYV+9Udly/hNVyh00jT/MLbu9mIwFIws +6wIDAQABo4G5MIG2MAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBSJgmd9xJ0mcABLtFBIfN49rgRufTBYBgNVHR8EUTBPME2gS6BJhkdodHRw +Oi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLVVTRVJGaXJzdC1DbGllbnRBdXRoZW50 +aWNhdGlvbmFuZEVtYWlsLmNybDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwDQYJKoZIhvcNAQEFBQADggEBALFtYV2mGn98q0rkMPxTbyUkxsrt4jFcKw7u +7mFVbwQ+zznexRtJlOTrIEy05p5QLnLZjfWqo7NK2lYcYJeA3IKirUq9iiv/Cwm0 +xtcgBEXkzYABurorbs6q15L+5K/r9CYdFip/bDCVNy8zEqx/3cfREYxRmLLQo5HQ +rfafnoOTHh1CuEava2bwm3/q4wMC5QJRwarVNZ1yQAOJujEdxRBoUp7fooXFXAim +eOZTT7Hot9MUnpOmw2TjrH5xzbyf6QMbzPvprDHBr3wVdAKZw7JHpsIyYdfHb0gk +USeh1YdV8nuPmD0Wnu51tvjQjvLzxq4oW6fw8zYX/MMF08oDSlQ= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy 
+bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEZjCCA06gAwIBAgIQRL4Mi1AAJLQR0zYt4LNfGzANBgkqhkiG9w0BAQUFADCB +lTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHTAbBgNVBAMTFFVUTi1VU0VSRmlyc3Qt +T2JqZWN0MB4XDTk5MDcwOTE4MzEyMFoXDTE5MDcwOTE4NDAzNlowgZUxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJVVDEXMBUGA1UEBxMOU2FsdCBMYWtlIENpdHkxHjAc +BgNVBAoTFVRoZSBVU0VSVFJVU1QgTmV0d29yazEhMB8GA1UECxMYaHR0cDovL3d3 +dy51c2VydHJ1c3QuY29tMR0wGwYDVQQDExRVVE4tVVNFUkZpcnN0LU9iamVjdDCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6qgT+jo2F4qjEAVZURnicP +HxzfOpuCaDDASmEd8S8O+r5596Uj71VRloTN2+O5bj4x2AogZ8f02b+U60cEPgLO +KqJdhwQJ9jCdGIqXsqoc/EHSoTbL+z2RuufZcDX65OeQw5ujm9M89RKZd7G3CeBo +5hy485RjiGpq/gt2yb70IuRnuasaXnfBhQfdDWy/7gbHd2pBnqcP1/vulBe3/IW+ +pKvEHDHd17bR5PDv3xaPslKT16HUiaEHLr/hARJCHhrh2JU022R5KP+6LhHC5ehb +kkj7RwvCbNqtMoNB86XlQXD9ZZBt+vpRxPm9lisZBCzTbafc8H9vg2XiaquHhnUC +AwEAAaOBrzCBrDALBgNVHQ8EBAMCAcYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU2u1kdBScFDyr3ZmpvVsoTYs8ydgwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDov +L2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmlyc3QtT2JqZWN0LmNybDApBgNV +HSUEIjAgBggrBgEFBQcDAwYIKwYBBQUHAwgGCisGAQQBgjcKAwQwDQYJKoZIhvcN +AQEFBQADggEBAAgfUrE3RHjb/c652pWWmKpVZIC1WkDdIaXFwfNfLEzIR1pp6ujw +NTX00CXzyKakh0q9G7FzCL3Uw8q2NbtZhncxzaeAFK4T7/yxSPlrJSUtUbYsbUXB +mMiKVl0+7kNOPmsnjtA6S4ULX9Ptaqd1y9Fahy85dRNacrACgZ++8A+EVCBibGnU +4U3GDZlDAQ0Slox4nb9QorFEqmrPF3rPbw/U+CRVX/A0FklmPlBGyWNxODFiuGK5 +81OtbLUrohKqGU8J2l7nk8aOFAj+8DCAGKCGhU3IfdeLA/5u1fedFqySLKAj5ZyR +Uh+U3xeUc8OzwcFxBSAAeL0TUh2oPs0AH8g= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN2E1Lm0+afY8wR4 +nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/EbRrsC+MO +8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjV +ojYJrKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjb +PG7PoBMAGrgnoeS+Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP2 +6KbqxzcSXKMpHgLZ2x87tNcPVkeBFQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vr +n5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAq2aN17O6x5q25lXQBfGfMY1a +qtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/Ny9Sn2WCVhDr4 +wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3 +ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrs +pSCAaWihT37ha88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4 
+E1Z5T21Q6huwtVexN2ZYI/PcD98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVy +aVNpZ24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24s +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNp +Z24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJBgNV +BAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNp +Z24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24g +Q2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArwoNwtUs22e5LeWU +J92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6tW8UvxDO +JxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUY +wZF7C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9o +koqQHgiBVrKtaaNS0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjN +qWm6o+sdDZykIKbBoMXRRkwXbdKsZj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/E +Srg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0JhU8wI1NQ0kdvekhktdmnLfe +xbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf0xwLRtxyID+u +7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU +sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RI +sH/7NiXaldDxJBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTP +cjnhsUPgKM+351psE2tJs//jGHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG 
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp +U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg +SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln +biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm +GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve +fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ +aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj +aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW +kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC +4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga +FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv +MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB +vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W +ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX +MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 +IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y +IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh +bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF +9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH +H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H 
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN +/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT +rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw +WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs +exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 +sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ +seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz +4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ +BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR +lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 +7M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw +CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h +dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l +cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h +2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E +lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV +ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq +299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t +vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL +dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF +AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR +zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3 +LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd +7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw +++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID+TCCAuGgAwIBAgIQW1fXqEywr9nTb0ugMbTW4jANBgkqhkiG9w0BAQUFADB5 +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xKjAoBgNVBAMTIVZpc2EgSW5m +b3JtYXRpb24gRGVsaXZlcnkgUm9vdCBDQTAeFw0wNTA2MjcxNzQyNDJaFw0yNTA2 +MjkxNzQyNDJaMHkxCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRWSVNBMS8wLQYDVQQL +EyZWaXNhIEludGVybmF0aW9uYWwgU2VydmljZSBBc3NvY2lhdGlvbjEqMCgGA1UE +AxMhVmlzYSBJbmZvcm1hdGlvbiBEZWxpdmVyeSBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyREA4R/QkkfpLx0cYjga/EhIPZpchH0MZsRZ +FfP6C2ITtf/Wc+MtgD4yTK0yoiXvni3d+aCtEgK3GDvkdgYrgF76ROJFZwUQjQ9l +x42gRT05DbXvWFoy7dTglCZ9z/Tt2Cnktv9oxKgmkeHY/CyfpCBg1S8xth2JlGMR +0ug/GMO5zANuegZOv438p5Lt5So+du2Gl+RMFQqEPwqN5uJSqAe0VtmB4gWdQ8on +Bj2ZAM2R73QW7UW0Igt2vA4JaSiNtaAG/Y/58VXWHGgbq7rDtNK1R30X0kJV0rGA +ib3RSwB3LpG7bOjbIucV5mQgJoVjoA1e05w6g1x/KmNTmOGRVwIDAQABo30wezAP +BgNVHRMBAf8EBTADAQH/MDkGA1UdIAQyMDAwLgYFZ4EDAgEwJTAVBggrBgEFBQcC +ARYJMS4yLjMuNC41MAwGCCsGAQUFBwICMAAwDgYDVR0PAQH/BAQDAgEGMB0GA1Ud +DgQWBBRPitp2/2d3I5qmgH1924h1hfeBejANBgkqhkiG9w0BAQUFAAOCAQEACUW1 +QdUHdDJydgDPmYt+telnG/Su+DPaf1cregzlN43bJaJosMP7NwjoJY/H2He4XLWb +5rXEkl+xH1UyUwF7mtaUoxbGxEvt8hPZSTB4da2mzXgwKvXuHyzF5Qjy1hOB0/pS +WaF9ARpVKJJ7TOJQdGKBsF2Ty4fSCLqZLgfxbqwMsd9sysXI3rDXjIhekqvbgeLz +PqZr+pfgFhwCCLSMQWl5Ll3u7Qk9wR094DZ6jj6+JCVCRUS3HyabH4OlM0Vc2K+j 
+INsF/64Or7GNtRf9HYEJvrPxHINxl3JVwhYj4ASeaO4KwhVbwtw94Tc/XrGcexDo +c5lC3rAi4/UZqweYCw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwKgAwIBAgIDAYagMA0GCSqGSIb3DQEBBQUAMIGjMQswCQYDVQQGEwJG +STEQMA4GA1UECBMHRmlubGFuZDEhMB8GA1UEChMYVmFlc3RvcmVraXN0ZXJpa2Vz +a3VzIENBMSkwJwYDVQQLEyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBTZXJ2aWNl +czEZMBcGA1UECxMQVmFybWVubmVwYWx2ZWx1dDEZMBcGA1UEAxMQVlJLIEdvdi4g +Um9vdCBDQTAeFw0wMjEyMTgxMzUzMDBaFw0yMzEyMTgxMzUxMDhaMIGjMQswCQYD +VQQGEwJGSTEQMA4GA1UECBMHRmlubGFuZDEhMB8GA1UEChMYVmFlc3RvcmVraXN0 +ZXJpa2Vza3VzIENBMSkwJwYDVQQLEyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBT +ZXJ2aWNlczEZMBcGA1UECxMQVmFybWVubmVwYWx2ZWx1dDEZMBcGA1UEAxMQVlJL +IEdvdi4gUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALCF +FdrIAzfQo0Y3bBseljDCWoUSZyPyu5/nioFgJ/gTqTy894aqqvTzJSm0/nWuHoGG +igWyHWWyOOi0zCia+xc28ZPVec7Bg4shT8MNrUHfeJ1I4x9CRPw8bSEga60ihCRC +jxdNwlAfZM0tOSJWiP2yY51U2kJpwMhP1xjiPshphJQ9LIDGfM6911Mf64i5psu7 +hVfvV3ZdDIvTXhJBnyHAOfQmbQj6OLOhd7HuFtjQaNq0mKWgZUZKa41+qk1guPjI +DfxxPu45h4G02fhukO4/DmHXHSto5i7hQkQmeCxY8n0Wf2HASSQqiYe2XS8pGfim +545SnkFLWg6quMJmQlMCAwEAAaNVMFMwDwYDVR0TAQH/BAUwAwEB/zARBglghkgB +hvhCAQEEBAMCAAcwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBTb6eGb0tEkC/yr +46Bn6q6cS3f0sDANBgkqhkiG9w0BAQUFAAOCAQEArX1ID1QRnljurw2bEi8hpM2b +uoRH5sklVSPj3xhYKizbXvfNVPVRJHtiZ+GxH0mvNNDrsczZog1Sf0JLiGCXzyVy +t08pLWKfT6HAVVdWDsRol5EfnGTCKTIB6dTI2riBmCguGMcs/OubUpbf9MiQGS0j +8/G7cdqehSO9Gu8u5Hp5t8OdhkktY7ktdM9lDzJmid87Ie4pbzlj2RXBbvbfgD5Q +eBmK3QOjFKU3p7UsfLYRh+cF8ry23tT/l4EohP7+bEaFEEGfTXWMB9SZZ291im/k +UJL2mdUQuMSpe/cXjUu/15WfCdxEDx4yw8DP03kN5Mc7h/CQNIghYkmSBAQfvA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- +` diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_js.go b/vendor/github.com/google/certificate-transparency-go/x509/root_js.go new file mode 100644 index 000000000000..4240207a0a04 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_js.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js && wasm +// +build js,wasm + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{} + +func loadSystemRoots() (*CertPool, error) { + return NewCertPool(), nil +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go b/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go new file mode 100644 index 000000000000..267775dc5f06 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc. + "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", // OpenSUSE + "/etc/pki/tls/cacert.pem", // OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7 + "/etc/ssl/cert.pem", // Alpine Linux +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go new file mode 100644 index 000000000000..2ee1d5ce8005 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo +// +build !cgo + +package x509 + +func loadSystemRoots() (*CertPool, error) { + return execSecurityRoots() +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go b/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go new file mode 100644 index 000000000000..2bdb2fe7136a --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 +// +build plan9 + +package x509 + +import ( + "os" +) + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/sys/lib/tls/ca.pem", +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func loadSystemRoots() (*CertPool, error) { + roots := NewCertPool() + var bestErr error + for _, file := range certFiles { + data, err := os.ReadFile(file) + if err == nil { + roots.AppendCertsFromPEM(data) + return roots, nil + } + if bestErr == nil || (os.IsNotExist(bestErr) && !os.IsNotExist(err)) { + bestErr = err + } + } + if bestErr == nil { + return roots, nil + } + return nil, bestErr +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go b/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go new file mode 100644 index 000000000000..e6d4e6139947 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go @@ -0,0 +1,12 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/certs/ca-certificates.crt", // Solaris 11.2+ + "/etc/ssl/certs/ca-certificates.crt", // Joyent SmartOS + "/etc/ssl/cacert.pem", // OmniOS +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go b/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go new file mode 100644 index 000000000000..ac2d01d97981 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package x509 + +import ( + "os" +) + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. +var certDirectories = []string{ + "/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139 + "/system/etc/security/cacerts", // Android + "/usr/local/share/certs", // FreeBSD + "/etc/pki/tls/certs", // Fedora/RHEL + "/etc/openssl/certs", // NetBSD + "/var/ssl/certs", // AIX +} + +const ( + // certFileEnv is the environment variable which identifies where to locate + // the SSL certificate file. If set this overrides the system default. + certFileEnv = "SSL_CERT_FILE" + + // certDirEnv is the environment variable which identifies which directory + // to check for SSL certificate files. If set this overrides the system default. + certDirEnv = "SSL_CERT_DIR" +) + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func loadSystemRoots() (*CertPool, error) { + roots := NewCertPool() + + files := certFiles + if f := os.Getenv(certFileEnv); f != "" { + files = []string{f} + } + + var firstErr error + for _, file := range files { + data, err := os.ReadFile(file) + if err == nil { + roots.AppendCertsFromPEM(data) + break + } + if firstErr == nil && !os.IsNotExist(err) { + firstErr = err + } + } + + dirs := certDirectories + if d := os.Getenv(certDirEnv); d != "" { + dirs = []string{d} + } + + for _, directory := range dirs { + fis, err := os.ReadDir(directory) + if err != nil { + if firstErr == nil && !os.IsNotExist(err) { + firstErr = err + } + continue + } + rootsAdded := false + for _, fi := range fis { + data, err := os.ReadFile(directory + "/" + fi.Name()) + if err == nil && roots.AppendCertsFromPEM(data) { + rootsAdded = true + } + } + if rootsAdded { + return roots, nil + } + } + + if len(roots.certs) > 0 || firstErr == nil { + return roots, nil + } + + return nil, firstErr +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go b/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go new file mode 100644 index 000000000000..e5cf98e0af09 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build wasip1 +// +build wasip1 + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{} + +func loadSystemRoots() (*CertPool, error) { + return NewCertPool(), nil +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go new file mode 100644 index 000000000000..39ec95ef3aa4 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go @@ -0,0 +1,286 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "errors" + "syscall" + "unsafe" +) + +// Creates a new *syscall.CertContext representing the leaf certificate in an in-memory +// certificate store containing itself and all of the intermediate certificates specified +// in the opts.Intermediates CertPool. +// +// A pointer to the in-memory store is available in the returned CertContext's Store field. +// The store is automatically freed when the CertContext is freed using +// syscall.CertFreeCertificateContext. +func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) { + var storeCtx *syscall.CertContext + + leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw))) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateContext(leafCtx) + + handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0) + if err != nil { + return nil, err + } + defer syscall.CertCloseStore(handle, 0) + + err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx) + if err != nil { + return nil, err + } + + if opts.Intermediates != nil { + for _, intermediate := range opts.Intermediates.certs { + ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw))) + if err != nil { + return nil, err + } + + err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil) + syscall.CertFreeCertificateContext(ctx) + if err != nil { + return nil, err + } + } + } + + return storeCtx, nil +} + +// extractSimpleChain extracts the final certificate chain from a CertSimpleChain. +func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) { + if simpleChain == nil || count == 0 { + return nil, errors.New("x509: invalid simple chain") + } + + simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:count:count] + lastChain := simpleChains[count-1] + elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:lastChain.NumElements:lastChain.NumElements] + for i := 0; i < int(lastChain.NumElements); i++ { + // Copy the buf, since ParseCertificate does not create its own copy. 
+ cert := elements[i].CertContext + encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length:cert.Length] + buf := make([]byte, cert.Length) + copy(buf, encodedCert) + parsedCert, err := ParseCertificate(buf) + if err != nil { + return nil, err + } + chain = append(chain, parsedCert) + } + + return chain, nil +} + +// checkChainTrustStatus checks the trust status of the certificate chain, translating +// any errors it finds into Go errors in the process. +func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error { + if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR { + status := chainCtx.TrustStatus.ErrorStatus + switch status { + case syscall.CERT_TRUST_IS_NOT_TIME_VALID: + return CertificateInvalidError{c, Expired, ""} + default: + return UnknownAuthorityError{c, nil, nil} + } + } + return nil +} + +// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for +// use as a certificate chain for a SSL/TLS server. +func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error { + servernamep, err := syscall.UTF16PtrFromString(opts.DNSName) + if err != nil { + return err + } + sslPara := &syscall.SSLExtraCertChainPolicyPara{ + AuthType: syscall.AUTHTYPE_SERVER, + ServerName: servernamep, + } + sslPara.Size = uint32(unsafe.Sizeof(*sslPara)) + + para := &syscall.CertChainPolicyPara{ + ExtraPolicyPara: convertToPolicyParaType(unsafe.Pointer(sslPara)), + } + para.Size = uint32(unsafe.Sizeof(*para)) + + status := syscall.CertChainPolicyStatus{} + err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status) + if err != nil { + return err + } + + // TODO(mkrautz): use the lChainIndex and lElementIndex fields + // of the CertChainPolicyStatus to provide proper context, instead + // using c. + if status.Error != 0 { + switch status.Error { + case syscall.CERT_E_EXPIRED: + return CertificateInvalidError{c, Expired, ""} + case syscall.CERT_E_CN_NO_MATCH: + return HostnameError{c, opts.DNSName} + case syscall.CERT_E_UNTRUSTEDROOT: + return UnknownAuthorityError{c, nil, nil} + default: + return UnknownAuthorityError{c, nil, nil} + } + } + + return nil +} + +// systemVerify is like Verify, except that it uses CryptoAPI calls +// to build certificate chains and verify them. +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + hasDNSName := opts != nil && len(opts.DNSName) > 0 + + storeCtx, err := createStoreContext(c, opts) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateContext(storeCtx) + + para := new(syscall.CertChainPara) + para.Size = uint32(unsafe.Sizeof(*para)) + + // If there's a DNSName set in opts, assume we're verifying + // a certificate from a TLS server. + if hasDNSName { + oids := []*byte{ + &syscall.OID_PKIX_KP_SERVER_AUTH[0], + // Both IE and Chrome allow certificates with + // Server Gated Crypto as well. Some certificates + // in the wild require them. 
+ &syscall.OID_SERVER_GATED_CRYPTO[0], + &syscall.OID_SGC_NETSCAPE[0], + } + para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR + para.RequestedUsage.Usage.Length = uint32(len(oids)) + para.RequestedUsage.Usage.UsageIdentifiers = &oids[0] + } else { + para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND + para.RequestedUsage.Usage.Length = 0 + para.RequestedUsage.Usage.UsageIdentifiers = nil + } + + var verifyTime *syscall.Filetime + if opts != nil && !opts.CurrentTime.IsZero() { + ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano()) + verifyTime = &ft + } + + // CertGetCertificateChain will traverse Windows's root stores + // in an attempt to build a verified certificate chain. Once + // it has found a verified chain, it stops. MSDN docs on + // CERT_CHAIN_CONTEXT: + // + // When a CERT_CHAIN_CONTEXT is built, the first simple chain + // begins with an end certificate and ends with a self-signed + // certificate. If that self-signed certificate is not a root + // or otherwise trusted certificate, an attempt is made to + // build a new chain. CTLs are used to create the new chain + // beginning with the self-signed certificate from the original + // chain as the end certificate of the new chain. This process + // continues building additional simple chains until the first + // self-signed certificate is a trusted certificate or until + // an additional simple chain cannot be built. + // + // The result is that we'll only get a single trusted chain to + // return to our caller. + var chainCtx *syscall.CertChainContext + err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateChain(chainCtx) + + err = checkChainTrustStatus(c, chainCtx) + if err != nil { + return nil, err + } + + if hasDNSName { + err = checkChainSSLServerPolicy(c, chainCtx, opts) + if err != nil { + return nil, err + } + } + + chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount)) + if err != nil { + return nil, err + } + if len(chain) < 1 { + return nil, errors.New("x509: internal error: system verifier returned an empty chain") + } + + // Mitigate CVE-2020-0601, where the Windows system verifier might be + // tricked into using custom curve parameters for a trusted root, by + // double-checking all ECDSA signatures. If the system was tricked into + // using spoofed parameters, the signature will be invalid for the correct + // ones we parsed. (We don't support custom curves ourselves.) + for i, parent := range chain[1:] { + if parent.PublicKeyAlgorithm != ECDSA { + continue + } + if err := parent.CheckSignature(chain[i].SignatureAlgorithm, + chain[i].RawTBSCertificate, chain[i].Signature); err != nil { + return nil, err + } + } + + return [][]*Certificate{chain}, nil +} + +func loadSystemRoots() (*CertPool, error) { + // TODO: restore this functionality on Windows. We tried to do + // it in Go 1.8 but had to revert it. See Issue 18609. + // Returning (nil, nil) was the old behavior, prior to CL 30578. + // The if statement here avoids vet complaining about + // unreachable code below. 
+ if true { + return nil, nil + } + + const CRYPT_E_NOT_FOUND = 0x80092004 + + store, err := syscall.CertOpenSystemStore(0, syscall.StringToUTF16Ptr("ROOT")) + if err != nil { + return nil, err + } + defer syscall.CertCloseStore(store, 0) + + roots := NewCertPool() + var cert *syscall.CertContext + for { + cert, err = syscall.CertEnumCertificatesInStore(store, cert) + if err != nil { + if errno, ok := err.(syscall.Errno); ok { + if errno == CRYPT_E_NOT_FOUND { + break + } + } + return nil, err + } + if cert == nil { + break + } + // Copy the buf, since ParseCertificate does not create its own copy. + buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length:cert.Length] + buf2 := make([]byte, cert.Length) + copy(buf2, buf) + if c, err := ParseCertificate(buf2); err == nil { + roots.AddCert(c) + } + } + return roots, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go b/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go new file mode 100644 index 000000000000..54f240af07de --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos +// +build zos + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/cacert.pem", // IBM zOS default +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/rpki.go b/vendor/github.com/google/certificate-transparency-go/x509/rpki.go new file mode 100644 index 000000000000..520d6dc3abd4 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/rpki.go @@ -0,0 +1,242 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + + "github.com/google/certificate-transparency-go/asn1" +) + +// IPAddressPrefix describes an IP address prefix as an ASN.1 bit string, +// where the BitLength field holds the prefix length. +type IPAddressPrefix asn1.BitString + +// IPAddressRange describes an (inclusive) IP address range. +type IPAddressRange struct { + Min IPAddressPrefix + Max IPAddressPrefix +} + +// Most relevant values for AFI from: +// http://www.iana.org/assignments/address-family-numbers. +const ( + IPv4AddressFamilyIndicator = uint16(1) + IPv6AddressFamilyIndicator = uint16(2) +) + +// IPAddressFamilyBlocks describes a set of ranges of IP addresses. +type IPAddressFamilyBlocks struct { + // AFI holds an address family indicator from + // http://www.iana.org/assignments/address-family-numbers. + AFI uint16 + // SAFI holds a subsequent address family indicator from + // http://www.iana.org/assignments/safi-namespace. + SAFI byte + // InheritFromIssuer indicates that the set of addresses should + // be taken from the issuer's certificate. + InheritFromIssuer bool + // AddressPrefixes holds prefixes if InheritFromIssuer is false. + AddressPrefixes []IPAddressPrefix + // AddressRanges holds ranges if InheritFromIssuer is false. + AddressRanges []IPAddressRange +} + +// Internal types for asn1 unmarshalling. 
+type ipAddressFamily struct { + AddressFamily []byte // 2-byte AFI plus optional 1 byte SAFI + Choice asn1.RawValue +} + +// Internally, use raw asn1.BitString rather than the IPAddressPrefix +// type alias (so that asn1.Unmarshal() decodes properly). +type ipAddressRange struct { + Min asn1.BitString + Max asn1.BitString +} + +func parseRPKIAddrBlocks(data []byte, nfe *NonFatalErrors) []*IPAddressFamilyBlocks { + // RFC 3779 2.2.3 + // IPAddrBlocks ::= SEQUENCE OF IPAddressFamily + // + // IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI -- + // addressFamily OCTET STRING (SIZE (2..3)), + // ipAddressChoice IPAddressChoice } + // + // IPAddressChoice ::= CHOICE { + // inherit NULL, -- inherit from issuer -- + // addressesOrRanges SEQUENCE OF IPAddressOrRange } + // + // IPAddressOrRange ::= CHOICE { + // addressPrefix IPAddress, + // addressRange IPAddressRange } + // + // IPAddressRange ::= SEQUENCE { + // min IPAddress, + // max IPAddress } + // + // IPAddress ::= BIT STRING + + var addrBlocks []ipAddressFamily + if rest, err := asn1.Unmarshal(data, &addrBlocks); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks extension: %v", err)) + return nil + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ipAddrBlocks extension")) + return nil + } + + var results []*IPAddressFamilyBlocks + for i, block := range addrBlocks { + var fam IPAddressFamilyBlocks + if l := len(block.AddressFamily); l < 2 || l > 3 { + nfe.AddError(fmt.Errorf("invalid address family length (%d) for ipAddrBlock.addressFamily", l)) + continue + } + fam.AFI = binary.BigEndian.Uint16(block.AddressFamily[0:2]) + if len(block.AddressFamily) > 2 { + fam.SAFI = block.AddressFamily[2] + } + // IPAddressChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit) + // tagging of the alternatives -- here, either NULL or SEQUENCE OF. + if bytes.Equal(block.Choice.FullBytes, asn1.NullBytes) { + fam.InheritFromIssuer = true + results = append(results, &fam) + continue + } + + var addrRanges []asn1.RawValue + if _, err := asn1.Unmarshal(block.Choice.FullBytes, &addrRanges); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges: %v", i, err)) + continue + } + for j, ar := range addrRanges { + // Each IPAddressOrRange is a CHOICE where the alternatives have distinct (implicit) + // tags -- here, either BIT STRING or SEQUENCE. 
+ switch ar.Tag { + case asn1.TagBitString: + // BIT STRING for single prefix IPAddress + var val asn1.BitString + if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressPrefix: %v", i, j, err)) + continue + } + fam.AddressPrefixes = append(fam.AddressPrefixes, IPAddressPrefix(val)) + + case asn1.TagSequence: + var val ipAddressRange + if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressRange: %v", i, j, err)) + continue + } + fam.AddressRanges = append(fam.AddressRanges, IPAddressRange{Min: IPAddressPrefix(val.Min), Max: IPAddressPrefix(val.Max)}) + + default: + nfe.AddError(fmt.Errorf("unexpected ASN.1 type in ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d]: %+v", i, j, ar)) + } + } + results = append(results, &fam) + } + return results +} + +// ASIDRange describes an inclusive range of AS Identifiers (AS numbers or routing +// domain identifiers). +type ASIDRange struct { + Min int + Max int +} + +// ASIdentifiers describes a collection of AS Identifiers (AS numbers or routing +// domain identifiers). +type ASIdentifiers struct { + // InheritFromIssuer indicates that the set of AS identifiers should + // be taken from the issuer's certificate. + InheritFromIssuer bool + // ASIDs holds AS identifiers if InheritFromIssuer is false. + ASIDs []int + // ASIDs holds AS identifier ranges (inclusive) if InheritFromIssuer is false. + ASIDRanges []ASIDRange +} + +type asIdentifiers struct { + ASNum asn1.RawValue `asn1:"optional,tag:0"` + RDI asn1.RawValue `asn1:"optional,tag:1"` +} + +func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers { + // RFC 3779 2.3.2 + // ASIdentifierChoice ::= CHOICE { + // inherit NULL, -- inherit from issuer -- + // asIdsOrRanges SEQUENCE OF ASIdOrRange } + // ASIdOrRange ::= CHOICE { + // id ASId, + // range ASRange } + // ASRange ::= SEQUENCE { + // min ASId, + // max ASId } + // ASId ::= INTEGER + if len(val.FullBytes) == 0 { // OPTIONAL + return nil + } + // ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit) + // tagging of the alternatives -- here, either NULL or SEQUENCE OF. + if bytes.Equal(val.Bytes, asn1.NullBytes) { + return &ASIdentifiers{InheritFromIssuer: true} + } + var ids []asn1.RawValue + if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err)) + return nil + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges")) + return nil + } + var asID ASIdentifiers + for i, id := range ids { + // Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit) + // tags -- here, either INTEGER or SEQUENCE. 
+ switch id.Tag { + case asn1.TagInteger: + var val int + if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err)) + continue + } + asID.ASIDs = append(asID.ASIDs, val) + + case asn1.TagSequence: + var val ASIDRange + if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err)) + continue + } + asID.ASIDRanges = append(asID.ASIDRanges, val) + + default: + nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id)) + } + } + return &asID +} + +func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) { + // RFC 3779 2.3.2 + // ASIdentifiers ::= SEQUENCE { + // asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL, + // rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL} + var asIDs asIdentifiers + if rest, err := asn1.Unmarshal(data, &asIDs); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err)) + return nil, nil + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ASIdentifiers extension")) + return nil, nil + } + return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/sec1.go b/vendor/github.com/google/certificate-transparency-go/x509/sec1.go new file mode 100644 index 000000000000..d19407079f3e --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/sec1.go @@ -0,0 +1,127 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "errors" + "fmt" + "math/big" + + "github.com/google/certificate-transparency-go/asn1" +) + +const ecPrivKeyVersion = 1 + +// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure. +// References: +// +// RFC 5915 +// SEC1 - http://www.secg.org/sec1-v2.pdf +// +// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in +// most cases it is not. +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +// ParseECPrivateKey parses an EC private key in SEC 1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY". +func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) { + return parseECPrivateKey(nil, der) +} + +// MarshalECPrivateKey converts an EC private key to SEC 1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY". +// For a more flexible key format which is not EC specific, use +// MarshalPKCS8PrivateKey. +func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) { + oid, ok := OIDFromNamedCurve(key.Curve) + if !ok { + return nil, errors.New("x509: unknown elliptic curve") + } + + return marshalECPrivateKeyWithOID(key, oid) +} + +// marshalECPrivateKey marshals an EC private key into ASN.1, DER format and +// sets the curve ID to the given OID, or omits it if OID is nil. 
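The SEC 1 helpers above share their names with the standard library's ParseECPrivateKey and MarshalECPrivateKey. A minimal round-trip sketch using the standard library versions, with a freshly generated P-256 key standing in for real input:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Marshal to SEC 1, ASN.1 DER and wrap in the customary PEM block type.
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der}))

	// Parse it back and confirm the scalar survived the round trip.
	back, err := x509.ParseECPrivateKey(der)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", back.D.Cmp(key.D) == 0)
}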
+func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) { + privateKeyBytes := key.D.Bytes() + paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8) + copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes) + + return asn1.Marshal(ecPrivateKey{ + Version: 1, + PrivateKey: paddedPrivateKey, + NamedCurveOID: oid, + PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)}, + }) +} + +// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure. +// The OID for the named curve may be provided from another source (such as +// the PKCS8 container) - if it is provided then use this instead of the OID +// that may exist in the EC private key structure. +func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) { + var privKey ecPrivateKey + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)") + } + if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)") + } + return nil, errors.New("x509: failed to parse EC private key: " + err.Error()) + } + if privKey.Version != ecPrivKeyVersion { + return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version) + } + + var nfe NonFatalErrors + var curve elliptic.Curve + if namedCurveOID != nil { + curve = namedCurveFromOID(*namedCurveOID, &nfe) + } else { + curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe) + } + if curve == nil { + return nil, errors.New("x509: unknown elliptic curve") + } + + k := new(big.Int).SetBytes(privKey.PrivateKey) + curveOrder := curve.Params().N + if k.Cmp(curveOrder) >= 0 { + return nil, errors.New("x509: invalid elliptic curve private key value") + } + priv := new(ecdsa.PrivateKey) + priv.Curve = curve + priv.D = k + + privateKey := make([]byte, (curveOrder.BitLen()+7)/8) + + // Some private keys have leading zero padding. This is invalid + // according to [SEC1], but this code will ignore it. + for len(privKey.PrivateKey) > len(privateKey) { + if privKey.PrivateKey[0] != 0 { + return nil, errors.New("x509: invalid private key length") + } + privKey.PrivateKey = privKey.PrivateKey[1:] + } + + // Some private keys remove all leading zeros, this is also invalid + // according to [SEC1] but since OpenSSL used to do this, we ignore + // this too. 
+ copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey) + priv.X, priv.Y = curve.ScalarBaseMult(privateKey) + + return priv, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt b/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt new file mode 100644 index 000000000000..b7fc9c518619 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIJAL8a/lsnspOqMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV +BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz +dHMxETAPBgNVBAMMCHRlc3QtZGlyMB4XDTE3MDIwMTIzNTAyN1oXDTI3MDEzMDIz +NTAyN1owTDELMAkGA1UEBhMCVUsxEzARBgNVBAgMClRlc3QtU3RhdGUxFTATBgNV +BAoMDEdvbGFuZyBUZXN0czERMA8GA1UEAwwIdGVzdC1kaXIwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQDzBoi43Yn30KN13PKFHu8LA4UmgCRToTukLItM +WK2Je45grs/axg9n3YJOXC6hmsyrkOnyBcx1xVNgSrOAll7fSjtChRIX72Xrloxu +XewtWVIrijqz6oylbvEmbRT3O8uynu5rF82Pmdiy8oiSfdywjKuPnE0hjV1ZSCql +MYcXqA+f0JFD8kMv4pbtxjGH8f2DkYQz+hHXLrJH4/MEYdVMQXoz/GDzLyOkrXBN +hpMaBBqg1p0P+tRdfLXuliNzA9vbZylzpF1YZ0gvsr0S5Y6LVtv7QIRygRuLY4kF +k+UYuFq8NrV8TykS7FVnO3tf4XcYZ7r2KV5FjYSrJtNNo85BV5c3xMD3fJ2XcOWk ++oD1ATdgAM3aKmSOxNtNItKKxBe1mkqDH41NbWx7xMad78gDznyeT0tjEOltN2bM +uXU1R/jgR/vq5Ec0AhXJyL/ziIcmuV2fSl/ZxT4ARD+16tgPiIx+welTf0v27/JY +adlfkkL5XsPRrbSguISrj7JeaO/gjG3KnDVHcZvYBpDfHqRhCgrosfe26TZcTXx2 +cRxOfvBjMz1zJAg+esuUzSkerreyRhzD7RpeZTwi6sxvx82MhYMbA3w1LtgdABio +9JRqZy3xqsIbNv7N46WO/qXL1UMRKb1UyHeW8g8btboz+B4zv1U0Nj+9qxPBbQui +dgL9LQIDAQABo1AwTjAdBgNVHQ4EFgQUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwHwYD +VR0jBBgwFoAUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwDAYDVR0TBAUwAwEB/zANBgkq +hkiG9w0BAQsFAAOCAgEAvEVnUYsIOt87rggmLPqEueynkuQ+562M8EDHSQl82zbe +xDCxeg3DvPgKb+RvaUdt1362z/szK10SoeMgx6+EQLoV9LiVqXwNqeYfixrhrdw3 +ppAhYYhymdkbUQCEMHypmXP1vPhAz4o8Bs+eES1M+zO6ErBiD7SqkmBElT+GixJC +6epC9ZQFs+dw3lPlbiZSsGE85sqc3VAs0/JgpL/pb1/Eg4s0FUhZD2C2uWdSyZGc +g0/v3aXJCp4j/9VoNhI1WXz3M45nysZIL5OQgXymLqJElQa1pZ3Wa4i/nidvT4AT +Xlxc/qijM8set/nOqp7hVd5J0uG6qdwLRILUddZ6OpXd7ZNi1EXg+Bpc7ehzGsDt +3UFGzYXDjxYnK2frQfjLS8stOQIqSrGthW6x0fdkVx0y8BByvd5J6+JmZl4UZfzA +m99VxXSt4B9x6BvnY7ktzcFDOjtuLc4B/7yg9fv1eQuStA4cHGGAttsCg1X/Kx8W +PvkkeH0UWDZ9vhH9K36703z89da6MWF+bz92B0+4HoOmlVaXRkvblsNaynJnL0LC +Ayry7QBxuh5cMnDdRwJB3AVJIiJ1GVpb7aGvBOnx+s2lwRv9HWtghb+cbwwktx1M +JHyBf3GZNSWTpKY7cD8V+NnBv3UuioOVVo+XAU4LF/bYUjdRpxWADJizNtZrtFo= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt b/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt new file mode 100644 index 000000000000..caa83b9f824c --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFbTCCA1WgAwIBAgIJAN338vEmMtLsMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNV +BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz +dHMxEjAQBgNVBAMMCXRlc3QtZmlsZTAeFw0xNzAyMDEyMzUyMDhaFw0yNzAxMzAy +MzUyMDhaME0xCzAJBgNVBAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYD +VQQKDAxHb2xhbmcgVGVzdHMxEjAQBgNVBAMMCXRlc3QtZmlsZTCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAPMGiLjdiffQo3Xc8oUe7wsDhSaAJFOhO6Qs +i0xYrYl7jmCuz9rGD2fdgk5cLqGazKuQ6fIFzHXFU2BKs4CWXt9KO0KFEhfvZeuW +jG5d7C1ZUiuKOrPqjKVu8SZtFPc7y7Ke7msXzY+Z2LLyiJJ93LCMq4+cTSGNXVlI +KqUxhxeoD5/QkUPyQy/ilu3GMYfx/YORhDP6Edcuskfj8wRh1UxBejP8YPMvI6St +cE2GkxoEGqDWnQ/61F18te6WI3MD29tnKXOkXVhnSC+yvRLljotW2/tAhHKBG4tj +iQWT5Ri4Wrw2tXxPKRLsVWc7e1/hdxhnuvYpXkWNhKsm002jzkFXlzfEwPd8nZdw 
+5aT6gPUBN2AAzdoqZI7E200i0orEF7WaSoMfjU1tbHvExp3vyAPOfJ5PS2MQ6W03 +Zsy5dTVH+OBH++rkRzQCFcnIv/OIhya5XZ9KX9nFPgBEP7Xq2A+IjH7B6VN/S/bv +8lhp2V+SQvlew9GttKC4hKuPsl5o7+CMbcqcNUdxm9gGkN8epGEKCuix97bpNlxN +fHZxHE5+8GMzPXMkCD56y5TNKR6ut7JGHMPtGl5lPCLqzG/HzYyFgxsDfDUu2B0A +GKj0lGpnLfGqwhs2/s3jpY7+pcvVQxEpvVTId5byDxu1ujP4HjO/VTQ2P72rE8Ft +C6J2Av0tAgMBAAGjUDBOMB0GA1UdDgQWBBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAf +BgNVHSMEGDAWgBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAMBgNVHRMEBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4ICAQB3sCntCcQwhMgRPPyvOCMyTcQ/Iv+cpfxz2Ck14nlx +AkEAH2CH0ov5GWTt07/ur3aa5x+SAKi0J3wTD1cdiw4U/6Uin6jWGKKxvoo4IaeK +SbM8w/6eKx6UbmHx7PA/eRABY9tTlpdPCVgw7/o3WDr03QM+IAtatzvaCPPczake +pbdLwmBZB/v8V+6jUajy6jOgdSH0PyffGnt7MWgDETmNC6p/Xigp5eh+C8Fb4NGT +xgHES5PBC+sruWp4u22bJGDKTvYNdZHsnw/CaKQWNsQqwisxa3/8N5v+PCff/pxl +r05pE3PdHn9JrCl4iWdVlgtiI9BoPtQyDfa/OEFaScE8KYR8LxaAgdgp3zYncWls +BpwQ6Y/A2wIkhlD9eEp5Ib2hz7isXOs9UwjdriKqrBXqcIAE5M+YIk3+KAQKxAtd +4YsK3CSJ010uphr12YKqlScj4vuKFjuOtd5RyyMIxUG3lrrhAu2AzCeKCLdVgA8+ +75FrYMApUdvcjp4uzbBoED4XRQlx9kdFHVbYgmE/+yddBYJM8u4YlgAL0hW2/D8p +z9JWIfxVmjJnBnXaKGBuiUyZ864A3PJndP6EMMo7TzS2CDnfCYuJjvI0KvDjFNmc +rQA04+qfMSEz3nmKhbbZu4eYLzlADhfH8tT4GMtXf71WLA5AUHGf2Y4+HIHTsmHG +vQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/certificate-transparency-go/x509/verify.go b/vendor/github.com/google/certificate-transparency-go/x509/verify.go new file mode 100644 index 000000000000..07118c2bf654 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/verify.go @@ -0,0 +1,1109 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/url" + "os" + "reflect" + "runtime" + "strings" + "time" + "unicode/utf8" +) + +// ignoreCN disables interpreting Common Name as a hostname. See issue 24151. +var ignoreCN = strings.Contains(os.Getenv("GODEBUG"), "x509ignoreCN=1") + +type InvalidReason int + +const ( + // NotAuthorizedToSign results when a certificate is signed by another + // which isn't marked as a CA certificate. + NotAuthorizedToSign InvalidReason = iota + // Expired results when a certificate has expired, based on the time + // given in the VerifyOptions. + Expired + // CANotAuthorizedForThisName results when an intermediate or root + // certificate has a name constraint which doesn't permit a DNS or + // other name (including IP address) in the leaf certificate. + CANotAuthorizedForThisName + // TooManyIntermediates results when a path length constraint is + // violated. + TooManyIntermediates + // IncompatibleUsage results when the certificate's key usage indicates + // that it may only be used for a different purpose. + IncompatibleUsage + // NameMismatch results when the subject name of a parent certificate + // does not match the issuer name in the child. + NameMismatch + // NameConstraintsWithoutSANs results when a leaf certificate doesn't + // contain a Subject Alternative Name extension, but a CA certificate + // contains name constraints, and the Common Name can be interpreted as + // a hostname. + // + // You can avoid this error by setting the experimental GODEBUG environment + // variable to "x509ignoreCN=1", disabling Common Name matching entirely. + // This behavior might become the default in the future. 
+ NameConstraintsWithoutSANs + // UnconstrainedName results when a CA certificate contains permitted + // name constraints, but leaf certificate contains a name of an + // unsupported or unconstrained type. + UnconstrainedName + // TooManyConstraints results when the number of comparison operations + // needed to check a certificate exceeds the limit set by + // VerifyOptions.MaxConstraintComparisions. This limit exists to + // prevent pathological certificates can consuming excessive amounts of + // CPU time to verify. + TooManyConstraints + // CANotAuthorizedForExtKeyUsage results when an intermediate or root + // certificate does not permit a requested extended key usage. + CANotAuthorizedForExtKeyUsage +) + +// CertificateInvalidError results when an odd error occurs. Users of this +// library probably want to handle all these errors uniformly. +type CertificateInvalidError struct { + Cert *Certificate + Reason InvalidReason + Detail string +} + +func (e CertificateInvalidError) Error() string { + switch e.Reason { + case NotAuthorizedToSign: + return "x509: certificate is not authorized to sign other certificates" + case Expired: + return "x509: certificate has expired or is not yet valid: " + e.Detail + case CANotAuthorizedForThisName: + return "x509: a root or intermediate certificate is not authorized to sign for this name: " + e.Detail + case CANotAuthorizedForExtKeyUsage: + return "x509: a root or intermediate certificate is not authorized for an extended key usage: " + e.Detail + case TooManyIntermediates: + return "x509: too many intermediates for path length constraint" + case IncompatibleUsage: + return "x509: certificate specifies an incompatible key usage" + case NameMismatch: + return "x509: issuer name does not match subject from issuing certificate" + case NameConstraintsWithoutSANs: + return "x509: issuer has name constraints but leaf doesn't have a SAN extension" + case UnconstrainedName: + return "x509: issuer has name constraints but leaf contains unknown or unconstrained name: " + e.Detail + } + return "x509: unknown error" +} + +// HostnameError results when the set of authorized names doesn't match the +// requested name. +type HostnameError struct { + Certificate *Certificate + Host string +} + +func (h HostnameError) Error() string { + c := h.Certificate + + if !c.hasSANExtension() && !validHostname(c.Subject.CommonName) && + matchHostnames(toLowerCaseASCII(c.Subject.CommonName), toLowerCaseASCII(h.Host)) { + // This would have validated, if it weren't for the validHostname check on Common Name. + return "x509: Common Name is not a valid hostname: " + c.Subject.CommonName + } + + var valid string + if ip := net.ParseIP(h.Host); ip != nil { + // Trying to validate an IP + if len(c.IPAddresses) == 0 { + return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs" + } + for _, san := range c.IPAddresses { + if len(valid) > 0 { + valid += ", " + } + valid += san.String() + } + } else { + if c.commonNameAsHostname() { + valid = c.Subject.CommonName + } else { + valid = strings.Join(c.DNSNames, ", ") + } + } + + if len(valid) == 0 { + return "x509: certificate is not valid for any names, but wanted to match " + h.Host + } + return "x509: certificate is valid for " + valid + ", not " + h.Host +} + +// UnknownAuthorityError results when the certificate issuer is unknown +type UnknownAuthorityError struct { + Cert *Certificate + // hintErr contains an error that may be helpful in determining why an + // authority wasn't found. 
+ hintErr error + // hintCert contains a possible authority certificate that was rejected + // because of the error in hintErr. + hintCert *Certificate +} + +func (e UnknownAuthorityError) Error() string { + s := "x509: certificate signed by unknown authority" + if e.hintErr != nil { + certName := e.hintCert.Subject.CommonName + if len(certName) == 0 { + if len(e.hintCert.Subject.Organization) > 0 { + certName = e.hintCert.Subject.Organization[0] + } else { + certName = "serial:" + e.hintCert.SerialNumber.String() + } + } + s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName) + } + return s +} + +// SystemRootsError results when we fail to load the system root certificates. +type SystemRootsError struct { + Err error +} + +func (se SystemRootsError) Error() string { + msg := "x509: failed to load system roots and no roots provided" + if se.Err != nil { + return msg + "; " + se.Err.Error() + } + return msg +} + +// errNotParsed is returned when a certificate without ASN.1 contents is +// verified. Platform-specific verification needs the ASN.1 contents. +var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate") + +// VerifyOptions contains parameters for Certificate.Verify. It's a structure +// because other PKIX verification APIs have ended up needing many options. +type VerifyOptions struct { + DNSName string + Intermediates *CertPool + Roots *CertPool // if nil, the system roots are used + CurrentTime time.Time // if zero, the current time is used + // Options to disable various verification checks. + DisableTimeChecks bool + DisableCriticalExtensionChecks bool + DisableNameChecks bool + DisableEKUChecks bool + DisablePathLenChecks bool + DisableNameConstraintChecks bool + // KeyUsage specifies which Extended Key Usage values are acceptable. A leaf + // certificate is accepted if it contains any of the listed values. An empty + // list means ExtKeyUsageServerAuth. To accept any key usage, include + // ExtKeyUsageAny. + // + // Certificate chains are required to nest these extended key usage values. + // (This matches the Windows CryptoAPI behavior, but not the spec.) + KeyUsages []ExtKeyUsage + // MaxConstraintComparisions is the maximum number of comparisons to + // perform when checking a given certificate's name constraints. If + // zero, a sensible default is used. This limit prevents pathological + // certificates from consuming excessive amounts of CPU time when + // validating. + MaxConstraintComparisions int +} + +const ( + leafCertificate = iota + intermediateCertificate + rootCertificate +) + +// rfc2821Mailbox represents a “mailbox” (which is an email address to most +// people) by breaking it into the “local” (i.e. before the '@') and “domain” +// parts. +type rfc2821Mailbox struct { + local, domain string +} + +// parseRFC2821Mailbox parses an email address into local and domain parts, +// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280, +// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The +// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”. 
+func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { + if len(in) == 0 { + return mailbox, false + } + + localPartBytes := make([]byte, 0, len(in)/2) + + if in[0] == '"' { + // Quoted-string = DQUOTE *qcontent DQUOTE + // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127 + // qcontent = qtext / quoted-pair + // qtext = non-whitespace-control / + // %d33 / %d35-91 / %d93-126 + // quoted-pair = ("\" text) / obs-qp + // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text + // + // (Names beginning with “obs-” are the obsolete syntax from RFC 2822, + // Section 4. Since it has been 16 years, we no longer accept that.) + in = in[1:] + QuotedString: + for { + if len(in) == 0 { + return mailbox, false + } + c := in[0] + in = in[1:] + + switch { + case c == '"': + break QuotedString + + case c == '\\': + // quoted-pair + if len(in) == 0 { + return mailbox, false + } + if in[0] == 11 || + in[0] == 12 || + (1 <= in[0] && in[0] <= 9) || + (14 <= in[0] && in[0] <= 127) { + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + } else { + return mailbox, false + } + + case c == 11 || + c == 12 || + // Space (char 32) is not allowed based on the + // BNF, but RFC 3696 gives an example that + // assumes that it is. Several “verified” + // errata continue to argue about this point. + // We choose to accept it. + c == 32 || + c == 33 || + c == 127 || + (1 <= c && c <= 8) || + (14 <= c && c <= 31) || + (35 <= c && c <= 91) || + (93 <= c && c <= 126): + // qtext + localPartBytes = append(localPartBytes, c) + + default: + return mailbox, false + } + } + } else { + // Atom ("." Atom)* + NextChar: + for len(in) > 0 { + // atext from RFC 2822, Section 3.2.4 + c := in[0] + + switch { + case c == '\\': + // Examples given in RFC 3696 suggest that + // escaped characters can appear outside of a + // quoted string. Several “verified” errata + // continue to argue the point. We choose to + // accept it. + in = in[1:] + if len(in) == 0 { + return mailbox, false + } + fallthrough + + case ('0' <= c && c <= '9') || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + c == '!' || c == '#' || c == '$' || c == '%' || + c == '&' || c == '\'' || c == '*' || c == '+' || + c == '-' || c == '/' || c == '=' || c == '?' || + c == '^' || c == '_' || c == '`' || c == '{' || + c == '|' || c == '}' || c == '~' || c == '.': + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + + default: + break NextChar + } + } + + if len(localPartBytes) == 0 { + return mailbox, false + } + + // From RFC 3696, Section 3: + // “period (".") may also appear, but may not be used to start + // or end the local part, nor may two or more consecutive + // periods appear.” + twoDots := []byte{'.', '.'} + if localPartBytes[0] == '.' || + localPartBytes[len(localPartBytes)-1] == '.' || + bytes.Contains(localPartBytes, twoDots) { + return mailbox, false + } + } + + if len(in) == 0 || in[0] != '@' { + return mailbox, false + } + in = in[1:] + + // The RFC species a format for domains, but that's known to be + // violated in practice so we accept that anything after an '@' is the + // domain part. + if _, ok := domainToReverseLabels(in); !ok { + return mailbox, false + } + + mailbox.local = string(localPartBytes) + mailbox.domain = in + return mailbox, true +} + +// domainToReverseLabels converts a textual domain name like foo.example.com to +// the list of labels in reverse order, e.g. ["com", "example", "foo"]. 
+func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { + for len(domain) > 0 { + if i := strings.LastIndexByte(domain, '.'); i == -1 { + reverseLabels = append(reverseLabels, domain) + domain = "" + } else { + reverseLabels = append(reverseLabels, domain[i+1:]) + domain = domain[:i] + } + } + + if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 { + // An empty label at the end indicates an absolute value. + return nil, false + } + + for _, label := range reverseLabels { + if len(label) == 0 { + // Empty labels are otherwise invalid. + return nil, false + } + + for _, c := range label { + if c < 33 || c > 126 { + // Invalid character. + return nil, false + } + } + } + + return reverseLabels, true +} + +func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) { + // If the constraint contains an @, then it specifies an exact mailbox + // name. + if strings.Contains(constraint, "@") { + constraintMailbox, ok := parseRFC2821Mailbox(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse constraint %q", constraint) + } + return mailbox.local == constraintMailbox.local && strings.EqualFold(mailbox.domain, constraintMailbox.domain), nil + } + + // Otherwise the constraint is like a DNS constraint of the domain part + // of the mailbox. + return matchDomainConstraint(mailbox.domain, constraint) +} + +func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { + // From RFC 5280, Section 4.2.1.10: + // “a uniformResourceIdentifier that does not include an authority + // component with a host name specified as a fully qualified domain + // name (e.g., if the URI either does not include an authority + // component or includes an authority component in which the host name + // is specified as an IP address), then the application MUST reject the + // certificate.” + + host := uri.Host + if len(host) == 0 { + return false, fmt.Errorf("URI with empty host (%q) cannot be matched against constraints", uri.String()) + } + + if strings.Contains(host, ":") && !strings.HasSuffix(host, "]") { + var err error + host, _, err = net.SplitHostPort(uri.Host) + if err != nil { + return false, err + } + } + + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") || + net.ParseIP(host) != nil { + return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) + } + + return matchDomainConstraint(host, constraint) +} + +func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { + if len(ip) != len(constraint.IP) { + return false, nil + } + + for i := range ip { + if mask := constraint.Mask[i]; ip[i]&mask != constraint.IP[i]&mask { + return false, nil + } + } + + return true, nil +} + +func matchDomainConstraint(domain, constraint string) (bool, error) { + // The meaning of zero length constraints is not specified, but this + // code follows NSS and accepts them as matching everything. + if len(constraint) == 0 { + return true, nil + } + + domainLabels, ok := domainToReverseLabels(domain) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + } + + // RFC 5280 says that a leading period in a domain name means that at + // least one label must be prepended, but only for URI and email + // constraints, not DNS constraints. The code also supports that + // behaviour for DNS constraints. + + mustHaveSubdomains := false + if constraint[0] == '.' 
{ + mustHaveSubdomains = true + constraint = constraint[1:] + } + + constraintLabels, ok := domainToReverseLabels(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + } + + if len(domainLabels) < len(constraintLabels) || + (mustHaveSubdomains && len(domainLabels) == len(constraintLabels)) { + return false, nil + } + + for i, constraintLabel := range constraintLabels { + if !strings.EqualFold(constraintLabel, domainLabels[i]) { + return false, nil + } + } + + return true, nil +} + +// checkNameConstraints checks that c permits a child certificate to claim the +// given name, of type nameType. The argument parsedName contains the parsed +// form of name, suitable for passing to the match function. The total number +// of comparisons is tracked in the given count and should not exceed the given +// limit. +func (c *Certificate) checkNameConstraints(count *int, + maxConstraintComparisons int, + nameType string, + name string, + parsedName interface{}, + match func(parsedName, constraint interface{}) (match bool, err error), + permitted, excluded interface{}) error { + + excludedValue := reflect.ValueOf(excluded) + + *count += excludedValue.Len() + if *count > maxConstraintComparisons { + return CertificateInvalidError{c, TooManyConstraints, ""} + } + + for i := 0; i < excludedValue.Len(); i++ { + constraint := excludedValue.Index(i).Interface() + match, err := match(parsedName, constraint) + if err != nil { + return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()} + } + + if match { + return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is excluded by constraint %q", nameType, name, constraint)} + } + } + + permittedValue := reflect.ValueOf(permitted) + + *count += permittedValue.Len() + if *count > maxConstraintComparisons { + return CertificateInvalidError{c, TooManyConstraints, ""} + } + + ok := true + for i := 0; i < permittedValue.Len(); i++ { + constraint := permittedValue.Index(i).Interface() + + var err error + if ok, err = match(parsedName, constraint); err != nil { + return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()} + } + + if ok { + break + } + } + + if !ok { + return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is not permitted by any constraint", nameType, name)} + } + + return nil +} + +// isValid performs validity checks on c given that it is a candidate to append +// to the chain in currentChain. 
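As a standalone illustration of the leading-dot rule implemented by matchDomainConstraint above, the following simplified, hypothetical helper (it skips the label parsing and validation the real code performs) shows how a constraint beginning with "." requires at least one extra label on the candidate name:

package main

import (
	"fmt"
	"strings"
)

func matches(domain, constraint string) bool {
	if constraint == "" {
		return true // zero-length constraints match everything, as in the code above
	}
	mustHaveSubdomains := false
	if strings.HasPrefix(constraint, ".") {
		mustHaveSubdomains = true
		constraint = constraint[1:]
	}
	d := strings.Split(strings.ToLower(domain), ".")
	c := strings.Split(strings.ToLower(constraint), ".")
	if len(d) < len(c) || (mustHaveSubdomains && len(d) == len(c)) {
		return false
	}
	// Compare label by label from the right-hand end.
	for i := 1; i <= len(c); i++ {
		if d[len(d)-i] != c[len(c)-i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(matches("foo.example.com", "example.com"))  // true
	fmt.Println(matches("example.com", ".example.com"))     // false: subdomain required
	fmt.Println(matches("foo.example.com", ".example.com")) // true
}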
+func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error { + if !opts.DisableCriticalExtensionChecks && len(c.UnhandledCriticalExtensions) > 0 { + return UnhandledCriticalExtension{ID: c.UnhandledCriticalExtensions[0]} + } + + if !opts.DisableNameChecks && len(currentChain) > 0 { + child := currentChain[len(currentChain)-1] + if !bytes.Equal(child.RawIssuer, c.RawSubject) { + return CertificateInvalidError{c, NameMismatch, ""} + } + } + + if !opts.DisableTimeChecks { + now := opts.CurrentTime + if now.IsZero() { + now = time.Now() + } + if now.Before(c.NotBefore) { + return CertificateInvalidError{ + Cert: c, + Reason: Expired, + Detail: fmt.Sprintf("current time %s is before %s", now.Format(time.RFC3339), c.NotBefore.Format(time.RFC3339)), + } + } else if now.After(c.NotAfter) { + return CertificateInvalidError{ + Cert: c, + Reason: Expired, + Detail: fmt.Sprintf("current time %s is after %s", now.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)), + } + } + } + + maxConstraintComparisons := opts.MaxConstraintComparisions + if maxConstraintComparisons == 0 { + maxConstraintComparisons = 250000 + } + comparisonCount := 0 + + var leaf *Certificate + if certType == intermediateCertificate || certType == rootCertificate { + if len(currentChain) == 0 { + return errors.New("x509: internal error: empty chain when appending CA cert") + } + leaf = currentChain[0] + } + + checkNameConstraints := !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() + if checkNameConstraints && leaf.commonNameAsHostname() { + // This is the deprecated, legacy case of depending on the commonName as + // a hostname. We don't enforce name constraints against the CN, but + // VerifyHostname will look for hostnames in there if there are no SANs. + // In order to ensure VerifyHostname will not accept an unchecked name, + // return an error here. 
+ return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""} + } else if checkNameConstraints && leaf.hasSANExtension() { + err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + name := string(data) + mailbox, ok := parseRFC2821Mailbox(name) + if !ok { + return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, + func(parsedName, constraint interface{}) (bool, error) { + return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string)) + }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil { + return err + } + + case nameTypeDNS: + name := string(data) + if _, ok := domainToReverseLabels(name); !ok { + return fmt.Errorf("x509: cannot parse dnsName %q", name) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, + func(parsedName, constraint interface{}) (bool, error) { + return matchDomainConstraint(parsedName.(string), constraint.(string)) + }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil { + return err + } + + case nameTypeURI: + name := string(data) + uri, err := url.Parse(name) + if err != nil { + return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri, + func(parsedName, constraint interface{}) (bool, error) { + return matchURIConstraint(parsedName.(*url.URL), constraint.(string)) + }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil { + return err + } + + case nameTypeIP: + ip := net.IP(data) + if l := len(ip); l != net.IPv4len && l != net.IPv6len { + return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip, + func(parsedName, constraint interface{}) (bool, error) { + return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet)) + }, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil { + return err + } + + default: + // Unknown SAN types are ignored. + } + + return nil + }) + + if err != nil { + return err + } + } + + // KeyUsage status flags are ignored. From Engineering Security, Peter + // Gutmann: A European government CA marked its signing certificates as + // being valid for encryption only, but no-one noticed. Another + // European CA marked its signature keys as not being valid for + // signatures. A different CA marked its own trusted root certificate + // as being invalid for certificate signing. Another national CA + // distributed a certificate to be used to encrypt data for the + // country’s tax authority that was marked as only being usable for + // digital signatures but not for encryption. Yet another CA reversed + // the order of the bit flags in the keyUsage due to confusion over + // encoding endianness, essentially setting a random keyUsage in + // certificates that it issued. Another CA created a self-invalidating + // certificate by adding a certificate policy statement stipulating + // that the certificate had to be used strictly as specified in the + // keyUsage, and a keyUsage containing a flag indicating that the RSA + // encryption key could only be used for Diffie-Hellman key agreement. 
+ + if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) { + return CertificateInvalidError{c, NotAuthorizedToSign, ""} + } + + if !opts.DisablePathLenChecks && c.BasicConstraintsValid && c.MaxPathLen >= 0 { + numIntermediates := len(currentChain) - 1 + if numIntermediates > c.MaxPathLen { + return CertificateInvalidError{c, TooManyIntermediates, ""} + } + } + + return nil +} + +// Verify attempts to verify c by building one or more chains from c to a +// certificate in opts.Roots, using certificates in opts.Intermediates if +// needed. If successful, it returns one or more chains where the first +// element of the chain is c and the last element is from opts.Roots. +// +// If opts.Roots is nil and system roots are unavailable the returned error +// will be of type SystemRootsError. +// +// Name constraints in the intermediates will be applied to all names claimed +// in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim +// example.com if an intermediate doesn't permit it, even if example.com is not +// the name being validated. Note that DirectoryName constraints are not +// supported. +// +// Extended Key Usage values are enforced down a chain, so an intermediate or +// root that enumerates EKUs prevents a leaf from asserting an EKU not in that +// list. +// +// WARNING: this function doesn't do any revocation checking. +func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) { + // Platform-specific verification needs the ASN.1 contents so + // this makes the behavior consistent across platforms. + if len(c.Raw) == 0 { + return nil, errNotParsed + } + if opts.Intermediates != nil { + for _, intermediate := range opts.Intermediates.certs { + if len(intermediate.Raw) == 0 { + return nil, errNotParsed + } + } + } + + // Use Windows's own verification and chain building. + if opts.Roots == nil && runtime.GOOS == "windows" { + return c.systemVerify(&opts) + } + + if opts.Roots == nil { + opts.Roots = systemRootsPool() + if opts.Roots == nil { + return nil, SystemRootsError{systemRootsErr} + } + } + + err = c.isValid(leafCertificate, nil, &opts) + if err != nil { + return + } + + if len(opts.DNSName) > 0 { + err = c.VerifyHostname(opts.DNSName) + if err != nil { + return + } + } + + var candidateChains [][]*Certificate + if opts.Roots.contains(c) { + candidateChains = append(candidateChains, []*Certificate{c}) + } else { + if candidateChains, err = c.buildChains(nil, []*Certificate{c}, nil, &opts); err != nil { + return nil, err + } + } + + keyUsages := opts.KeyUsages + if len(keyUsages) == 0 { + keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth} + } + + // If any key usage is acceptable then we're done. + for _, usage := range keyUsages { + if usage == ExtKeyUsageAny { + return candidateChains, nil + } + } + + for _, candidate := range candidateChains { + if opts.DisableEKUChecks || checkChainForKeyUsage(candidate, keyUsages) { + chains = append(chains, candidate) + } + } + + if len(chains) == 0 { + return nil, CertificateInvalidError{c, IncompatibleUsage, ""} + } + + return chains, nil +} + +func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate { + n := make([]*Certificate, len(chain)+1) + copy(n, chain) + n[len(chain)] = cert + return n +} + +// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls +// that an invocation of buildChains will (tranistively) make. 
Most chains are +// less than 15 certificates long, so this leaves space for multiple chains and +// for failed checks due to different intermediates having the same Subject. +const maxChainSignatureChecks = 100 + +func (c *Certificate) buildChains(cache map[*Certificate][][]*Certificate, currentChain []*Certificate, sigChecks *int, opts *VerifyOptions) (chains [][]*Certificate, err error) { + var ( + hintErr error + hintCert *Certificate + ) + + considerCandidate := func(certType int, candidate *Certificate) { + for _, cert := range currentChain { + if cert.Equal(candidate) { + return + } + } + + if sigChecks == nil { + sigChecks = new(int) + } + *sigChecks++ + if *sigChecks > maxChainSignatureChecks { + err = errors.New("x509: signature check attempts limit reached while verifying certificate chain") + return + } + + if err := c.CheckSignatureFrom(candidate); err != nil { + if hintErr == nil { + hintErr = err + hintCert = candidate + } + return + } + + err = candidate.isValid(certType, currentChain, opts) + if err != nil { + return + } + + switch certType { + case rootCertificate: + chains = append(chains, appendToFreshChain(currentChain, candidate)) + case intermediateCertificate: + if cache == nil { + cache = make(map[*Certificate][][]*Certificate) + } + childChains, ok := cache[candidate] + if !ok { + childChains, err = candidate.buildChains(cache, appendToFreshChain(currentChain, candidate), sigChecks, opts) + cache[candidate] = childChains + } + chains = append(chains, childChains...) + } + } + + for _, rootNum := range opts.Roots.findPotentialParents(c) { + considerCandidate(rootCertificate, opts.Roots.certs[rootNum]) + } + for _, intermediateNum := range opts.Intermediates.findPotentialParents(c) { + considerCandidate(intermediateCertificate, opts.Intermediates.certs[intermediateNum]) + } + + if len(chains) > 0 { + err = nil + } + if len(chains) == 0 && err == nil { + err = UnknownAuthorityError{c, hintErr, hintCert} + } + + return +} + +// validHostname reports whether host is a valid hostname that can be matched or +// matched against according to RFC 6125 2.2, with some leniency to accommodate +// legacy values. +func validHostname(host string) bool { + host = strings.TrimSuffix(host, ".") + + if len(host) == 0 { + return false + } + + for i, part := range strings.Split(host, ".") { + if part == "" { + // Empty label. + return false + } + if i == 0 && part == "*" { + // Only allow full left-most wildcards, as those are the only ones + // we match, and matching literal '*' characters is probably never + // the expected behavior. + continue + } + for j, c := range part { + if 'a' <= c && c <= 'z' { + continue + } + if '0' <= c && c <= '9' { + continue + } + if 'A' <= c && c <= 'Z' { + continue + } + if c == '-' && j != 0 { + continue + } + if c == '_' || c == ':' { + // Not valid characters in hostnames, but commonly + // found in deployments outside the WebPKI. + continue + } + return false + } + } + + return true +} + +// commonNameAsHostname reports whether the Common Name field should be +// considered the hostname that the certificate is valid for. This is a legacy +// behavior, disabled if the Subject Alt Name extension is present. +// +// It applies the strict validHostname check to the Common Name field, so that +// certificates without SANs can still be validated against CAs with name +// constraints if there is no risk the CN would be matched as a hostname. +// See NameConstraintsWithoutSANs and issue 24151. 
+func (c *Certificate) commonNameAsHostname() bool { + return !ignoreCN && !c.hasSANExtension() && validHostname(c.Subject.CommonName) +} + +func matchHostnames(pattern, host string) bool { + host = strings.TrimSuffix(host, ".") + pattern = strings.TrimSuffix(pattern, ".") + + if len(pattern) == 0 || len(host) == 0 { + return false + } + + patternParts := strings.Split(pattern, ".") + hostParts := strings.Split(host, ".") + + if len(patternParts) != len(hostParts) { + return false + } + + for i, patternPart := range patternParts { + if i == 0 && patternPart == "*" { + continue + } + if patternPart != hostParts[i] { + return false + } + } + + return true +} + +// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use +// an explicitly ASCII function to avoid any sharp corners resulting from +// performing Unicode operations on DNS labels. +func toLowerCaseASCII(in string) string { + // If the string is already lower-case then there's nothing to do. + isAlreadyLowerCase := true + for _, c := range in { + if c == utf8.RuneError { + // If we get a UTF-8 error then there might be + // upper-case ASCII bytes in the invalid sequence. + isAlreadyLowerCase = false + break + } + if 'A' <= c && c <= 'Z' { + isAlreadyLowerCase = false + break + } + } + + if isAlreadyLowerCase { + return in + } + + out := []byte(in) + for i, c := range out { + if 'A' <= c && c <= 'Z' { + out[i] += 'a' - 'A' + } + } + return string(out) +} + +// VerifyHostname returns nil if c is a valid certificate for the named host. +// Otherwise it returns an error describing the mismatch. +func (c *Certificate) VerifyHostname(h string) error { + // IP addresses may be written in [ ]. + candidateIP := h + if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' { + candidateIP = h[1 : len(h)-1] + } + if ip := net.ParseIP(candidateIP); ip != nil { + // We only match IP addresses against IP SANs. + // See RFC 6125, Appendix B.2. + for _, candidate := range c.IPAddresses { + if ip.Equal(candidate) { + return nil + } + } + return HostnameError{c, candidateIP} + } + + lowered := toLowerCaseASCII(h) + + if c.commonNameAsHostname() { + if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) { + return nil + } + } else { + for _, match := range c.DNSNames { + if matchHostnames(toLowerCaseASCII(match), lowered) { + return nil + } + } + } + + return HostnameError{c, h} +} + +func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool { + usages := make([]ExtKeyUsage, len(keyUsages)) + copy(usages, keyUsages) + + if len(chain) == 0 { + return false + } + + usagesRemaining := len(usages) + + // We walk down the list and cross out any usages that aren't supported + // by each certificate. If we cross out all the usages, then the chain + // is unacceptable. + +NextCert: + for i := len(chain) - 1; i >= 0; i-- { + cert := chain[i] + if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 { + // The certificate doesn't have any extended key usage specified. + continue + } + + for _, usage := range cert.ExtKeyUsage { + if usage == ExtKeyUsageAny { + // The certificate is explicitly good for any usage. 
+ continue NextCert + } + } + + const invalidUsage ExtKeyUsage = -1 + + NextRequestedUsage: + for i, requestedUsage := range usages { + if requestedUsage == invalidUsage { + continue + } + + for _, usage := range cert.ExtKeyUsage { + if requestedUsage == usage { + continue NextRequestedUsage + } else if requestedUsage == ExtKeyUsageServerAuth && + (usage == ExtKeyUsageNetscapeServerGatedCrypto || + usage == ExtKeyUsageMicrosoftServerGatedCrypto) { + // In order to support COMODO + // certificate chains, we have to + // accept Netscape or Microsoft SGC + // usages as equal to ServerAuth. + continue NextRequestedUsage + } + } + + usages[i] = invalidUsage + usagesRemaining-- + if usagesRemaining == 0 { + return false + } + } + } + + return true +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/x509.go b/vendor/github.com/google/certificate-transparency-go/x509/x509.go new file mode 100644 index 000000000000..917d78779f32 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/x509.go @@ -0,0 +1,3307 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package x509 parses X.509-encoded keys and certificates. +// +// On UNIX systems the environment variables SSL_CERT_FILE and SSL_CERT_DIR +// can be used to override the system default locations for the SSL certificate +// file and SSL certificate files directory, respectively. +// +// This is a fork of the Go library crypto/x509 package, primarily adapted for +// use with Certificate Transparency. Main areas of difference are: +// +// Life as a fork: +// - Rename OS-specific cgo code so it doesn't clash with main Go library. +// - Use local library imports (asn1, pkix) throughout. +// - Add version-specific wrappers for Go version-incompatible code (in +// ptr_*_windows.go). +// Laxer certificate parsing: +// - Add options to disable various validation checks (times, EKUs etc). +// - Use NonFatalErrors type for some errors and continue parsing; this +// can be checked with IsFatal(err). +// - Support for short bitlength ECDSA curves (in curves.go). +// Certificate Transparency specific function: +// - Parsing and marshaling of SCTList extension. +// - RemoveSCTList() function for rebuilding CT leaf entry. +// - Pre-certificate processing (RemoveCTPoison(), BuildPrecertTBS(), +// ParseTBSCertificate(), IsPrecertificate()). +// Revocation list processing: +// - Detailed CRL parsing (in revoked.go) +// - Detailed error recording mechanism (in error.go, errors.go) +// - Factor out parseDistributionPoints() for reuse. +// - Factor out and generalize GeneralNames parsing (in names.go) +// - Fix CRL commenting. +// RPKI support: +// - Support for SubjectInfoAccess extension +// - Support for RFC3779 extensions (in rpki.go) +// RSAES-OAEP support: +// - Support for parsing RSASES-OAEP public keys from certificates +// Ed25519 support: +// - Support for parsing and marshaling Ed25519 keys +// General improvements: +// - Export and use OID values throughout. +// - Export OIDFromNamedCurve(). +// - Export SignatureAlgorithmFromAI(). +// - Add OID value to UnhandledCriticalExtension error. +// - Minor typo/lint fixes. 
+package x509 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + "unicode/utf8" + + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + "golang.org/x/crypto/ed25519" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo +// in RFC 3280. +type pkixPublicKey struct { + Algo pkix.AlgorithmIdentifier + BitString asn1.BitString +} + +// ParsePKIXPublicKey parses a public key in PKIX, ASN.1 DER form. +// +// It returns a *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, or +// ed25519.PublicKey. More types might be supported in the future. +// +// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY". +func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) { + var pki publicKeyInfo + if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after ASN.1 of public-key") + } + algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm) + if algo == UnknownPublicKeyAlgorithm { + return nil, errors.New("x509: unknown public key algorithm") + } + var nfe NonFatalErrors + pub, err = parsePublicKey(algo, &pki, &nfe) + if err != nil { + return pub, err + } + // Treat non-fatal errors as fatal for this entrypoint. + if len(nfe.Errors) > 0 { + return nil, nfe.Errors[0] + } + return pub, nil +} + +func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{ + N: pub.N, + E: pub.E, + }) + if err != nil { + return nil, pkix.AlgorithmIdentifier{}, err + } + publicKeyAlgorithm.Algorithm = OIDPublicKeyRSA + // This is a NULL parameters value which is required by + // RFC 3279, Section 2.3.1. + publicKeyAlgorithm.Parameters = asn1.NullRawValue + case *ecdsa.PublicKey: + publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + oid, ok := OIDFromNamedCurve(pub.Curve) + if !ok { + return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve") + } + publicKeyAlgorithm.Algorithm = OIDPublicKeyECDSA + var paramBytes []byte + paramBytes, err = asn1.Marshal(oid) + if err != nil { + return + } + publicKeyAlgorithm.Parameters.FullBytes = paramBytes + case ed25519.PublicKey: + publicKeyBytes = pub + publicKeyAlgorithm.Algorithm = OIDPublicKeyEd25519 + default: + return nil, pkix.AlgorithmIdentifier{}, fmt.Errorf("x509: unsupported public key type: %T", pub) + } + + return publicKeyBytes, publicKeyAlgorithm, nil +} + +// MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form. +// +// The following key types are currently supported: *rsa.PublicKey, *ecdsa.PublicKey +// and ed25519.PublicKey. Unsupported key types result in an error. +// +// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY". 
+func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) { + var publicKeyBytes []byte + var publicKeyAlgorithm pkix.AlgorithmIdentifier + var err error + + if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil { + return nil, err + } + + pkix := pkixPublicKey{ + Algo: publicKeyAlgorithm, + BitString: asn1.BitString{ + Bytes: publicKeyBytes, + BitLength: 8 * len(publicKeyBytes), + }, + } + + ret, _ := asn1.Marshal(pkix) + return ret, nil +} + +// These structures reflect the ASN.1 structure of X.509 certificates.: + +type certificate struct { + Raw asn1.RawContent + TBSCertificate tbsCertificate + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +type tbsCertificate struct { + Raw asn1.RawContent + Version int `asn1:"optional,explicit,default:0,tag:0"` + SerialNumber *big.Int + SignatureAlgorithm pkix.AlgorithmIdentifier + Issuer asn1.RawValue + Validity validity + Subject asn1.RawValue + PublicKey publicKeyInfo + UniqueId asn1.BitString `asn1:"optional,tag:1"` + SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"` + Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"` +} + +// RFC 4055, 4.1 +// The current ASN.1 parser does not support non-integer defaults so +// the 'default:' tags here do nothing. +type rsaesoaepAlgorithmParameters struct { + HashFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:0,default:sha1Identifier"` + MaskgenFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:1,default:mgf1SHA1Identifier"` + PSourceFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:2,default:pSpecifiedEmptyIdentifier"` +} + +type dsaAlgorithmParameters struct { + P, Q, G *big.Int +} + +type dsaSignature struct { + R, S *big.Int +} + +type ecdsaSignature dsaSignature + +type validity struct { + NotBefore, NotAfter time.Time +} + +type publicKeyInfo struct { + Raw asn1.RawContent + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString +} + +// RFC 5280, 4.2.1.1 +type authKeyId struct { + Id []byte `asn1:"optional,tag:0"` +} + +// SignatureAlgorithm indicates the algorithm used to sign a certificate. +type SignatureAlgorithm int + +// SignatureAlgorithm values: +const ( + UnknownSignatureAlgorithm SignatureAlgorithm = iota + MD2WithRSA + MD5WithRSA + SHA1WithRSA + SHA256WithRSA + SHA384WithRSA + SHA512WithRSA + DSAWithSHA1 + DSAWithSHA256 + ECDSAWithSHA1 + ECDSAWithSHA256 + ECDSAWithSHA384 + ECDSAWithSHA512 + SHA256WithRSAPSS + SHA384WithRSAPSS + SHA512WithRSAPSS + PureEd25519 +) + +// RFC 4055, 6. Basic object identifiers +var oidpSpecified = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 9} + +// These are the default parameters for an RSAES-OAEP pubkey. +// The current ASN.1 parser does not support non-integer defaults so +// these currently do nothing. 
+var ( + sha1Identifier = pkix.AlgorithmIdentifier{ + Algorithm: oidSHA1, + Parameters: asn1.NullRawValue, + } + mgf1SHA1Identifier = pkix.AlgorithmIdentifier{ + Algorithm: oidMGF1, + // RFC 4055, 2.1 sha1Identifier + Parameters: asn1.RawValue{ + Class: asn1.ClassUniversal, + Tag: asn1.TagSequence, + IsCompound: false, + Bytes: []byte{6, 5, 43, 14, 3, 2, 26, 5, 0}, + FullBytes: []byte{16, 9, 6, 5, 43, 14, 3, 2, 26, 5, 0}}, + } + pSpecifiedEmptyIdentifier = pkix.AlgorithmIdentifier{ + Algorithm: oidpSpecified, + // RFC 4055, 4.1 nullOctetString + Parameters: asn1.RawValue{ + Class: asn1.ClassUniversal, + Tag: asn1.TagOctetString, + IsCompound: false, + Bytes: []byte{}, + FullBytes: []byte{4, 0}}, + } +) + +func (algo SignatureAlgorithm) isRSAPSS() bool { + switch algo { + case SHA256WithRSAPSS, SHA384WithRSAPSS, SHA512WithRSAPSS: + return true + default: + return false + } +} + +func (algo SignatureAlgorithm) String() string { + for _, details := range signatureAlgorithmDetails { + if details.algo == algo { + return details.name + } + } + return strconv.Itoa(int(algo)) +} + +// PublicKeyAlgorithm indicates the algorithm used for a certificate's public key. +type PublicKeyAlgorithm int + +// PublicKeyAlgorithm values: +const ( + UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota + RSA + DSA + ECDSA + Ed25519 + RSAESOAEP +) + +var publicKeyAlgoName = [...]string{ + RSA: "RSA", + DSA: "DSA", + ECDSA: "ECDSA", + Ed25519: "Ed25519", + RSAESOAEP: "RSAESOAEP", +} + +func (algo PublicKeyAlgorithm) String() string { + if 0 < algo && int(algo) < len(publicKeyAlgoName) { + return publicKeyAlgoName[algo] + } + return strconv.Itoa(int(algo)) +} + +// OIDs for signature algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } +// +// +// RFC 3279 2.2.1 RSA Signature Algorithms +// +// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 } +// +// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 } +// +// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 } +// +// dsaWithSha1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 } +// +// RFC 3279 2.2.3 ECDSA Signature Algorithm +// +// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-x962(10045) +// signatures(4) ecdsa-with-SHA1(1)} +// +// +// RFC 4055 5 PKCS #1 Version 1.5 +// +// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 } +// +// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 } +// +// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 } +// +// +// RFC 5758 3.1 DSA Signature Algorithms +// +// dsaWithSha256 OBJECT IDENTIFIER ::= { +// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) +// csor(3) algorithms(4) id-dsa-with-sha2(3) 2} +// +// RFC 5758 3.2 ECDSA Signature Algorithm +// +// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } +// +// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } +// +// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 } +// +// +// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers +// +// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + 
oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + + oidSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26} + oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8} + + // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA + // but it's specified by ISO. Microsoft's makecert.exe has been known + // to produce certificates with this OID. + oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} +) + +var signatureAlgorithmDetails = []struct { + algo SignatureAlgorithm + name string + oid asn1.ObjectIdentifier + pubKeyAlgo PublicKeyAlgorithm + hash crypto.Hash +}{ + {MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, RSA, crypto.Hash(0) /* no value for MD2 */}, + {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, RSA, crypto.MD5}, + {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, RSA, crypto.SHA1}, + {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, RSA, crypto.SHA1}, + {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, RSA, crypto.SHA256}, + {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, RSA, crypto.SHA384}, + {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, RSA, crypto.SHA512}, + {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA256}, + {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA384}, + {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA512}, + {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, DSA, crypto.SHA1}, + {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, DSA, crypto.SHA256}, + {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, ECDSA, crypto.SHA1}, + {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256}, + {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384}, + {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512}, + {PureEd25519, "Ed25519", oidSignatureEd25519, Ed25519, crypto.Hash(0) /* no pre-hashing */}, +} + +// pssParameters reflects the parameters in an AlgorithmIdentifier that +// specifies RSA PSS. See RFC 3447, Appendix A.2.3. +type pssParameters struct { + // The following three fields are not marked as + // optional because the default values specify SHA-1, + // which is no longer suitable for use in signatures. 
+ Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"` + MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"` + SaltLength int `asn1:"explicit,tag:2"` + TrailerField int `asn1:"optional,explicit,tag:3,default:1"` +} + +// rsaPSSParameters returns an asn1.RawValue suitable for use as the Parameters +// in an AlgorithmIdentifier that specifies RSA PSS. +func rsaPSSParameters(hashFunc crypto.Hash) asn1.RawValue { + var hashOID asn1.ObjectIdentifier + + switch hashFunc { + case crypto.SHA256: + hashOID = oidSHA256 + case crypto.SHA384: + hashOID = oidSHA384 + case crypto.SHA512: + hashOID = oidSHA512 + } + + params := pssParameters{ + Hash: pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.NullRawValue, + }, + MGF: pkix.AlgorithmIdentifier{ + Algorithm: oidMGF1, + }, + SaltLength: hashFunc.Size(), + TrailerField: 1, + } + + mgf1Params := pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.NullRawValue, + } + + var err error + params.MGF.Parameters.FullBytes, err = asn1.Marshal(mgf1Params) + if err != nil { + panic(err) + } + + serialized, err := asn1.Marshal(params) + if err != nil { + panic(err) + } + + return asn1.RawValue{FullBytes: serialized} +} + +// SignatureAlgorithmFromAI converts an PKIX algorithm identifier to the +// equivalent local constant. +func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm { + if ai.Algorithm.Equal(oidSignatureEd25519) { + // RFC 8410, Section 3 + // > For all of the OIDs, the parameters MUST be absent. + if len(ai.Parameters.FullBytes) != 0 { + return UnknownSignatureAlgorithm + } + } + + if !ai.Algorithm.Equal(oidSignatureRSAPSS) { + for _, details := range signatureAlgorithmDetails { + if ai.Algorithm.Equal(details.oid) { + return details.algo + } + } + return UnknownSignatureAlgorithm + } + + // RSA PSS is special because it encodes important parameters + // in the Parameters. + + var params pssParameters + if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, ¶ms); err != nil { + return UnknownSignatureAlgorithm + } + + var mgf1HashFunc pkix.AlgorithmIdentifier + if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil { + return UnknownSignatureAlgorithm + } + + // PSS is greatly overburdened with options. This code forces them into + // three buckets by requiring that the MGF1 hash function always match the + // message hash function (as recommended in RFC 3447, Section 8.1), that the + // salt length matches the hash length, and that the trailer field has the + // default value. 
+ if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) || + !params.MGF.Algorithm.Equal(oidMGF1) || + !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || + (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) || + params.TrailerField != 1 { + return UnknownSignatureAlgorithm + } + + switch { + case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32: + return SHA256WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48: + return SHA384WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64: + return SHA512WithRSAPSS + } + + return UnknownSignatureAlgorithm +} + +// RFC 3279, 2.3 Public Key Algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) +// +// rsadsi(113549) pkcs(1) 1 } +// +// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } +// +// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) +// +// x9-57(10040) x9cm(4) 1 } +// +// # RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters +// +// id-ecPublicKey OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } +var ( + OIDPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + OIDPublicKeyRSAESOAEP = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 7} + OIDPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + OIDPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + OIDPublicKeyRSAObsolete = asn1.ObjectIdentifier{2, 5, 8, 1, 1} + OIDPublicKeyEd25519 = oidSignatureEd25519 +) + +func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm { + switch { + case oid.Equal(OIDPublicKeyRSA): + return RSA + case oid.Equal(OIDPublicKeyDSA): + return DSA + case oid.Equal(OIDPublicKeyECDSA): + return ECDSA + case oid.Equal(OIDPublicKeyRSAESOAEP): + return RSAESOAEP + case oid.Equal(OIDPublicKeyEd25519): + return Ed25519 + } + return UnknownPublicKeyAlgorithm +} + +// RFC 5480, 2.1.1.1. 
Named Curve +// +// secp224r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 33 } +// +// secp256r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 7 } +// +// secp384r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 34 } +// +// secp521r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 35 } +// +// secp192r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 1 } +// +// NB: secp256r1 is equivalent to prime256v1, +// secp192r1 is equivalent to ansix9p192r and prime192v1 +var ( + OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} + OIDNamedCurveP192 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1} +) + +func namedCurveFromOID(oid asn1.ObjectIdentifier, nfe *NonFatalErrors) elliptic.Curve { + switch { + case oid.Equal(OIDNamedCurveP224): + return elliptic.P224() + case oid.Equal(OIDNamedCurveP256): + return elliptic.P256() + case oid.Equal(OIDNamedCurveP384): + return elliptic.P384() + case oid.Equal(OIDNamedCurveP521): + return elliptic.P521() + case oid.Equal(OIDNamedCurveP192): + nfe.AddError(errors.New("insecure curve (secp192r1) specified")) + return secp192r1() + } + return nil +} + +// OIDFromNamedCurve returns the OID used to specify the use of the given +// elliptic curve. +func OIDFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { + switch curve { + case elliptic.P224(): + return OIDNamedCurveP224, true + case elliptic.P256(): + return OIDNamedCurveP256, true + case elliptic.P384(): + return OIDNamedCurveP384, true + case elliptic.P521(): + return OIDNamedCurveP521, true + case secp192r1(): + return OIDNamedCurveP192, true + } + + return nil, false +} + +// KeyUsage represents the set of actions that are valid for a given key. It's +// a bitmap of the KeyUsage* constants. 
+type KeyUsage int + +// KeyUsage values: +const ( + KeyUsageDigitalSignature KeyUsage = 1 << iota + KeyUsageContentCommitment + KeyUsageKeyEncipherment + KeyUsageDataEncipherment + KeyUsageKeyAgreement + KeyUsageCertSign + KeyUsageCRLSign + KeyUsageEncipherOnly + KeyUsageDecipherOnly +) + +// RFC 5280, 4.2.1.12 Extended Key Usage +// +// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } +// +// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } +// +// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } +// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } +// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } +// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } +// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } +// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } +var ( + oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} + oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} + oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} + oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} + oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} + oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} + oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} + oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} + oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} + oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} + oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} + oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} + oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22} + oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1} + // RFC 6962 s3.1 + oidExtKeyUsageCertificateTransparency = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 4} +) + +// ExtKeyUsage represents an extended set of actions that are valid for a given key. +// Each of the ExtKeyUsage* constants define a unique action. +type ExtKeyUsage int + +// ExtKeyUsage values: +const ( + ExtKeyUsageAny ExtKeyUsage = iota + ExtKeyUsageServerAuth + ExtKeyUsageClientAuth + ExtKeyUsageCodeSigning + ExtKeyUsageEmailProtection + ExtKeyUsageIPSECEndSystem + ExtKeyUsageIPSECTunnel + ExtKeyUsageIPSECUser + ExtKeyUsageTimeStamping + ExtKeyUsageOCSPSigning + ExtKeyUsageMicrosoftServerGatedCrypto + ExtKeyUsageNetscapeServerGatedCrypto + ExtKeyUsageMicrosoftCommercialCodeSigning + ExtKeyUsageMicrosoftKernelCodeSigning + ExtKeyUsageCertificateTransparency +) + +// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. 
+var extKeyUsageOIDs = []struct { + extKeyUsage ExtKeyUsage + oid asn1.ObjectIdentifier +}{ + {ExtKeyUsageAny, oidExtKeyUsageAny}, + {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth}, + {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth}, + {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning}, + {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection}, + {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem}, + {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel}, + {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser}, + {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping}, + {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning}, + {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto}, + {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto}, + {ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning}, + {ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning}, + {ExtKeyUsageCertificateTransparency, oidExtKeyUsageCertificateTransparency}, +} + +func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) { + for _, pair := range extKeyUsageOIDs { + if oid.Equal(pair.oid) { + return pair.extKeyUsage, true + } + } + return +} + +func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) { + for _, pair := range extKeyUsageOIDs { + if eku == pair.extKeyUsage { + return pair.oid, true + } + } + return +} + +// SerializedSCT represents a single TLS-encoded signed certificate timestamp, from RFC6962 s3.3. +type SerializedSCT struct { + Val []byte `tls:"minlen:1,maxlen:65535"` +} + +// SignedCertificateTimestampList is a list of signed certificate timestamps, from RFC6962 s3.3. +type SignedCertificateTimestampList struct { + SCTList []SerializedSCT `tls:"minlen:1,maxlen:65335"` +} + +// A Certificate represents an X.509 certificate. +type Certificate struct { + Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature). + RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content. + RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo. + RawSubject []byte // DER encoded Subject + RawIssuer []byte // DER encoded Issuer + + Signature []byte + SignatureAlgorithm SignatureAlgorithm + + PublicKeyAlgorithm PublicKeyAlgorithm + PublicKey interface{} + + Version int + SerialNumber *big.Int + Issuer pkix.Name + Subject pkix.Name + NotBefore, NotAfter time.Time // Validity bounds. + KeyUsage KeyUsage + + // Extensions contains raw X.509 extensions. When parsing certificates, + // this can be used to extract non-critical extensions that are not + // parsed by this package. When marshaling certificates, the Extensions + // field is ignored, see ExtraExtensions. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any + // marshaled certificates. Values override any extensions that would + // otherwise be produced based on the other fields. The ExtraExtensions + // field is not populated when parsing certificates, see Extensions. + ExtraExtensions []pkix.Extension + + // UnhandledCriticalExtensions contains a list of extension IDs that + // were not (fully) processed when parsing. Verify will fail if this + // slice is non-empty, unless verification is delegated to an OS + // library which understands all the critical extensions. 
+ // + // Users can access these extensions using Extensions and can remove + // elements from this slice if they believe that they have been + // handled. + UnhandledCriticalExtensions []asn1.ObjectIdentifier + + ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages. + UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package. + + // BasicConstraintsValid indicates whether IsCA, MaxPathLen, + // and MaxPathLenZero are valid. + BasicConstraintsValid bool + IsCA bool + + // MaxPathLen and MaxPathLenZero indicate the presence and + // value of the BasicConstraints' "pathLenConstraint". + // + // When parsing a certificate, a positive non-zero MaxPathLen + // means that the field was specified, -1 means it was unset, + // and MaxPathLenZero being true mean that the field was + // explicitly set to zero. The case of MaxPathLen==0 with MaxPathLenZero==false + // should be treated equivalent to -1 (unset). + // + // When generating a certificate, an unset pathLenConstraint + // can be requested with either MaxPathLen == -1 or using the + // zero value for both MaxPathLen and MaxPathLenZero. + MaxPathLen int + // MaxPathLenZero indicates that BasicConstraintsValid==true + // and MaxPathLen==0 should be interpreted as an actual + // maximum path length of zero. Otherwise, that combination is + // interpreted as MaxPathLen not being set. + MaxPathLenZero bool + + SubjectKeyId []byte + AuthorityKeyId []byte + + // RFC 5280, 4.2.2.1 (Authority Information Access) + OCSPServer []string + IssuingCertificateURL []string + + // Subject Information Access + SubjectTimestamps []string + SubjectCARepositories []string + + // Subject Alternate Name values. (Note that these values may not be valid + // if invalid values were contained within a parsed certificate. For + // example, an element of DNSNames may not be a valid DNS domain name.) + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL + + // Name constraints + PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical. + PermittedDNSDomains []string + ExcludedDNSDomains []string + PermittedIPRanges []*net.IPNet + ExcludedIPRanges []*net.IPNet + PermittedEmailAddresses []string + ExcludedEmailAddresses []string + PermittedURIDomains []string + ExcludedURIDomains []string + + // CRL Distribution Points + CRLDistributionPoints []string + + PolicyIdentifiers []asn1.ObjectIdentifier + + RPKIAddressRanges []*IPAddressFamilyBlocks + RPKIASNumbers, RPKIRoutingDomainIDs *ASIdentifiers + + // Certificate Transparency SCT extension contents; this is a TLS-encoded + // SignedCertificateTimestampList (RFC 6962 s3.3). + RawSCT []byte + SCTList SignedCertificateTimestampList +} + +// ErrUnsupportedAlgorithm results from attempting to perform an operation that +// involves algorithms that are not currently implemented. +var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented") + +// InsecureAlgorithmError results when the signature algorithm for a certificate +// is known to be insecure. +type InsecureAlgorithmError SignatureAlgorithm + +func (e InsecureAlgorithmError) Error() string { + return fmt.Sprintf("x509: cannot verify signature: insecure algorithm %v", SignatureAlgorithm(e)) +} + +// ConstraintViolationError results when a requested usage is not permitted by +// a certificate. For example: checking a signature when the public key isn't a +// certificate signing key. 
+type ConstraintViolationError struct{} + +func (ConstraintViolationError) Error() string { + return "x509: invalid signature: parent certificate cannot sign this kind of certificate" +} + +// Equal indicates whether two Certificate objects are equal (by comparing their +// DER-encoded values). +func (c *Certificate) Equal(other *Certificate) bool { + if c == nil || other == nil { + return c == other + } + return bytes.Equal(c.Raw, other.Raw) +} + +// IsPrecertificate checks whether the certificate is a precertificate, by +// checking for the presence of the CT Poison extension. +func (c *Certificate) IsPrecertificate() bool { + if c == nil { + return false + } + for _, ext := range c.Extensions { + if ext.Id.Equal(OIDExtensionCTPoison) { + return true + } + } + return false +} + +func (c *Certificate) hasSANExtension() bool { + return oidInExtensions(OIDExtensionSubjectAltName, c.Extensions) +} + +// Entrust have a broken root certificate (CN=Entrust.net Certification +// Authority (2048)) which isn't marked as a CA certificate and is thus invalid +// according to PKIX. +// We recognise this certificate by its SubjectPublicKeyInfo and exempt it +// from the Basic Constraints requirement. +// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869 +// +// TODO(agl): remove this hack once their reissued root is sufficiently +// widespread. +var entrustBrokenSPKI = []byte{ + 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, + 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, + 0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05, + 0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3, + 0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff, + 0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10, + 0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff, + 0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50, + 0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8, + 0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6, + 0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04, + 0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c, + 0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65, + 0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38, + 0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda, + 0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9, + 0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7, + 0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37, + 0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde, + 0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6, + 0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c, + 0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a, + 0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5, + 0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2, + 0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc, + 0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4, + 0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b, + 0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e, + 0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48, + 0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05, + 0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09, + 0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2, + 0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d, + 0xa6, 0xc2, 0xcc, 0x66, 0xff, 0xa5, 0x66, 0x68, + 0x55, 0x02, 0x03, 0x01, 0x00, 0x01, +} + +// CheckSignatureFrom verifies that the signature on c is a valid signature +// from parent. 
+func (c *Certificate) CheckSignatureFrom(parent *Certificate) error { + // RFC 5280, 4.2.1.9: + // "If the basic constraints extension is not present in a version 3 + // certificate, or the extension is present but the cA boolean is not + // asserted, then the certified public key MUST NOT be used to verify + // certificate signatures." + // (except for Entrust, see comment above entrustBrokenSPKI) + if (parent.Version == 3 && !parent.BasicConstraintsValid || + parent.BasicConstraintsValid && !parent.IsCA) && + !bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) { + return ConstraintViolationError{} + } + + if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 { + return ConstraintViolationError{} + } + + if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm { + return ErrUnsupportedAlgorithm + } + + // TODO(agl): don't ignore the path length constraint. + + return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature) +} + +// CheckSignature verifies that signature is a valid signature over signed from +// c's public key. +func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) error { + return checkSignature(algo, signed, signature, c.PublicKey) +} + +func (c *Certificate) hasNameConstraints() bool { + return oidInExtensions(OIDExtensionNameConstraints, c.Extensions) +} + +func (c *Certificate) getSANExtension() []byte { + for _, e := range c.Extensions { + if e.Id.Equal(OIDExtensionSubjectAltName) { + return e.Value + } + } + + return nil +} + +func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey interface{}) error { + return fmt.Errorf("x509: signature algorithm specifies an %s public key, but have public key of type %T", expectedPubKeyAlgo.String(), pubKey) +} + +// CheckSignature verifies that signature is a valid signature over signed from +// a crypto.PublicKey. +func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey) (err error) { + var hashType crypto.Hash + var pubKeyAlgo PublicKeyAlgorithm + + for _, details := range signatureAlgorithmDetails { + if details.algo == algo { + hashType = details.hash + pubKeyAlgo = details.pubKeyAlgo + } + } + + switch hashType { + case crypto.Hash(0): + if pubKeyAlgo != Ed25519 { + return ErrUnsupportedAlgorithm + } + case crypto.MD5: + return InsecureAlgorithmError(algo) + default: + if !hashType.Available() { + return ErrUnsupportedAlgorithm + } + h := hashType.New() + h.Write(signed) + signed = h.Sum(nil) + } + + switch pub := publicKey.(type) { + case *rsa.PublicKey: + if pubKeyAlgo != RSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + if algo.isRSAPSS() { + return rsa.VerifyPSS(pub, hashType, signed, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}) + } else { + return rsa.VerifyPKCS1v15(pub, hashType, signed, signature) + } + case *dsa.PublicKey: + if pubKeyAlgo != DSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + dsaSig := new(dsaSignature) + if rest, err := asn1.Unmarshal(signature, dsaSig); err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after DSA signature") + } + if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 { + return errors.New("x509: DSA signature contained zero or negative values") + } + // According to FIPS 186-3, section 4.6, the hash must be truncated if it is longer + // than the key length, but crypto/dsa doesn't do it automatically. 
+ if maxHashLen := pub.Q.BitLen() / 8; maxHashLen < len(signed) { + signed = signed[:maxHashLen] + } + if !dsa.Verify(pub, signed, dsaSig.R, dsaSig.S) { + return errors.New("x509: DSA verification failure") + } + return + case *ecdsa.PublicKey: + if pubKeyAlgo != ECDSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + ecdsaSig := new(ecdsaSignature) + if rest, err := asn1.Unmarshal(signature, ecdsaSig); err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after ECDSA signature") + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errors.New("x509: ECDSA signature contained zero or negative values") + } + if !ecdsa.Verify(pub, signed, ecdsaSig.R, ecdsaSig.S) { + return errors.New("x509: ECDSA verification failure") + } + return + case ed25519.PublicKey: + if pubKeyAlgo != Ed25519 { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + if !ed25519.Verify(pub, signed, signature) { + return errors.New("x509: Ed25519 verification failure") + } + return + } + return ErrUnsupportedAlgorithm +} + +// CheckCRLSignature checks that the signature in crl is from c. +func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error { + algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm) + return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign()) +} + +// UnhandledCriticalExtension results when the certificate contains an extension +// that is marked as critical but which is not handled by this library. +type UnhandledCriticalExtension struct { + ID asn1.ObjectIdentifier +} + +func (h UnhandledCriticalExtension) Error() string { + return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID) +} + +// removeExtension takes a DER-encoded TBSCertificate, removes the extension +// specified by oid (preserving the order of other extensions), and returns the +// result still as a DER-encoded TBSCertificate. This function will fail if +// there is not exactly 1 extension of the type specified by the oid present. +func removeExtension(tbsData []byte, oid asn1.ObjectIdentifier) ([]byte, error) { + var tbs tbsCertificate + rest, err := asn1.Unmarshal(tbsData, &tbs) + if err != nil { + return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err) + } else if rLen := len(rest); rLen > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen) + } + extAt := -1 + for i, ext := range tbs.Extensions { + if ext.Id.Equal(oid) { + if extAt != -1 { + return nil, errors.New("multiple extensions of specified type present") + } + extAt = i + } + } + if extAt == -1 { + return nil, errors.New("no extension of specified type present") + } + tbs.Extensions = append(tbs.Extensions[:extAt], tbs.Extensions[extAt+1:]...) + // Clear out the asn1.RawContent so the re-marshal operation sees the + // updated structure (rather than just copying the out-of-date DER data). + tbs.Raw = nil + + data, err := asn1.Marshal(tbs) + if err != nil { + return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err) + } + return data, nil +} + +// RemoveSCTList takes a DER-encoded TBSCertificate and removes the CT SCT +// extension that contains the SCT list (preserving the order of other +// extensions), and returns the result still as a DER-encoded TBSCertificate. +// This function will fail if there is not exactly 1 CT SCT extension present. 
+func RemoveSCTList(tbsData []byte) ([]byte, error) { + return removeExtension(tbsData, OIDExtensionCTSCT) +} + +// RemoveCTPoison takes a DER-encoded TBSCertificate and removes the CT poison +// extension (preserving the order of other extensions), and returns the result +// still as a DER-encoded TBSCertificate. This function will fail if there is +// not exactly 1 CT poison extension present. +func RemoveCTPoison(tbsData []byte) ([]byte, error) { + return BuildPrecertTBS(tbsData, nil) +} + +// BuildPrecertTBS builds a Certificate Transparency pre-certificate (RFC 6962 +// s3.1) from the given DER-encoded TBSCertificate, returning a DER-encoded +// TBSCertificate. +// +// This function removes the CT poison extension (there must be exactly 1 of +// these), preserving the order of other extensions. +// +// If preIssuer is provided, this should be a special intermediate certificate +// that was used to sign the precert (indicated by having the special +// CertificateTransparency extended key usage). In this case, the issuance +// information of the pre-cert is updated to reflect the next issuer in the +// chain, i.e. the issuer of this special intermediate: +// - The precert's Issuer is changed to the Issuer of the intermediate +// - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the +// intermediate. +func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) { + data, err := removeExtension(tbsData, OIDExtensionCTPoison) + if err != nil { + return nil, err + } + + var tbs tbsCertificate + rest, err := asn1.Unmarshal(data, &tbs) + if err != nil { + return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err) + } else if rLen := len(rest); rLen > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen) + } + + if preIssuer != nil { + // Update the precert's Issuer field. Use the RawIssuer rather than the + // parsed Issuer to avoid any chance of ASN.1 differences (e.g. switching + // from UTF8String to PrintableString). + tbs.Issuer.FullBytes = preIssuer.RawIssuer + + // Also need to update the cert's AuthorityKeyID extension + // to that of the preIssuer. + var issuerKeyID []byte + for _, ext := range preIssuer.Extensions { + if ext.Id.Equal(OIDExtensionAuthorityKeyId) { + issuerKeyID = ext.Value + break + } + } + + // Check the preIssuer has the CT EKU. + seenCTEKU := false + for _, eku := range preIssuer.ExtKeyUsage { + if eku == ExtKeyUsageCertificateTransparency { + seenCTEKU = true + break + } + } + if !seenCTEKU { + return nil, fmt.Errorf("issuer does not have CertificateTransparency extended key usage") + } + + keyAt := -1 + for i, ext := range tbs.Extensions { + if ext.Id.Equal(OIDExtensionAuthorityKeyId) { + keyAt = i + break + } + } + if keyAt >= 0 { + // PreCert has an auth-key-id; replace it with the value from the preIssuer + if issuerKeyID != nil { + tbs.Extensions[keyAt].Value = issuerKeyID + } else { + tbs.Extensions = append(tbs.Extensions[:keyAt], tbs.Extensions[keyAt+1:]...) + } + } else if issuerKeyID != nil { + // PreCert did not have an auth-key-id, but the preIssuer does, so add it at the end. + authKeyIDExt := pkix.Extension{ + Id: OIDExtensionAuthorityKeyId, + Critical: false, + Value: issuerKeyID, + } + tbs.Extensions = append(tbs.Extensions, authKeyIDExt) + } + + // Clear out the asn1.RawContent so the re-marshal operation sees the + // updated structure (rather than just copying the out-of-date DER data). 
+ tbs.Raw = nil + } + + data, err = asn1.Marshal(tbs) + if err != nil { + return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err) + } + return data, nil +} + +type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` +} + +// RFC 5280, 4.2.1.4 +type policyInformation struct { + Policy asn1.ObjectIdentifier + // policyQualifiers omitted +} + +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +// RFC 5280, 4.2.2.1 +type accessDescription struct { + Method asn1.ObjectIdentifier + Location asn1.RawValue +} + +// RFC 5280, 4.2.1.14 +type distributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + Reason asn1.BitString `asn1:"optional,tag:1"` + CRLIssuer asn1.RawValue `asn1:"optional,tag:2"` +} + +type distributionPointName struct { + FullName []asn1.RawValue `asn1:"optional,tag:0"` + RelativeName pkix.RDNSequence `asn1:"optional,tag:1"` +} + +func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo, nfe *NonFatalErrors) (interface{}, error) { + asn1Data := keyData.PublicKey.RightAlign() + switch algo { + case RSA, RSAESOAEP: + // RSA public keys must have a NULL in the parameters. + // See RFC 3279, Section 2.3.1. + if algo == RSA && !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) { + nfe.AddError(errors.New("x509: RSA key missing NULL parameters")) + } + if algo == RSAESOAEP { + // We only parse the parameters to ensure it is a valid encoding, we throw out the actual values + paramsData := keyData.Algorithm.Parameters.FullBytes + params := new(rsaesoaepAlgorithmParameters) + params.HashFunc = sha1Identifier + params.MaskgenFunc = mgf1SHA1Identifier + params.PSourceFunc = pSpecifiedEmptyIdentifier + rest, err := asn1.Unmarshal(paramsData, params) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after RSAES-OAEP parameters") + } + } + + p := new(pkcs1PublicKey) + rest, err := asn1.Unmarshal(asn1Data, p) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(asn1Data, p, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after RSA public key") + } + + if p.N.Sign() <= 0 { + nfe.AddError(errors.New("x509: RSA modulus is not a positive number")) + } + if p.E <= 0 { + return nil, errors.New("x509: RSA public exponent is not a positive number") + } + + // TODO(dkarch): Update to return the parameters once crypto/x509 has come up with permanent solution (https://github.com/golang/go/issues/30416) + pub := &rsa.PublicKey{ + E: p.E, + N: p.N, + } + return pub, nil + case DSA: + var p *big.Int + rest, err := asn1.Unmarshal(asn1Data, &p) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &p, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after DSA public key") + } + paramsData := keyData.Algorithm.Parameters.FullBytes + params := new(dsaAlgorithmParameters) + rest, err = asn1.Unmarshal(paramsData, params) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after DSA parameters") + } + if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 { + return nil, errors.New("x509: zero or negative DSA parameter") + } + pub := &dsa.PublicKey{ + Parameters: 
dsa.Parameters{ + P: params.P, + Q: params.Q, + G: params.G, + }, + Y: p, + } + return pub, nil + case ECDSA: + paramsData := keyData.Algorithm.Parameters.FullBytes + namedCurveOID := new(asn1.ObjectIdentifier) + rest, err := asn1.Unmarshal(paramsData, namedCurveOID) + if err != nil { + return nil, errors.New("x509: failed to parse ECDSA parameters as named curve") + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after ECDSA parameters") + } + namedCurve := namedCurveFromOID(*namedCurveOID, nfe) + if namedCurve == nil { + return nil, fmt.Errorf("x509: unsupported elliptic curve %v", namedCurveOID) + } + x, y := elliptic.Unmarshal(namedCurve, asn1Data) + if x == nil { + return nil, errors.New("x509: failed to unmarshal elliptic curve point") + } + pub := &ecdsa.PublicKey{ + Curve: namedCurve, + X: x, + Y: y, + } + return pub, nil + case Ed25519: + return ed25519.PublicKey(asn1Data), nil + default: + return nil, nil + } +} + +// NonFatalErrors is an error type which can hold a number of other errors. +// It's used to collect a range of non-fatal errors which occur while parsing +// a certificate, that way we can still match on certs which technically are +// invalid. +type NonFatalErrors struct { + Errors []error +} + +// AddError adds an error to the list of errors contained by NonFatalErrors. +func (e *NonFatalErrors) AddError(err error) { + e.Errors = append(e.Errors, err) +} + +// Returns a string consisting of the values of Error() from all of the errors +// contained in |e| +func (e NonFatalErrors) Error() string { + r := "NonFatalErrors: " + for _, err := range e.Errors { + r += err.Error() + "; " + } + return r +} + +// HasError returns true if |e| contains at least one error +func (e *NonFatalErrors) HasError() bool { + if e == nil { + return false + } + return len(e.Errors) > 0 +} + +// Append combines the contents of two NonFatalErrors instances. +func (e *NonFatalErrors) Append(more *NonFatalErrors) *NonFatalErrors { + if e == nil { + return more + } + if more == nil { + return e + } + combined := NonFatalErrors{Errors: make([]error, 0, len(e.Errors)+len(more.Errors))} + combined.Errors = append(combined.Errors, e.Errors...) + combined.Errors = append(combined.Errors, more.Errors...) + return &combined +} + +// IsFatal indicates whether an error is fatal. +func IsFatal(err error) bool { + if err == nil { + return false + } + if _, ok := err.(NonFatalErrors); ok { + return false + } + if errs, ok := err.(*Errors); ok { + return errs.Fatal() + } + return true +} + +func parseDistributionPoints(data []byte, crldp *[]string) error { + // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint + // + // DistributionPoint ::= SEQUENCE { + // distributionPoint [0] DistributionPointName OPTIONAL, + // reasons [1] ReasonFlags OPTIONAL, + // cRLIssuer [2] GeneralNames OPTIONAL } + // + // DistributionPointName ::= CHOICE { + // fullName [0] GeneralNames, + // nameRelativeToCRLIssuer [1] RelativeDistinguishedName } + + var cdp []distributionPoint + if rest, err := asn1.Unmarshal(data, &cdp); err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after X.509 CRL distribution point") + } + + for _, dp := range cdp { + // Per RFC 5280, 4.2.1.13, one of distributionPoint or cRLIssuer may be empty. 
+ if len(dp.DistributionPoint.FullName) == 0 { + continue + } + + for _, fullName := range dp.DistributionPoint.FullName { + if fullName.Tag == 6 { + *crldp = append(*crldp, string(fullName.Bytes)) + } + } + } + return nil +} + +func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error { + // RFC 5280, 4.2.1.6 + + // SubjectAltName ::= GeneralNames + // + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + rest, err := asn1.Unmarshal(extension, &seq) + if err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return err + } + + if err := callback(v.Tag, v.Bytes); err != nil { + return err + } + } + + return nil +} + +func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) { + err = forEachSAN(value, func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + emailAddresses = append(emailAddresses, string(data)) + case nameTypeDNS: + dnsNames = append(dnsNames, string(data)) + case nameTypeURI: + uri, err := url.Parse(string(data)) + if err != nil { + return fmt.Errorf("x509: cannot parse URI %q: %s", string(data), err) + } + if len(uri.Host) > 0 { + if _, ok := domainToReverseLabels(uri.Host); !ok { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", string(data)) + } + } + uris = append(uris, uri) + case nameTypeIP: + switch len(data) { + case net.IPv4len, net.IPv6len: + ipAddresses = append(ipAddresses, data) + default: + nfe.AddError(errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data)))) + } + } + + return nil + }) + + return +} + +// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits. 
+func isValidIPMask(mask []byte) bool { + seenZero := false + + for _, b := range mask { + if seenZero { + if b != 0 { + return false + } + + continue + } + + switch b { + case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe: + seenZero = true + case 0xff: + default: + return false + } + } + + return true +} + +func parseNameConstraintsExtension(out *Certificate, e pkix.Extension, nfe *NonFatalErrors) (unhandled bool, err error) { + // RFC 5280, 4.2.1.10 + + // NameConstraints ::= SEQUENCE { + // permittedSubtrees [0] GeneralSubtrees OPTIONAL, + // excludedSubtrees [1] GeneralSubtrees OPTIONAL } + // + // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree + // + // GeneralSubtree ::= SEQUENCE { + // base GeneralName, + // minimum [0] BaseDistance DEFAULT 0, + // maximum [1] BaseDistance OPTIONAL } + // + // BaseDistance ::= INTEGER (0..MAX) + + outer := cryptobyte.String(e.Value) + var toplevel, permitted, excluded cryptobyte.String + var havePermitted, haveExcluded bool + if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) || + !outer.Empty() || + !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) || + !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) || + !toplevel.Empty() { + return false, errors.New("x509: invalid NameConstraints extension") + } + + if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 { + // From RFC 5280, Section 4.2.1.10: + // “either the permittedSubtrees field + // or the excludedSubtrees MUST be + // present” + return false, errors.New("x509: empty name constraints extension") + } + + getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) { + for !subtrees.Empty() { + var seq, value cryptobyte.String + var tag cryptobyte_asn1.Tag + if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) || + !seq.ReadAnyASN1(&value, &tag) { + return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension") + } + + var ( + dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific() + emailTag = cryptobyte_asn1.Tag(1).ContextSpecific() + ipTag = cryptobyte_asn1.Tag(7).ContextSpecific() + uriTag = cryptobyte_asn1.Tag(6).ContextSpecific() + ) + + switch tag { + case dnsTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain + // itself, but that's not valid in a + // normal domain name. 
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse dnsName constraint %q", domain)) + } + dnsNames = append(dnsNames, domain) + + case ipTag: + l := len(value) + var ip, mask []byte + + switch l { + case 8: + ip = value[:4] + mask = value[4:] + + case 32: + ip = value[:16] + mask = value[16:] + + default: + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l) + } + + if !isValidIPMask(mask) { + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask) + } + + ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)}) + + case emailTag: + constraint := string(value) + if err := isIA5String(constraint); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + // If the constraint contains an @ then + // it specifies an exact mailbox name. + if strings.Contains(constraint, "@") { + if _, ok := parseRFC2821Mailbox(constraint); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)) + } + } else { + // Otherwise it's a domain name. + domain := constraint + if len(domain) > 0 && domain[0] == '.' { + domain = domain[1:] + } + if _, ok := domainToReverseLabels(domain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)) + } + } + emails = append(emails, constraint) + + case uriTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + if net.ParseIP(domain) != nil { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain itself, + // but that's not valid in a normal + // domain name. + trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse URI constraint %q", domain)) + } + uriDomains = append(uriDomains, domain) + + default: + unhandled = true + } + } + + return dnsNames, ips, emails, uriDomains, nil + } + + if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil { + return false, err + } + if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil { + return false, err + } + out.PermittedDNSDomainsCritical = e.Critical + + return unhandled, nil +} + +func parseCertificate(in *certificate, tbsOnly bool) (*Certificate, error) { + var nfe NonFatalErrors + + // Certificates contain two signature algorithm identifier fields, + // one in the inner signed tbsCertificate structure and one in the + // outer unsigned certificate structure. RFC 5280 requires these + // fields match, but golang doesn't impose this restriction. Because + // the outer structure is not covered by the signature the algorithm + // field is entirely malleable. 
This allows a user to bypass the + // leaf data uniqueness check that happens in trillian by altering + // the unbounded OID or parameter fields of the algorithmIdentifier + // structure and submit an infinite number of duplicate but slightly + // different looking certificates to a log. To avoid this directly + // compare the bytes of the two algorithmIdentifier structures + // and reject the certificate if they do not match. + if !tbsOnly && !bytes.Equal(in.SignatureAlgorithm.Raw, in.TBSCertificate.SignatureAlgorithm.Raw) { + return nil, errors.New("x509: mismatching signature algorithm identifiers") + } + + out := new(Certificate) + out.Raw = in.Raw + out.RawTBSCertificate = in.TBSCertificate.Raw + out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw + out.RawSubject = in.TBSCertificate.Subject.FullBytes + out.RawIssuer = in.TBSCertificate.Issuer.FullBytes + + out.Signature = in.SignatureValue.RightAlign() + out.SignatureAlgorithm = SignatureAlgorithmFromAI(in.TBSCertificate.SignatureAlgorithm) + + out.PublicKeyAlgorithm = + getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm) + var err error + out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey, &nfe) + if err != nil { + return nil, err + } + + out.Version = in.TBSCertificate.Version + 1 + out.SerialNumber = in.TBSCertificate.SerialNumber + + var issuer, subject pkix.RDNSequence + if rest, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Subject.FullBytes, &subject, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject") + } + if rest, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Issuer.FullBytes, &issuer, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject") + } + + out.Issuer.FillFromRDNSequence(&issuer) + out.Subject.FillFromRDNSequence(&subject) + + out.NotBefore = in.TBSCertificate.Validity.NotBefore + out.NotAfter = in.TBSCertificate.Validity.NotAfter + + for _, e := range in.TBSCertificate.Extensions { + out.Extensions = append(out.Extensions, e) + unhandled := false + + if len(e.Id) == 4 && e.Id[0] == OIDExtensionArc[0] && e.Id[1] == OIDExtensionArc[1] && e.Id[2] == OIDExtensionArc[2] { + switch e.Id[3] { + case OIDExtensionKeyUsage[3]: + // RFC 5280, 4.2.1.3 + var usageBits asn1.BitString + if rest, err := asn1.Unmarshal(e.Value, &usageBits); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 KeyUsage") + } + + var usage int + for i := 0; i < 9; i++ { + if usageBits.At(i) != 0 { + usage |= 1 << uint(i) + } + } + out.KeyUsage = KeyUsage(usage) + + case OIDExtensionBasicConstraints[3]: + // RFC 5280, 4.2.1.9 + var constraints basicConstraints + if rest, err := asn1.Unmarshal(e.Value, &constraints); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 BasicConstraints") + } + + out.BasicConstraintsValid = true + out.IsCA = constraints.IsCA + out.MaxPathLen = constraints.MaxPathLen + out.MaxPathLenZero = out.MaxPathLen == 0 + // TODO: map out.MaxPathLen to 0 if it has the -1 default 
value? (Issue 19285) + + case OIDExtensionSubjectAltName[3]: + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value, &nfe) + if err != nil { + return nil, err + } + + if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 { + // If we didn't parse anything then we do the critical check, below. + unhandled = true + } + + case OIDExtensionNameConstraints[3]: + unhandled, err = parseNameConstraintsExtension(out, e, &nfe) + if err != nil { + return nil, err + } + + case OIDExtensionCRLDistributionPoints[3]: + // RFC 5280, 4.2.1.13 + if err := parseDistributionPoints(e.Value, &out.CRLDistributionPoints); err != nil { + return nil, err + } + + case OIDExtensionAuthorityKeyId[3]: + // RFC 5280, 4.2.1.1 + var a authKeyId + if rest, err := asn1.Unmarshal(e.Value, &a); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 authority key-id") + } + out.AuthorityKeyId = a.Id + + case OIDExtensionExtendedKeyUsage[3]: + // RFC 5280, 4.2.1.12. Extended Key Usage + + // id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } + // + // ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId + // + // KeyPurposeId ::= OBJECT IDENTIFIER + + var keyUsage []asn1.ObjectIdentifier + if len(e.Value) == 0 { + nfe.AddError(errors.New("x509: empty ExtendedKeyUsage")) + } else { + rest, err := asn1.Unmarshal(e.Value, &keyUsage) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(e.Value, &keyUsage, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage") + } + } + + for _, u := range keyUsage { + if extKeyUsage, ok := extKeyUsageFromOID(u); ok { + out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage) + } else { + out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u) + } + } + + case OIDExtensionSubjectKeyId[3]: + // RFC 5280, 4.2.1.2 + var keyid []byte + if rest, err := asn1.Unmarshal(e.Value, &keyid); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 key-id") + } + out.SubjectKeyId = keyid + + case OIDExtensionCertificatePolicies[3]: + // RFC 5280 4.2.1.4: Certificate Policies + var policies []policyInformation + if rest, err := asn1.Unmarshal(e.Value, &policies); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 certificate policies") + } + out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies)) + for i, policy := range policies { + out.PolicyIdentifiers[i] = policy.Policy + } + + default: + // Unknown extensions are recorded if critical. 
+ unhandled = true + } + } else if e.Id.Equal(OIDExtensionAuthorityInfoAccess) { + // RFC 5280 4.2.2.1: Authority Information Access + var aia []accessDescription + if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 authority information") + } + if len(aia) == 0 { + nfe.AddError(errors.New("x509: empty AuthorityInfoAccess extension")) + } + + for _, v := range aia { + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != 6 { + continue + } + if v.Method.Equal(OIDAuthorityInfoAccessOCSP) { + out.OCSPServer = append(out.OCSPServer, string(v.Location.Bytes)) + } else if v.Method.Equal(OIDAuthorityInfoAccessIssuers) { + out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes)) + } + } + } else if e.Id.Equal(OIDExtensionSubjectInfoAccess) { + // RFC 5280 4.2.2.2: Subject Information Access + var sia []accessDescription + if rest, err := asn1.Unmarshal(e.Value, &sia); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject information") + } + if len(sia) == 0 { + nfe.AddError(errors.New("x509: empty SubjectInfoAccess extension")) + } + + for _, v := range sia { + // TODO(drysdale): cope with non-URI types of GeneralName + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != 6 { + continue + } + if v.Method.Equal(OIDSubjectInfoAccessTimestamp) { + out.SubjectTimestamps = append(out.SubjectTimestamps, string(v.Location.Bytes)) + } else if v.Method.Equal(OIDSubjectInfoAccessCARepo) { + out.SubjectCARepositories = append(out.SubjectCARepositories, string(v.Location.Bytes)) + } + } + } else if e.Id.Equal(OIDExtensionIPPrefixList) { + out.RPKIAddressRanges = parseRPKIAddrBlocks(e.Value, &nfe) + } else if e.Id.Equal(OIDExtensionASList) { + out.RPKIASNumbers, out.RPKIRoutingDomainIDs = parseRPKIASIdentifiers(e.Value, &nfe) + } else if e.Id.Equal(OIDExtensionCTSCT) { + if rest, err := asn1.Unmarshal(e.Value, &out.RawSCT); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal SCT list extension: %v", err)) + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ASN1-encoded SCT list")) + } else { + if rest, err := tls.Unmarshal(out.RawSCT, &out.SCTList); err != nil { + nfe.AddError(fmt.Errorf("failed to tls.Unmarshal SCT list: %v", err)) + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after TLS-encoded SCT list")) + } + } + } else { + // Unknown extensions are recorded if critical. + unhandled = true + } + + if e.Critical && unhandled { + out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id) + } + } + if nfe.HasError() { + return out, nfe + } + return out, nil +} + +// ParseTBSCertificate parses a single TBSCertificate from the given ASN.1 DER data. +// The parsed data is returned in a Certificate struct for ease of access. 
+func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) { + var tbsCert tbsCertificate + var nfe NonFatalErrors + rest, err := asn1.Unmarshal(asn1Data, &tbsCert) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &tbsCert, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + ret, err := parseCertificate(&certificate{ + Raw: tbsCert.Raw, + TBSCertificate: tbsCert}, true) + if err != nil { + errs, ok := err.(NonFatalErrors) + if !ok { + return nil, err + } + nfe.Errors = append(nfe.Errors, errs.Errors...) + } + if nfe.HasError() { + return ret, nfe + } + return ret, nil +} + +// ParseCertificate parses a single certificate from the given ASN.1 DER data. +// This function can return both a Certificate and an error (in which case the +// error will be of type NonFatalErrors). +func ParseCertificate(asn1Data []byte) (*Certificate, error) { + var cert certificate + var nfe NonFatalErrors + rest, err := asn1.Unmarshal(asn1Data, &cert) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &cert, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + ret, err := parseCertificate(&cert, false) + if err != nil { + errs, ok := err.(NonFatalErrors) + if !ok { + return nil, err + } + nfe.Errors = append(nfe.Errors, errs.Errors...) + } + if nfe.HasError() { + return ret, nfe + } + return ret, nil +} + +// ParseCertificates parses one or more certificates from the given ASN.1 DER +// data. The certificates must be concatenated with no intermediate padding. +// This function can return both a slice of Certificate and an error (in which +// case the error will be of type NonFatalErrors). +func ParseCertificates(asn1Data []byte) ([]*Certificate, error) { + var v []*certificate + var nfe NonFatalErrors + + for len(asn1Data) > 0 { + cert := new(certificate) + var err error + asn1Data, err = asn1.Unmarshal(asn1Data, cert) + if err != nil { + var laxErr error + asn1Data, laxErr = asn1.UnmarshalWithParams(asn1Data, &cert, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + v = append(v, cert) + } + + ret := make([]*Certificate, len(v)) + for i, ci := range v { + cert, err := parseCertificate(ci, false) + if err != nil { + errs, ok := err.(NonFatalErrors) + if !ok { + return nil, err + } + nfe.Errors = append(nfe.Errors, errs.Errors...) + } + ret[i] = cert + } + + if nfe.HasError() { + return ret, nfe + } + return ret, nil +} + +func reverseBitsInAByte(in byte) byte { + b1 := in>>4 | in<<4 + b2 := b1>>2&0x33 | b1<<2&0xcc + b3 := b2>>1&0x55 | b2<<1&0xaa + return b3 +} + +// asn1BitLength returns the bit-length of bitString by considering the +// most-significant bit in a byte to be the "first" bit. This convention +// matches ASN.1, but differs from almost everything else. +func asn1BitLength(bitString []byte) int { + bitLen := len(bitString) * 8 + + for i := range bitString { + b := bitString[len(bitString)-i-1] + + for bit := uint(0); bit < 8; bit++ { + if (b>>bit)&1 == 1 { + return bitLen + } + bitLen-- + } + } + + return 0 +} + +// OID values for standard extensions from RFC 5280. 
+var ( + OIDExtensionArc = asn1.ObjectIdentifier{2, 5, 29} // id-ce RFC5280 s4.2.1 + OIDExtensionSubjectKeyId = asn1.ObjectIdentifier{2, 5, 29, 14} + OIDExtensionKeyUsage = asn1.ObjectIdentifier{2, 5, 29, 15} + OIDExtensionExtendedKeyUsage = asn1.ObjectIdentifier{2, 5, 29, 37} + OIDExtensionAuthorityKeyId = asn1.ObjectIdentifier{2, 5, 29, 35} + OIDExtensionBasicConstraints = asn1.ObjectIdentifier{2, 5, 29, 19} + OIDExtensionSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17} + OIDExtensionCertificatePolicies = asn1.ObjectIdentifier{2, 5, 29, 32} + OIDExtensionNameConstraints = asn1.ObjectIdentifier{2, 5, 29, 30} + OIDExtensionCRLDistributionPoints = asn1.ObjectIdentifier{2, 5, 29, 31} + OIDExtensionIssuerAltName = asn1.ObjectIdentifier{2, 5, 29, 18} + OIDExtensionSubjectDirectoryAttributes = asn1.ObjectIdentifier{2, 5, 29, 9} + OIDExtensionInhibitAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 54} + OIDExtensionPolicyConstraints = asn1.ObjectIdentifier{2, 5, 29, 36} + OIDExtensionPolicyMappings = asn1.ObjectIdentifier{2, 5, 29, 33} + OIDExtensionFreshestCRL = asn1.ObjectIdentifier{2, 5, 29, 46} + + OIDExtensionAuthorityInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1} + OIDExtensionSubjectInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11} + + // OIDExtensionCTPoison is defined in RFC 6962 s3.1. + OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} + // OIDExtensionCTSCT is defined in RFC 6962 s3.3. + OIDExtensionCTSCT = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} + // OIDExtensionIPPrefixList is defined in RFC 3779 s2. + OIDExtensionIPPrefixList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 7} + // OIDExtensionASList is defined in RFC 3779 s3. + OIDExtensionASList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 8} +) + +var ( + OIDAuthorityInfoAccessOCSP = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1} + OIDAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2} + OIDSubjectInfoAccessTimestamp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 3} + OIDSubjectInfoAccessCARepo = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 5} + OIDAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 32, 0} +) + +// oidInExtensions reports whether an extension with the given oid exists in +// extensions. +func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool { + for _, e := range extensions { + if e.Id.Equal(oid) { + return true + } + } + return false +} + +// marshalSANs marshals a list of addresses into a the contents of an X.509 +// SubjectAlternativeName extension. +func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) (derBytes []byte, err error) { + var rawValues []asn1.RawValue + for _, name := range dnsNames { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: asn1.ClassContextSpecific, Bytes: []byte(name)}) + } + for _, email := range emailAddresses { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: asn1.ClassContextSpecific, Bytes: []byte(email)}) + } + for _, rawIP := range ipAddresses { + // If possible, we always want to encode IPv4 addresses in 4 bytes. 
+ ip := rawIP.To4() + if ip == nil { + ip = rawIP + } + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: asn1.ClassContextSpecific, Bytes: ip}) + } + for _, uri := range uris { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: asn1.ClassContextSpecific, Bytes: []byte(uri.String())}) + } + return asn1.Marshal(rawValues) +} + +func isIA5String(s string) error { + for _, r := range s { + if r >= utf8.RuneSelf { + return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s) + } + } + + return nil +} + +func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte) (ret []pkix.Extension, err error) { + ret = make([]pkix.Extension, 12 /* maximum number of elements. */) + n := 0 + + if template.KeyUsage != 0 && + !oidInExtensions(OIDExtensionKeyUsage, template.ExtraExtensions) { + ret[n].Id = OIDExtensionKeyUsage + ret[n].Critical = true + + var a [2]byte + a[0] = reverseBitsInAByte(byte(template.KeyUsage)) + a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8)) + + l := 1 + if a[1] != 0 { + l = 2 + } + + bitString := a[:l] + ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: bitString, BitLength: asn1BitLength(bitString)}) + if err != nil { + return + } + n++ + } + + if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) && + !oidInExtensions(OIDExtensionExtendedKeyUsage, template.ExtraExtensions) { + ret[n].Id = OIDExtensionExtendedKeyUsage + + var oids []asn1.ObjectIdentifier + for _, u := range template.ExtKeyUsage { + if oid, ok := oidFromExtKeyUsage(u); ok { + oids = append(oids, oid) + } else { + panic("internal error") + } + } + + oids = append(oids, template.UnknownExtKeyUsage...) + + ret[n].Value, err = asn1.Marshal(oids) + if err != nil { + return + } + n++ + } + + if template.BasicConstraintsValid && !oidInExtensions(OIDExtensionBasicConstraints, template.ExtraExtensions) { + // Leaving MaxPathLen as zero indicates that no maximum path + // length is desired, unless MaxPathLenZero is set. A value of + // -1 causes encoding/asn1 to omit the value as desired. 
+ maxPathLen := template.MaxPathLen + if maxPathLen == 0 && !template.MaxPathLenZero { + maxPathLen = -1 + } + ret[n].Id = OIDExtensionBasicConstraints + ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, maxPathLen}) + ret[n].Critical = true + if err != nil { + return + } + n++ + } + + if len(template.SubjectKeyId) > 0 && !oidInExtensions(OIDExtensionSubjectKeyId, template.ExtraExtensions) { + ret[n].Id = OIDExtensionSubjectKeyId + ret[n].Value, err = asn1.Marshal(template.SubjectKeyId) + if err != nil { + return + } + n++ + } + + if len(authorityKeyId) > 0 && !oidInExtensions(OIDExtensionAuthorityKeyId, template.ExtraExtensions) { + ret[n].Id = OIDExtensionAuthorityKeyId + ret[n].Value, err = asn1.Marshal(authKeyId{authorityKeyId}) + if err != nil { + return + } + n++ + } + + if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) && + !oidInExtensions(OIDExtensionAuthorityInfoAccess, template.ExtraExtensions) { + ret[n].Id = OIDExtensionAuthorityInfoAccess + var aiaValues []accessDescription + for _, name := range template.OCSPServer { + aiaValues = append(aiaValues, accessDescription{ + Method: OIDAuthorityInfoAccessOCSP, + Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)}, + }) + } + for _, name := range template.IssuingCertificateURL { + aiaValues = append(aiaValues, accessDescription{ + Method: OIDAuthorityInfoAccessIssuers, + Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)}, + }) + } + ret[n].Value, err = asn1.Marshal(aiaValues) + if err != nil { + return + } + n++ + } + + if (len(template.SubjectTimestamps) > 0 || len(template.SubjectCARepositories) > 0) && + !oidInExtensions(OIDExtensionSubjectInfoAccess, template.ExtraExtensions) { + ret[n].Id = OIDExtensionSubjectInfoAccess + var siaValues []accessDescription + for _, ts := range template.SubjectTimestamps { + siaValues = append(siaValues, accessDescription{ + Method: OIDSubjectInfoAccessTimestamp, + Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(ts)}, + }) + } + for _, repo := range template.SubjectCARepositories { + siaValues = append(siaValues, accessDescription{ + Method: OIDSubjectInfoAccessCARepo, + Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(repo)}, + }) + } + ret[n].Value, err = asn1.Marshal(siaValues) + if err != nil { + return + } + n++ + } + + if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) && + !oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) { + ret[n].Id = OIDExtensionSubjectAltName + // From RFC 5280, Section 4.2.1.6: + // “If the subject field contains an empty sequence ... then + // subjectAltName extension ... 
is marked as critical” + ret[n].Critical = subjectIsEmpty + ret[n].Value, err = marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs) + if err != nil { + return + } + n++ + } + + if len(template.PolicyIdentifiers) > 0 && + !oidInExtensions(OIDExtensionCertificatePolicies, template.ExtraExtensions) { + ret[n].Id = OIDExtensionCertificatePolicies + policies := make([]policyInformation, len(template.PolicyIdentifiers)) + for i, policy := range template.PolicyIdentifiers { + policies[i].Policy = policy + } + ret[n].Value, err = asn1.Marshal(policies) + if err != nil { + return + } + n++ + } + + if (len(template.PermittedDNSDomains) > 0 || len(template.ExcludedDNSDomains) > 0 || + len(template.PermittedIPRanges) > 0 || len(template.ExcludedIPRanges) > 0 || + len(template.PermittedEmailAddresses) > 0 || len(template.ExcludedEmailAddresses) > 0 || + len(template.PermittedURIDomains) > 0 || len(template.ExcludedURIDomains) > 0) && + !oidInExtensions(OIDExtensionNameConstraints, template.ExtraExtensions) { + ret[n].Id = OIDExtensionNameConstraints + ret[n].Critical = template.PermittedDNSDomainsCritical + + ipAndMask := func(ipNet *net.IPNet) []byte { + maskedIP := ipNet.IP.Mask(ipNet.Mask) + ipAndMask := make([]byte, 0, len(maskedIP)+len(ipNet.Mask)) + ipAndMask = append(ipAndMask, maskedIP...) + ipAndMask = append(ipAndMask, ipNet.Mask...) + return ipAndMask + } + + serialiseConstraints := func(dns []string, ips []*net.IPNet, emails []string, uriDomains []string) (der []byte, err error) { + var b cryptobyte.Builder + + for _, name := range dns { + if err = isIA5String(name); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(2).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(name)) + }) + }) + } + + for _, ipNet := range ips { + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(7).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes(ipAndMask(ipNet)) + }) + }) + } + + for _, email := range emails { + if err = isIA5String(email); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(email)) + }) + }) + } + + for _, uriDomain := range uriDomains { + if err = isIA5String(uriDomain); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(6).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(uriDomain)) + }) + }) + } + + return b.Bytes() + } + + permitted, err := serialiseConstraints(template.PermittedDNSDomains, template.PermittedIPRanges, template.PermittedEmailAddresses, template.PermittedURIDomains) + if err != nil { + return nil, err + } + + excluded, err := serialiseConstraints(template.ExcludedDNSDomains, template.ExcludedIPRanges, template.ExcludedEmailAddresses, template.ExcludedURIDomains) + if err != nil { + return nil, err + } + + var b cryptobyte.Builder + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + if len(permitted) > 0 { + b.AddASN1(cryptobyte_asn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddBytes(permitted) + }) + } + + if len(excluded) > 0 { + b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddBytes(excluded) + }) + } + }) + + 
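+ // Flatten the assembled NameConstraints SEQUENCE from the cryptobyte builder into the extension value.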
ret[n].Value, err = b.Bytes() + if err != nil { + return nil, err + } + n++ + } + + if len(template.CRLDistributionPoints) > 0 && + !oidInExtensions(OIDExtensionCRLDistributionPoints, template.ExtraExtensions) { + ret[n].Id = OIDExtensionCRLDistributionPoints + + var crlDp []distributionPoint + for _, name := range template.CRLDistributionPoints { + dp := distributionPoint{ + DistributionPoint: distributionPointName{ + FullName: []asn1.RawValue{ + {Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)}, + }, + }, + } + crlDp = append(crlDp, dp) + } + + ret[n].Value, err = asn1.Marshal(crlDp) + if err != nil { + return + } + n++ + } + + if (len(template.RawSCT) > 0 || len(template.SCTList.SCTList) > 0) && !oidInExtensions(OIDExtensionCTSCT, template.ExtraExtensions) { + rawSCT := template.RawSCT + if len(template.SCTList.SCTList) > 0 { + rawSCT, err = tls.Marshal(template.SCTList) + if err != nil { + return + } + } + ret[n].Id = OIDExtensionCTSCT + ret[n].Value, err = asn1.Marshal(rawSCT) + if err != nil { + return + } + n++ + } + + // Adding another extension here? Remember to update the maximum number + // of elements in the make() at the top of the function and the list of + // template fields used in CreateCertificate documentation. + + return append(ret[:n], template.ExtraExtensions...), nil +} + +func subjectBytes(cert *Certificate) ([]byte, error) { + if len(cert.RawSubject) > 0 { + return cert.RawSubject, nil + } + + return asn1.Marshal(cert.Subject.ToRDNSequence()) +} + +// signingParamsForPublicKey returns the parameters to use for signing with +// priv. If requestedSigAlgo is not zero then it overrides the default +// signature algorithm. +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.NullRawValue + + case *ecdsa.PublicKey: + pubType = ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + case ed25519.PublicKey: + pubType = Ed25519 + sigAlgo.Algorithm = oidSignatureEd25519 + + default: + err = errors.New("x509: only RSA, ECDSA and Ed25519 keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 && pubType != Ed25519 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + if requestedSigAlgo.isRSAPSS() { + sigAlgo.Parameters = rsaPSSParameters(hashFunc) + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// emptyASN1Subject is the ASN.1 DER encoding of an empty Subject, which is +// just an empty SEQUENCE. 
+var emptyASN1Subject = []byte{0x30, 0} + +// CreateCertificate creates a new X.509v3 certificate based on a template. +// The following members of template are used: +// - SerialNumber +// - Subject +// - NotBefore, NotAfter +// - SignatureAlgorithm +// - For extensions: +// - KeyUsage +// - ExtKeyUsage, UnknownExtKeyUsage +// - BasicConstraintsValid, IsCA, MaxPathLen, MaxPathLenZero +// - SubjectKeyId +// - AuthorityKeyId +// - OCSPServer, IssuingCertificateURL +// - SubjectTimestamps, SubjectCARepositories +// - DNSNames, EmailAddresses, IPAddresses, URIs +// - PolicyIdentifiers +// - ExcludedDNSDomains, ExcludedIPRanges, ExcludedEmailAddresses, ExcludedURIDomains, PermittedDNSDomainsCritical, +// PermittedDNSDomains, PermittedIPRanges, PermittedEmailAddresses, PermittedURIDomains +// - CRLDistributionPoints +// - RawSCT, SCTList +// - ExtraExtensions +// +// The certificate is signed by parent. If parent is equal to template then the +// certificate is self-signed. The parameter pub is the public key of the +// signee and priv is the private key of the signer. +// +// The returned slice is the certificate in DER encoding. +// +// The currently supported key types are *rsa.PublicKey, *ecdsa.PublicKey and +// ed25519.PublicKey. pub must be a supported key type, and priv must be a +// crypto.Signer with a supported public key. +// +// The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any, +// unless the resulting certificate is self-signed. Otherwise the value from +// template will be used. +func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv interface{}) (cert []byte, err error) { + key, ok := priv.(crypto.Signer) + if !ok { + return nil, errors.New("x509: certificate private key does not implement crypto.Signer") + } + + if template.SerialNumber == nil { + return nil, errors.New("x509: no SerialNumber given") + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + publicKeyBytes, publicKeyAlgorithm, err := marshalPublicKey(pub) + if err != nil { + return nil, err + } + + asn1Issuer, err := subjectBytes(parent) + if err != nil { + return + } + + asn1Subject, err := subjectBytes(template) + if err != nil { + return + } + + authorityKeyId := template.AuthorityKeyId + if !bytes.Equal(asn1Issuer, asn1Subject) && len(parent.SubjectKeyId) > 0 { + authorityKeyId = parent.SubjectKeyId + } + + extensions, err := buildExtensions(template, bytes.Equal(asn1Subject, emptyASN1Subject), authorityKeyId) + if err != nil { + return + } + + encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes} + c := tbsCertificate{ + Version: 2, + SerialNumber: template.SerialNumber, + SignatureAlgorithm: signatureAlgorithm, + Issuer: asn1.RawValue{FullBytes: asn1Issuer}, + Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()}, + Subject: asn1.RawValue{FullBytes: asn1Subject}, + PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey}, + Extensions: extensions, + } + + tbsCertContents, err := asn1.Marshal(c) + if err != nil { + return + } + c.Raw = tbsCertContents + + signed := tbsCertContents + if hashFunc != 0 { + h := hashFunc.New() + h.Write(signed) + signed = h.Sum(nil) + } + + var signerOpts crypto.SignerOpts = hashFunc + if template.SignatureAlgorithm != 0 && template.SignatureAlgorithm.isRSAPSS() { + signerOpts = &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + Hash: hashFunc, + } + } + + 
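+ // Sign the serialized TBSCertificate (pre-hashed unless the scheme hashes internally, e.g. Ed25519) and wrap it in the outer certificate structure.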
var signature []byte + signature, err = key.Sign(rand, signed, signerOpts) + if err != nil { + return + } + + return asn1.Marshal(certificate{ + nil, + c, + signatureAlgorithm, + asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, + }) +} + +// pemCRLPrefix is the magic string that indicates that we have a PEM encoded +// CRL. +var pemCRLPrefix = []byte("-----BEGIN X509 CRL") + +// pemType is the type of a PEM encoded CRL. +var pemType = "X509 CRL" + +// ParseCRL parses a CRL from the given bytes. It's often the case that PEM +// encoded CRLs will appear where they should be DER encoded, so this function +// will transparently handle PEM encoding as long as there isn't any leading +// garbage. +func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) { + if bytes.HasPrefix(crlBytes, pemCRLPrefix) { + block, _ := pem.Decode(crlBytes) + if block != nil && block.Type == pemType { + crlBytes = block.Bytes + } + } + return ParseDERCRL(crlBytes) +} + +// ParseDERCRL parses a DER encoded CRL from the given bytes. +func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) { + certList := new(pkix.CertificateList) + if rest, err := asn1.Unmarshal(derBytes, certList); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after CRL") + } + return certList, nil +} + +// CreateCRL returns a DER encoded CRL, signed by this Certificate, that +// contains the given list of revoked certificates. +func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) { + key, ok := priv.(crypto.Signer) + if !ok { + return nil, errors.New("x509: certificate private key does not implement crypto.Signer") + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), 0) + if err != nil { + return nil, err + } + + // Force revocation times to UTC per RFC 5280. + revokedCertsUTC := make([]pkix.RevokedCertificate, len(revokedCerts)) + for i, rc := range revokedCerts { + rc.RevocationTime = rc.RevocationTime.UTC() + revokedCertsUTC[i] = rc + } + + tbsCertList := pkix.TBSCertificateList{ + Version: 1, + Signature: signatureAlgorithm, + Issuer: c.Subject.ToRDNSequence(), + ThisUpdate: now.UTC(), + NextUpdate: expiry.UTC(), + RevokedCertificates: revokedCertsUTC, + } + + // Authority Key Id + if len(c.SubjectKeyId) > 0 { + var aki pkix.Extension + aki.Id = OIDExtensionAuthorityKeyId + aki.Value, err = asn1.Marshal(authKeyId{Id: c.SubjectKeyId}) + if err != nil { + return + } + tbsCertList.Extensions = append(tbsCertList.Extensions, aki) + } + + tbsCertListContents, err := asn1.Marshal(tbsCertList) + if err != nil { + return + } + + signed := tbsCertListContents + if hashFunc != 0 { + h := hashFunc.New() + h.Write(signed) + signed = h.Sum(nil) + } + + var signature []byte + signature, err = key.Sign(rand, signed, hashFunc) + if err != nil { + return + } + + return asn1.Marshal(pkix.CertificateList{ + TBSCertList: tbsCertList, + SignatureAlgorithm: signatureAlgorithm, + SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, + }) +} + +// CertificateRequest represents a PKCS #10, certificate signature request. +type CertificateRequest struct { + Raw []byte // Complete ASN.1 DER content (CSR, signature algorithm and signature). + RawTBSCertificateRequest []byte // Certificate request info part of raw ASN.1 DER content. + RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo. 
+ RawSubject []byte // DER encoded Subject. + + Version int + Signature []byte + SignatureAlgorithm SignatureAlgorithm + + PublicKeyAlgorithm PublicKeyAlgorithm + PublicKey interface{} + + Subject pkix.Name + + // Attributes contains the CSR attributes that can parse as + // pkix.AttributeTypeAndValueSET. + // + // Deprecated: Use Extensions and ExtraExtensions instead for parsing and + // generating the requestedExtensions attribute. + Attributes []pkix.AttributeTypeAndValueSET + + // Extensions contains all requested extensions, in raw form. When parsing + // CSRs, this can be used to extract extensions that are not parsed by this + // package. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any CSR + // marshaled by CreateCertificateRequest. Values override any extensions + // that would otherwise be produced based on the other fields but are + // overridden by any extensions specified in Attributes. + // + // The ExtraExtensions field is not populated by ParseCertificateRequest, + // see Extensions instead. + ExtraExtensions []pkix.Extension + + // Subject Alternate Name values. + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL +} + +// These structures reflect the ASN.1 structure of X.509 certificate +// signature requests (see RFC 2986): + +type tbsCertificateRequest struct { + Raw asn1.RawContent + Version int + Subject asn1.RawValue + PublicKey publicKeyInfo + RawAttributes []asn1.RawValue `asn1:"tag:0"` +} + +type certificateRequest struct { + Raw asn1.RawContent + TBSCSR tbsCertificateRequest + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// oidExtensionRequest is a PKCS#9 OBJECT IDENTIFIER that indicates requested +// extensions in a CSR. +var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14} + +// newRawAttributes converts AttributeTypeAndValueSETs from a template +// CertificateRequest's Attributes into tbsCertificateRequest RawAttributes. +func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) { + var rawAttributes []asn1.RawValue + b, err := asn1.Marshal(attributes) + if err != nil { + return nil, err + } + rest, err := asn1.Unmarshal(b, &rawAttributes) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, errors.New("x509: failed to unmarshal raw CSR Attributes") + } + return rawAttributes, nil +} + +// parseRawAttributes Unmarshals RawAttributes into AttributeTypeAndValueSETs. +func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET { + var attributes []pkix.AttributeTypeAndValueSET + for _, rawAttr := range rawAttributes { + var attr pkix.AttributeTypeAndValueSET + rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr) + // Ignore attributes that don't parse into pkix.AttributeTypeAndValueSET + // (i.e.: challengePassword or unstructuredName). + if err == nil && len(rest) == 0 { + attributes = append(attributes, attr) + } + } + return attributes +} + +// parseCSRExtensions parses the attributes from a CSR and extracts any +// requested extensions. +func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) { + // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1. 
+ type pkcs10Attribute struct { + Id asn1.ObjectIdentifier + Values []asn1.RawValue `asn1:"set"` + } + + var ret []pkix.Extension + for _, rawAttr := range rawAttributes { + var attr pkcs10Attribute + if rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr); err != nil || len(rest) != 0 || len(attr.Values) == 0 { + // Ignore attributes that don't parse. + continue + } + + if !attr.Id.Equal(oidExtensionRequest) { + continue + } + + var extensions []pkix.Extension + if _, err := asn1.Unmarshal(attr.Values[0].FullBytes, &extensions); err != nil { + return nil, err + } + ret = append(ret, extensions...) + } + + return ret, nil +} + +// CreateCertificateRequest creates a new certificate request based on a +// template. The following members of template are used: +// +// - SignatureAlgorithm +// - Subject +// - DNSNames +// - EmailAddresses +// - IPAddresses +// - URIs +// - ExtraExtensions +// - Attributes (deprecated) +// +// priv is the private key to sign the CSR with, and the corresponding public +// key will be included in the CSR. It must implement crypto.Signer and its +// Public() method must return a *rsa.PublicKey or a *ecdsa.PublicKey or a +// ed25519.PublicKey. (A *rsa.PrivateKey, *ecdsa.PrivateKey or +// ed25519.PrivateKey satisfies this.) +// +// The returned slice is the certificate request in DER encoding. +func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv interface{}) (csr []byte, err error) { + key, ok := priv.(crypto.Signer) + if !ok { + return nil, errors.New("x509: certificate private key does not implement crypto.Signer") + } + + var hashFunc crypto.Hash + var sigAlgo pkix.AlgorithmIdentifier + hashFunc, sigAlgo, err = signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + var publicKeyBytes []byte + var publicKeyAlgorithm pkix.AlgorithmIdentifier + publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(key.Public()) + if err != nil { + return nil, err + } + + var extensions []pkix.Extension + + if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) && + !oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) { + sanBytes, err := marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs) + if err != nil { + return nil, err + } + + extensions = append(extensions, pkix.Extension{ + Id: OIDExtensionSubjectAltName, + Value: sanBytes, + }) + } + + extensions = append(extensions, template.ExtraExtensions...) + + // Make a copy of template.Attributes because we may alter it below. + attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes)) + for _, attr := range template.Attributes { + values := make([][]pkix.AttributeTypeAndValue, len(attr.Value)) + copy(values, attr.Value) + attributes = append(attributes, pkix.AttributeTypeAndValueSET{ + Type: attr.Type, + Value: values, + }) + } + + extensionsAppended := false + if len(extensions) > 0 { + // Append the extensions to an existing attribute if possible. + for _, atvSet := range attributes { + if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 { + continue + } + + // specifiedExtensions contains all the extensions that we + // found specified via template.Attributes. 
+ specifiedExtensions := make(map[string]bool) + + for _, atvs := range atvSet.Value { + for _, atv := range atvs { + specifiedExtensions[atv.Type.String()] = true + } + } + + newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions)) + newValue = append(newValue, atvSet.Value[0]...) + + for _, e := range extensions { + if specifiedExtensions[e.Id.String()] { + // Attributes already contained a value for + // this extension and it takes priority. + continue + } + + newValue = append(newValue, pkix.AttributeTypeAndValue{ + // There is no place for the critical + // flag in an AttributeTypeAndValue. + Type: e.Id, + Value: e.Value, + }) + } + + atvSet.Value[0] = newValue + extensionsAppended = true + break + } + } + + rawAttributes, err := newRawAttributes(attributes) + if err != nil { + return + } + + // If not included in attributes, add a new attribute for the + // extensions. + if len(extensions) > 0 && !extensionsAppended { + attr := struct { + Type asn1.ObjectIdentifier + Value [][]pkix.Extension `asn1:"set"` + }{ + Type: oidExtensionRequest, + Value: [][]pkix.Extension{extensions}, + } + + b, err := asn1.Marshal(attr) + if err != nil { + return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error()) + } + + var rawValue asn1.RawValue + if _, err := asn1.Unmarshal(b, &rawValue); err != nil { + return nil, err + } + + rawAttributes = append(rawAttributes, rawValue) + } + + asn1Subject := template.RawSubject + if len(asn1Subject) == 0 { + asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence()) + if err != nil { + return nil, err + } + } + + tbsCSR := tbsCertificateRequest{ + Version: 0, // PKCS #10, RFC 2986 + Subject: asn1.RawValue{FullBytes: asn1Subject}, + PublicKey: publicKeyInfo{ + Algorithm: publicKeyAlgorithm, + PublicKey: asn1.BitString{ + Bytes: publicKeyBytes, + BitLength: len(publicKeyBytes) * 8, + }, + }, + RawAttributes: rawAttributes, + } + + tbsCSRContents, err := asn1.Marshal(tbsCSR) + if err != nil { + return + } + tbsCSR.Raw = tbsCSRContents + + signed := tbsCSRContents + if hashFunc != 0 { + h := hashFunc.New() + h.Write(signed) + signed = h.Sum(nil) + } + + var signature []byte + signature, err = key.Sign(rand, signed, hashFunc) + if err != nil { + return + } + + return asn1.Marshal(certificateRequest{ + TBSCSR: tbsCSR, + SignatureAlgorithm: sigAlgo, + SignatureValue: asn1.BitString{ + Bytes: signature, + BitLength: len(signature) * 8, + }, + }) +} + +// ParseCertificateRequest parses a single certificate request from the +// given ASN.1 DER data. 
+func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) { + var csr certificateRequest + + rest, err := asn1.Unmarshal(asn1Data, &csr) + if err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + return parseCertificateRequest(&csr) +} + +func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) { + out := &CertificateRequest{ + Raw: in.Raw, + RawTBSCertificateRequest: in.TBSCSR.Raw, + RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw, + RawSubject: in.TBSCSR.Subject.FullBytes, + + Signature: in.SignatureValue.RightAlign(), + SignatureAlgorithm: SignatureAlgorithmFromAI(in.SignatureAlgorithm), + + PublicKeyAlgorithm: getPublicKeyAlgorithmFromOID(in.TBSCSR.PublicKey.Algorithm.Algorithm), + + Version: in.TBSCSR.Version, + Attributes: parseRawAttributes(in.TBSCSR.RawAttributes), + } + + var err error + var nfe NonFatalErrors + out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey, &nfe) + if err != nil { + return nil, err + } + // Treat non-fatal errors as fatal here. + if len(nfe.Errors) > 0 { + return nil, nfe.Errors[0] + } + + var subject pkix.RDNSequence + if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 Subject") + } + + out.Subject.FillFromRDNSequence(&subject) + + if out.Extensions, err = parseCSRExtensions(in.TBSCSR.RawAttributes); err != nil { + return nil, err + } + + for _, extension := range out.Extensions { + if extension.Id.Equal(OIDExtensionSubjectAltName) { + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value, &nfe) + if err != nil { + return nil, err + } + } + } + + return out, nil +} + +// CheckSignature reports whether the signature on c is valid. +func (c *CertificateRequest) CheckSignature() error { + return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificateRequest, c.Signature, c.PublicKey) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/files.go b/vendor/github.com/google/certificate-transparency-go/x509util/files.go new file mode 100644 index 000000000000..823ac7375a92 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/files.go @@ -0,0 +1,116 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509util + +import ( + "encoding/pem" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + + "github.com/google/certificate-transparency-go/x509" +) + +// ReadPossiblePEMFile loads data from a file which may be in DER format +// or may be in PEM format (with the given blockname). 
+func ReadPossiblePEMFile(filename, blockname string) ([][]byte, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("%s: failed to read data: %v", filename, err) + } + return dePEM(data, blockname), nil +} + +// ReadPossiblePEMURL attempts to determine if the given target is a local file or a +// URL, and return the file contents regardless. It also copes with either PEM or DER +// format data. +func ReadPossiblePEMURL(target, blockname string) ([][]byte, error) { + if !strings.HasPrefix(target, "http://") && !strings.HasPrefix(target, "https://") { + // Assume it's a filename + return ReadPossiblePEMFile(target, blockname) + } + + rsp, err := http.Get(target) + if err != nil { + return nil, fmt.Errorf("failed to http.Get(%q): %v", target, err) + } + data, err := io.ReadAll(rsp.Body) + if err != nil { + return nil, fmt.Errorf("failed to io.ReadAll(%q): %v", target, err) + } + return dePEM(data, blockname), nil +} + +func dePEM(data []byte, blockname string) [][]byte { + var results [][]byte + if strings.Contains(string(data), "BEGIN "+blockname) { + rest := data + for { + var block *pem.Block + block, rest = pem.Decode(rest) + if block == nil { + break + } + if block.Type == blockname { + results = append(results, block.Bytes) + } + } + } else { + results = append(results, data) + } + return results +} + +// ReadFileOrURL returns the data from a target which may be either a filename +// or an HTTP(S) URL. +func ReadFileOrURL(target string, client *http.Client) ([]byte, error) { + u, err := url.Parse(target) + if err != nil || (u.Scheme != "http" && u.Scheme != "https") { + return os.ReadFile(target) + } + + rsp, err := client.Get(u.String()) + if err != nil { + return nil, fmt.Errorf("failed to http.Get(%q): %v", target, err) + } + return io.ReadAll(rsp.Body) +} + +// GetIssuer attempts to retrieve the issuer for a certificate, by examining +// the cert's Authority Information Access extension (if present) for the +// issuer's URL and retrieving from there. +func GetIssuer(cert *x509.Certificate, client *http.Client) (*x509.Certificate, error) { + if len(cert.IssuingCertificateURL) == 0 { + return nil, nil + } + issuerURL := cert.IssuingCertificateURL[0] + rsp, err := client.Get(issuerURL) + if err != nil || rsp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get issuer from %q: %v", issuerURL, err) + } + defer rsp.Body.Close() + body, err := io.ReadAll(rsp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read issuer from %q: %v", issuerURL, err) + } + issuers, err := x509.ParseCertificates(body) + if err != nil { + return nil, fmt.Errorf("failed to parse issuer cert: %v", err) + } + return issuers[0], nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go b/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go new file mode 100644 index 000000000000..c21bd6505898 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go @@ -0,0 +1,118 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509util + +import ( + "crypto/sha256" + "encoding/pem" + "errors" + "fmt" + "os" + + "github.com/google/certificate-transparency-go/x509" +) + +// String for certificate blocks in BEGIN / END PEM headers +const pemCertificateBlockType string = "CERTIFICATE" + +// PEMCertPool is a wrapper / extension to x509.CertPool. It allows us to access the +// raw certs, which we need to serve get-roots request and has stricter handling on loading +// certs into the pool. CertPool ignores errors if at least one cert loads correctly but +// PEMCertPool requires all certs to load. +type PEMCertPool struct { + // maps from sha-256 to certificate, used for dup detection + fingerprintToCertMap map[[sha256.Size]byte]x509.Certificate + rawCerts []*x509.Certificate + certPool *x509.CertPool +} + +// NewPEMCertPool creates a new, empty, instance of PEMCertPool. +func NewPEMCertPool() *PEMCertPool { + return &PEMCertPool{fingerprintToCertMap: make(map[[sha256.Size]byte]x509.Certificate), certPool: x509.NewCertPool()} +} + +// AddCert adds a certificate to a pool. Uses fingerprint to weed out duplicates. +// cert must not be nil. +func (p *PEMCertPool) AddCert(cert *x509.Certificate) { + fingerprint := sha256.Sum256(cert.Raw) + _, ok := p.fingerprintToCertMap[fingerprint] + + if !ok { + p.fingerprintToCertMap[fingerprint] = *cert + p.certPool.AddCert(cert) + p.rawCerts = append(p.rawCerts, cert) + } +} + +// Included indicates whether the given cert is included in the pool. +func (p *PEMCertPool) Included(cert *x509.Certificate) bool { + fingerprint := sha256.Sum256(cert.Raw) + _, ok := p.fingerprintToCertMap[fingerprint] + return ok +} + +// AppendCertsFromPEM adds certs to the pool from a byte slice assumed to contain PEM encoded data. +// Skips over non certificate blocks in the data. Returns true if all certificates in the +// data were parsed and added to the pool successfully and at least one certificate was found. +func (p *PEMCertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) { + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != pemCertificateBlockType || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if x509.IsFatal(err) { + return false + } + + p.AddCert(cert) + ok = true + } + + return +} + +// AppendCertsFromPEMFile adds certs from a file that contains concatenated PEM data. +func (p *PEMCertPool) AppendCertsFromPEMFile(pemFile string) error { + pemData, err := os.ReadFile(pemFile) + if err != nil { + return fmt.Errorf("failed to load PEM certs file: %v", err) + } + + if !p.AppendCertsFromPEM(pemData) { + return errors.New("failed to parse PEM certs file") + } + return nil +} + +// Subjects returns a list of the DER-encoded subjects of all of the certificates in the pool. +func (p *PEMCertPool) Subjects() (res [][]byte) { + return p.certPool.Subjects() +} + +// CertPool returns the underlying CertPool. 
+func (p *PEMCertPool) CertPool() *x509.CertPool { + return p.certPool +} + +// RawCertificates returns a list of the raw bytes of certificates that are in this pool +func (p *PEMCertPool) RawCertificates() []*x509.Certificate { + return p.rawCerts +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go b/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go new file mode 100644 index 000000000000..e0927292630b --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go @@ -0,0 +1,169 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509util + +import ( + "bytes" + "encoding/hex" + "fmt" + "strconv" + + "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// RevocationReasonToString generates a string describing a revocation reason code. +func RevocationReasonToString(reason x509.RevocationReasonCode) string { + switch reason { + case x509.Unspecified: + return "Unspecified" + case x509.KeyCompromise: + return "Key Compromise" + case x509.CACompromise: + return "CA Compromise" + case x509.AffiliationChanged: + return "Affiliation Changed" + case x509.Superseded: + return "Superseded" + case x509.CessationOfOperation: + return "Cessation Of Operation" + case x509.CertificateHold: + return "Certificate Hold" + case x509.RemoveFromCRL: + return "Remove From CRL" + case x509.PrivilegeWithdrawn: + return "Privilege Withdrawn" + case x509.AACompromise: + return "AA Compromise" + default: + return strconv.Itoa(int(reason)) + } +} + +// CRLToString generates a string describing the given certificate revocation list. +// The output roughly resembles that from openssl crl -text. 
+func CRLToString(crl *x509.CertificateList) string { + var result bytes.Buffer + var showCritical = func(critical bool) { + if critical { + result.WriteString(" critical") + } + result.WriteString("\n") + } + result.WriteString("Certificate Revocation List (CRL):\n") + result.WriteString(fmt.Sprintf(" Version: %d (%#x)\n", crl.TBSCertList.Version+1, crl.TBSCertList.Version)) + result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", x509.SignatureAlgorithmFromAI(crl.TBSCertList.Signature))) + var issuer pkix.Name + issuer.FillFromRDNSequence(&crl.TBSCertList.Issuer) + result.WriteString(fmt.Sprintf(" Issuer: %v\n", NameToString(issuer))) + result.WriteString(fmt.Sprintf(" Last Update: %v\n", crl.TBSCertList.ThisUpdate)) + result.WriteString(fmt.Sprintf(" Next Update: %v\n", crl.TBSCertList.NextUpdate)) + + if len(crl.TBSCertList.Extensions) > 0 { + result.WriteString(" CRL extensions:\n") + } + + count, critical := OIDInExtensions(x509.OIDExtensionAuthorityKeyId, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Authority Key Identifier:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(crl.TBSCertList.AuthorityKeyID))) + } + count, critical = OIDInExtensions(x509.OIDExtensionIssuerAltName, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Issuer Alt Name:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&crl.TBSCertList.IssuerAltNames))) + } + count, critical = OIDInExtensions(x509.OIDExtensionCRLNumber, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 CRLNumber:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %d\n", crl.TBSCertList.CRLNumber)) + } + count, critical = OIDInExtensions(x509.OIDExtensionDeltaCRLIndicator, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Delta CRL Indicator:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %d\n", crl.TBSCertList.BaseCRLNumber)) + } + count, critical = OIDInExtensions(x509.OIDExtensionIssuingDistributionPoint, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Issuing Distribution Point:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&crl.TBSCertList.IssuingDPFullNames))) + } + count, critical = OIDInExtensions(x509.OIDExtensionFreshestCRL, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Freshest CRL:") + showCritical(critical) + result.WriteString(" Full Name:\n") + var buf bytes.Buffer + for _, pt := range crl.TBSCertList.FreshestCRLDistributionPoint { + commaAppend(&buf, "URI:"+pt) + } + result.WriteString(fmt.Sprintf(" %v\n", buf.String())) + } + count, critical = OIDInExtensions(x509.OIDExtensionAuthorityInfoAccess, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" Authority Information Access:") + showCritical(critical) + var issuerBuf bytes.Buffer + for _, issuer := range crl.TBSCertList.IssuingCertificateURL { + commaAppend(&issuerBuf, "URI:"+issuer) + } + if issuerBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" CA Issuers - %v\n", issuerBuf.String())) + } + var ocspBuf bytes.Buffer + for _, ocsp := range crl.TBSCertList.OCSPServer { + commaAppend(&ocspBuf, "URI:"+ocsp) + } + if ocspBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" OCSP - %v\n", ocspBuf.String())) + } + // TODO(drysdale): Display other GeneralName types + } + + result.WriteString("\n") + result.WriteString("Revoked Certificates:\n") + for 
_, c := range crl.TBSCertList.RevokedCertificates { + result.WriteString(fmt.Sprintf(" Serial Number: %s (0x%s)\n", c.SerialNumber.Text(10), c.SerialNumber.Text(16))) + result.WriteString(fmt.Sprintf(" Revocation Date : %v\n", c.RevocationTime)) + count, critical = OIDInExtensions(x509.OIDExtensionCRLReasons, c.Extensions) + if count > 0 { + result.WriteString(" X509v3 CRL Reason Code:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", RevocationReasonToString(c.RevocationReason))) + } + count, critical = OIDInExtensions(x509.OIDExtensionInvalidityDate, c.Extensions) + if count > 0 { + result.WriteString(" Invalidity Date:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", c.InvalidityDate)) + } + count, critical = OIDInExtensions(x509.OIDExtensionCertificateIssuer, c.Extensions) + if count > 0 { + result.WriteString(" Issuer:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&c.Issuer))) + } + } + result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", x509.SignatureAlgorithmFromAI(crl.SignatureAlgorithm))) + appendHexData(&result, crl.SignatureValue.Bytes, 18, " ") + result.WriteString("\n") + + return result.String() +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go b/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go new file mode 100644 index 000000000000..d3c20e1aa9ed --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go @@ -0,0 +1,900 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package x509util includes utility code for working with X.509 +// certificates from the x509 package. +package x509util + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "net" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/gossip/minimal/x509ext" + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// OIDForStandardExtension indicates whether oid identifies a standard extension. +// Standard extensions are listed in RFC 5280 (and other RFCs). 
+func OIDForStandardExtension(oid asn1.ObjectIdentifier) bool { + if oid.Equal(x509.OIDExtensionSubjectKeyId) || + oid.Equal(x509.OIDExtensionKeyUsage) || + oid.Equal(x509.OIDExtensionExtendedKeyUsage) || + oid.Equal(x509.OIDExtensionAuthorityKeyId) || + oid.Equal(x509.OIDExtensionBasicConstraints) || + oid.Equal(x509.OIDExtensionSubjectAltName) || + oid.Equal(x509.OIDExtensionCertificatePolicies) || + oid.Equal(x509.OIDExtensionNameConstraints) || + oid.Equal(x509.OIDExtensionCRLDistributionPoints) || + oid.Equal(x509.OIDExtensionIssuerAltName) || + oid.Equal(x509.OIDExtensionSubjectDirectoryAttributes) || + oid.Equal(x509.OIDExtensionInhibitAnyPolicy) || + oid.Equal(x509.OIDExtensionPolicyConstraints) || + oid.Equal(x509.OIDExtensionPolicyMappings) || + oid.Equal(x509.OIDExtensionFreshestCRL) || + oid.Equal(x509.OIDExtensionSubjectInfoAccess) || + oid.Equal(x509.OIDExtensionAuthorityInfoAccess) || + oid.Equal(x509.OIDExtensionIPPrefixList) || + oid.Equal(x509.OIDExtensionASList) || + oid.Equal(x509.OIDExtensionCTPoison) || + oid.Equal(x509.OIDExtensionCTSCT) { + return true + } + return false +} + +// OIDInExtensions checks whether the extension identified by oid is present in extensions +// and returns how many times it occurs together with an indication of whether any of them +// are marked critical. +func OIDInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) (int, bool) { + count := 0 + critical := false + for _, ext := range extensions { + if ext.Id.Equal(oid) { + count++ + if ext.Critical { + critical = true + } + } + } + return count, critical +} + +// String formatting for various X.509/ASN.1 types +func bitStringToString(b asn1.BitString) string { // nolint:deadcode,unused + result := hex.EncodeToString(b.Bytes) + bitsLeft := b.BitLength % 8 + if bitsLeft != 0 { + result += " (" + strconv.Itoa(8-bitsLeft) + " unused bits)" + } + return result +} + +func publicKeyAlgorithmToString(algo x509.PublicKeyAlgorithm) string { + // Use OpenSSL-compatible strings for the algorithms. + switch algo { + case x509.RSA: + return "rsaEncryption" + case x509.DSA: + return "dsaEncryption" + case x509.ECDSA: + return "id-ecPublicKey" + default: + return strconv.Itoa(int(algo)) + } +} + +// appendHexData adds a hex dump of binary data to buf, with line breaks +// after each set of count bytes, and with each new line prefixed with the +// given prefix. 
+func appendHexData(buf *bytes.Buffer, data []byte, count int, prefix string) { + for ii, b := range data { + if ii%count == 0 { + if ii > 0 { + buf.WriteString("\n") + } + buf.WriteString(prefix) + } + buf.WriteString(fmt.Sprintf("%02x:", b)) + } +} + +func curveOIDToString(oid asn1.ObjectIdentifier) (t string, bitlen int) { + switch { + case oid.Equal(x509.OIDNamedCurveP224): + return "secp224r1", 224 + case oid.Equal(x509.OIDNamedCurveP256): + return "prime256v1", 256 + case oid.Equal(x509.OIDNamedCurveP384): + return "secp384r1", 384 + case oid.Equal(x509.OIDNamedCurveP521): + return "secp521r1", 521 + case oid.Equal(x509.OIDNamedCurveP192): + return "secp192r1", 192 + } + return fmt.Sprintf("%v", oid), -1 +} + +func publicKeyToString(_ x509.PublicKeyAlgorithm, pub interface{}) string { + var buf bytes.Buffer + switch pub := pub.(type) { + case *rsa.PublicKey: + bitlen := pub.N.BitLen() + buf.WriteString(fmt.Sprintf(" Public Key: (%d bit)\n", bitlen)) + buf.WriteString(" Modulus:\n") + data := pub.N.Bytes() + appendHexData(&buf, data, 15, " ") + buf.WriteString("\n") + buf.WriteString(fmt.Sprintf(" Exponent: %d (0x%x)", pub.E, pub.E)) + case *dsa.PublicKey: + buf.WriteString(" pub:\n") + appendHexData(&buf, pub.Y.Bytes(), 15, " ") + buf.WriteString("\n") + buf.WriteString(" P:\n") + appendHexData(&buf, pub.P.Bytes(), 15, " ") + buf.WriteString("\n") + buf.WriteString(" Q:\n") + appendHexData(&buf, pub.Q.Bytes(), 15, " ") + buf.WriteString("\n") + buf.WriteString(" G:\n") + appendHexData(&buf, pub.G.Bytes(), 15, " ") + case *ecdsa.PublicKey: + data := elliptic.Marshal(pub.Curve, pub.X, pub.Y) + oid, ok := x509.OIDFromNamedCurve(pub.Curve) + if !ok { + return " " + } + oidname, bitlen := curveOIDToString(oid) + buf.WriteString(fmt.Sprintf(" Public Key: (%d bit)\n", bitlen)) + buf.WriteString(" pub:\n") + appendHexData(&buf, data, 15, " ") + buf.WriteString("\n") + buf.WriteString(fmt.Sprintf(" ASN1 OID: %s", oidname)) + default: + buf.WriteString(fmt.Sprintf("%v", pub)) + } + return buf.String() +} + +func commaAppend(buf *bytes.Buffer, s string) { + if buf.Len() > 0 { + buf.WriteString(", ") + } + buf.WriteString(s) +} + +func keyUsageToString(k x509.KeyUsage) string { + var buf bytes.Buffer + if k&x509.KeyUsageDigitalSignature != 0 { + commaAppend(&buf, "Digital Signature") + } + if k&x509.KeyUsageContentCommitment != 0 { + commaAppend(&buf, "Content Commitment") + } + if k&x509.KeyUsageKeyEncipherment != 0 { + commaAppend(&buf, "Key Encipherment") + } + if k&x509.KeyUsageDataEncipherment != 0 { + commaAppend(&buf, "Data Encipherment") + } + if k&x509.KeyUsageKeyAgreement != 0 { + commaAppend(&buf, "Key Agreement") + } + if k&x509.KeyUsageCertSign != 0 { + commaAppend(&buf, "Certificate Signing") + } + if k&x509.KeyUsageCRLSign != 0 { + commaAppend(&buf, "CRL Signing") + } + if k&x509.KeyUsageEncipherOnly != 0 { + commaAppend(&buf, "Encipher Only") + } + if k&x509.KeyUsageDecipherOnly != 0 { + commaAppend(&buf, "Decipher Only") + } + return buf.String() +} + +func extKeyUsageToString(u x509.ExtKeyUsage) string { + switch u { + case x509.ExtKeyUsageAny: + return "Any" + case x509.ExtKeyUsageServerAuth: + return "TLS Web server authentication" + case x509.ExtKeyUsageClientAuth: + return "TLS Web client authentication" + case x509.ExtKeyUsageCodeSigning: + return "Signing of executable code" + case x509.ExtKeyUsageEmailProtection: + return "Email protection" + case x509.ExtKeyUsageIPSECEndSystem: + return "IPSEC end system" + case x509.ExtKeyUsageIPSECTunnel: + return "IPSEC tunnel" + 
case x509.ExtKeyUsageIPSECUser: + return "IPSEC user" + case x509.ExtKeyUsageTimeStamping: + return "Time stamping" + case x509.ExtKeyUsageOCSPSigning: + return "OCSP signing" + case x509.ExtKeyUsageMicrosoftServerGatedCrypto: + return "Microsoft server gated cryptography" + case x509.ExtKeyUsageNetscapeServerGatedCrypto: + return "Netscape server gated cryptography" + case x509.ExtKeyUsageCertificateTransparency: + return "Certificate transparency" + default: + return "Unknown" + } +} + +func attributeOIDToString(oid asn1.ObjectIdentifier) string { // nolint:deadcode,unused + switch { + case oid.Equal(pkix.OIDCountry): + return "Country" + case oid.Equal(pkix.OIDOrganization): + return "Organization" + case oid.Equal(pkix.OIDOrganizationalUnit): + return "OrganizationalUnit" + case oid.Equal(pkix.OIDCommonName): + return "CommonName" + case oid.Equal(pkix.OIDSerialNumber): + return "SerialNumber" + case oid.Equal(pkix.OIDLocality): + return "Locality" + case oid.Equal(pkix.OIDProvince): + return "Province" + case oid.Equal(pkix.OIDStreetAddress): + return "StreetAddress" + case oid.Equal(pkix.OIDPostalCode): + return "PostalCode" + case oid.Equal(pkix.OIDPseudonym): + return "Pseudonym" + case oid.Equal(pkix.OIDTitle): + return "Title" + case oid.Equal(pkix.OIDDnQualifier): + return "DnQualifier" + case oid.Equal(pkix.OIDName): + return "Name" + case oid.Equal(pkix.OIDSurname): + return "Surname" + case oid.Equal(pkix.OIDGivenName): + return "GivenName" + case oid.Equal(pkix.OIDInitials): + return "Initials" + case oid.Equal(pkix.OIDGenerationQualifier): + return "GenerationQualifier" + default: + return oid.String() + } +} + +// NameToString creates a string description of a pkix.Name object. +func NameToString(name pkix.Name) string { + var result bytes.Buffer + addSingle := func(prefix, item string) { + if len(item) == 0 { + return + } + commaAppend(&result, prefix) + result.WriteString(item) + } + addList := func(prefix string, items []string) { + for _, item := range items { + addSingle(prefix, item) + } + } + addList("C=", name.Country) + addList("O=", name.Organization) + addList("OU=", name.OrganizationalUnit) + addList("L=", name.Locality) + addList("ST=", name.Province) + addList("streetAddress=", name.StreetAddress) + addList("postalCode=", name.PostalCode) + addSingle("serialNumber=", name.SerialNumber) + addSingle("CN=", name.CommonName) + for _, atv := range name.Names { + value, ok := atv.Value.(string) + if !ok { + continue + } + t := atv.Type + // All of the defined attribute OIDs are of the form 2.5.4.N, and OIDAttribute is + // the 2.5.4 prefix ('id-at' in RFC 5280). + if len(t) == 4 && t[0] == pkix.OIDAttribute[0] && t[1] == pkix.OIDAttribute[1] && t[2] == pkix.OIDAttribute[2] { + // OID is 'id-at N', so check the final value to figure out which attribute. 
+ switch t[3] { + case pkix.OIDCommonName[3], pkix.OIDSerialNumber[3], pkix.OIDCountry[3], pkix.OIDLocality[3], pkix.OIDProvince[3], + pkix.OIDStreetAddress[3], pkix.OIDOrganization[3], pkix.OIDOrganizationalUnit[3], pkix.OIDPostalCode[3]: + continue // covered by explicit fields + case pkix.OIDPseudonym[3]: + addSingle("pseudonym=", value) + continue + case pkix.OIDTitle[3]: + addSingle("title=", value) + continue + case pkix.OIDDnQualifier[3]: + addSingle("dnQualifier=", value) + continue + case pkix.OIDName[3]: + addSingle("name=", value) + continue + case pkix.OIDSurname[3]: + addSingle("surname=", value) + continue + case pkix.OIDGivenName[3]: + addSingle("givenName=", value) + continue + case pkix.OIDInitials[3]: + addSingle("initials=", value) + continue + case pkix.OIDGenerationQualifier[3]: + addSingle("generationQualifier=", value) + continue + } + } + addSingle(t.String()+"=", value) + } + return result.String() +} + +// OtherNameToString creates a string description of an x509.OtherName object. +func OtherNameToString(other x509.OtherName) string { + return fmt.Sprintf("%v=%v", other.TypeID, hex.EncodeToString(other.Value.Bytes)) +} + +// GeneralNamesToString creates a string description of an x509.GeneralNames object. +func GeneralNamesToString(gname *x509.GeneralNames) string { + var buf bytes.Buffer + for _, name := range gname.DNSNames { + commaAppend(&buf, "DNS:"+name) + } + for _, email := range gname.EmailAddresses { + commaAppend(&buf, "email:"+email) + } + for _, name := range gname.DirectoryNames { + commaAppend(&buf, "DirName:"+NameToString(name)) + } + for _, uri := range gname.URIs { + commaAppend(&buf, "URI:"+uri) + } + for _, ip := range gname.IPNets { + if ip.Mask == nil { + commaAppend(&buf, "IP Address:"+ip.IP.String()) + } else { + commaAppend(&buf, "IP Address:"+ip.IP.String()+"/"+ip.Mask.String()) + } + } + for _, id := range gname.RegisteredIDs { + commaAppend(&buf, "Registered ID:"+id.String()) + } + for _, other := range gname.OtherNames { + commaAppend(&buf, "othername:"+OtherNameToString(other)) + } + return buf.String() +} + +// CertificateToString generates a string describing the given certificate. +// The output roughly resembles that from openssl x509 -text. 
+func CertificateToString(cert *x509.Certificate) string { + var result bytes.Buffer + result.WriteString("Certificate:\n") + result.WriteString(" Data:\n") + result.WriteString(fmt.Sprintf(" Version: %d (%#x)\n", cert.Version, cert.Version-1)) + result.WriteString(fmt.Sprintf(" Serial Number: %s (0x%s)\n", cert.SerialNumber.Text(10), cert.SerialNumber.Text(16))) + result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", cert.SignatureAlgorithm)) + result.WriteString(fmt.Sprintf(" Issuer: %v\n", NameToString(cert.Issuer))) + result.WriteString(" Validity:\n") + result.WriteString(fmt.Sprintf(" Not Before: %v\n", cert.NotBefore)) + result.WriteString(fmt.Sprintf(" Not After : %v\n", cert.NotAfter)) + result.WriteString(fmt.Sprintf(" Subject: %v\n", NameToString(cert.Subject))) + result.WriteString(" Subject Public Key Info:\n") + result.WriteString(fmt.Sprintf(" Public Key Algorithm: %v\n", publicKeyAlgorithmToString(cert.PublicKeyAlgorithm))) + result.WriteString(fmt.Sprintf("%v\n", publicKeyToString(cert.PublicKeyAlgorithm, cert.PublicKey))) + + if len(cert.Extensions) > 0 { + result.WriteString(" X509v3 extensions:\n") + } + // First display the extensions that are already cracked out + showAuthKeyID(&result, cert) + showSubjectKeyID(&result, cert) + showKeyUsage(&result, cert) + showExtendedKeyUsage(&result, cert) + showBasicConstraints(&result, cert) + showSubjectAltName(&result, cert) + showNameConstraints(&result, cert) + showCertPolicies(&result, cert) + showCRLDPs(&result, cert) + showAuthInfoAccess(&result, cert) + showSubjectInfoAccess(&result, cert) + showRPKIAddressRanges(&result, cert) + showRPKIASIdentifiers(&result, cert) + showCTPoison(&result, cert) + showCTSCT(&result, cert) + showCTLogSTHInfo(&result, cert) + + showUnhandledExtensions(&result, cert) + showSignature(&result, cert) + + return result.String() +} + +func showCritical(result *bytes.Buffer, critical bool) { + if critical { + result.WriteString(" critical") + } + result.WriteString("\n") +} + +func showAuthKeyID(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionAuthorityKeyId, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Authority Key Identifier:") + showCritical(result, critical) + result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(cert.AuthorityKeyId))) + } +} + +func showSubjectKeyID(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionSubjectKeyId, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Subject Key Identifier:") + showCritical(result, critical) + result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(cert.SubjectKeyId))) + } +} + +func showKeyUsage(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionKeyUsage, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Key Usage:") + showCritical(result, critical) + result.WriteString(fmt.Sprintf(" %v\n", keyUsageToString(cert.KeyUsage))) + } +} + +func showExtendedKeyUsage(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionExtendedKeyUsage, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Extended Key Usage:") + showCritical(result, critical) + var usages bytes.Buffer + for _, usage := range cert.ExtKeyUsage { + commaAppend(&usages, extKeyUsageToString(usage)) + } + for _, oid := range cert.UnknownExtKeyUsage { + commaAppend(&usages, oid.String()) + } + 
result.WriteString(fmt.Sprintf(" %v\n", usages.String())) + } +} + +func showBasicConstraints(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionBasicConstraints, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Basic Constraints:") + showCritical(result, critical) + result.WriteString(fmt.Sprintf(" CA:%t", cert.IsCA)) + if cert.MaxPathLen > 0 || cert.MaxPathLenZero { + result.WriteString(fmt.Sprintf(", pathlen:%d", cert.MaxPathLen)) + } + result.WriteString("\n") + } +} + +func showSubjectAltName(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionSubjectAltName, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Subject Alternative Name:") + showCritical(result, critical) + var buf bytes.Buffer + for _, name := range cert.DNSNames { + commaAppend(&buf, "DNS:"+name) + } + for _, email := range cert.EmailAddresses { + commaAppend(&buf, "email:"+email) + } + for _, ip := range cert.IPAddresses { + commaAppend(&buf, "IP Address:"+ip.String()) + } + + result.WriteString(fmt.Sprintf(" %v\n", buf.String())) + // TODO(drysdale): include other name forms + } +} + +func showNameConstraints(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionNameConstraints, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Name Constraints:") + showCritical(result, critical) + if len(cert.PermittedDNSDomains) > 0 { + result.WriteString(" Permitted:\n") + var buf bytes.Buffer + for _, name := range cert.PermittedDNSDomains { + commaAppend(&buf, "DNS:"+name) + } + result.WriteString(fmt.Sprintf(" %v\n", buf.String())) + } + // TODO(drysdale): include other name forms + } + +} + +func showCertPolicies(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionCertificatePolicies, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Certificate Policies:") + showCritical(result, critical) + for _, oid := range cert.PolicyIdentifiers { + result.WriteString(fmt.Sprintf(" Policy: %v\n", oid.String())) + // TODO(drysdale): Display any qualifiers associated with the policy + } + } + +} + +func showCRLDPs(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionCRLDistributionPoints, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 CRL Distribution Points:") + showCritical(result, critical) + result.WriteString(" Full Name:\n") + var buf bytes.Buffer + for _, pt := range cert.CRLDistributionPoints { + commaAppend(&buf, "URI:"+pt) + } + result.WriteString(fmt.Sprintf(" %v\n", buf.String())) + // TODO(drysdale): Display other GeneralNames types, plus issuer/reasons/relative-name + } + +} + +func showAuthInfoAccess(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionAuthorityInfoAccess, cert.Extensions) + if count > 0 { + result.WriteString(" Authority Information Access:") + showCritical(result, critical) + var issuerBuf bytes.Buffer + for _, issuer := range cert.IssuingCertificateURL { + commaAppend(&issuerBuf, "URI:"+issuer) + } + if issuerBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" CA Issuers - %v\n", issuerBuf.String())) + } + var ocspBuf bytes.Buffer + for _, ocsp := range cert.OCSPServer { + commaAppend(&ocspBuf, "URI:"+ocsp) + } + if ocspBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" OCSP - %v\n", ocspBuf.String())) + } + // TODO(drysdale): Display other 
GeneralNames types + } +} + +func showSubjectInfoAccess(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionSubjectInfoAccess, cert.Extensions) + if count > 0 { + result.WriteString(" Subject Information Access:") + showCritical(result, critical) + var tsBuf bytes.Buffer + for _, ts := range cert.SubjectTimestamps { + commaAppend(&tsBuf, "URI:"+ts) + } + if tsBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" AD Time Stamping - %v\n", tsBuf.String())) + } + var repoBuf bytes.Buffer + for _, repo := range cert.SubjectCARepositories { + commaAppend(&repoBuf, "URI:"+repo) + } + if repoBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" CA repository - %v\n", repoBuf.String())) + } + } +} + +func showAddressRange(prefix x509.IPAddressPrefix, afi uint16) string { + switch afi { + case x509.IPv4AddressFamilyIndicator, x509.IPv6AddressFamilyIndicator: + size := 4 + if afi == x509.IPv6AddressFamilyIndicator { + size = 16 + } + ip := make([]byte, size) + copy(ip, prefix.Bytes) + addr := net.IPNet{IP: ip, Mask: net.CIDRMask(prefix.BitLength, 8*size)} + return addr.String() + default: + return fmt.Sprintf("%x/%d", prefix.Bytes, prefix.BitLength) + } + +} + +func showRPKIAddressRanges(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionIPPrefixList, cert.Extensions) + if count > 0 { + result.WriteString(" sbgp-ipAddrBlock:") + showCritical(result, critical) + for _, blocks := range cert.RPKIAddressRanges { + afi := blocks.AFI + switch afi { + case x509.IPv4AddressFamilyIndicator: + result.WriteString(" IPv4") + case x509.IPv6AddressFamilyIndicator: + result.WriteString(" IPv6") + default: + result.WriteString(fmt.Sprintf(" %d", afi)) + } + if blocks.SAFI != 0 { + result.WriteString(fmt.Sprintf(" SAFI=%d", blocks.SAFI)) + } + result.WriteString(":") + if blocks.InheritFromIssuer { + result.WriteString(" inherit\n") + continue + } + result.WriteString("\n") + for _, prefix := range blocks.AddressPrefixes { + result.WriteString(fmt.Sprintf(" %s\n", showAddressRange(prefix, afi))) + } + for _, ipRange := range blocks.AddressRanges { + result.WriteString(fmt.Sprintf(" [%s, %s]\n", showAddressRange(ipRange.Min, afi), showAddressRange(ipRange.Max, afi))) + } + } + } +} + +func showASIDs(result *bytes.Buffer, asids *x509.ASIdentifiers, label string) { + if asids == nil { + return + } + result.WriteString(fmt.Sprintf(" %s:\n", label)) + if asids.InheritFromIssuer { + result.WriteString(" inherit\n") + return + } + for _, id := range asids.ASIDs { + result.WriteString(fmt.Sprintf(" %d\n", id)) + } + for _, idRange := range asids.ASIDRanges { + result.WriteString(fmt.Sprintf(" %d-%d\n", idRange.Min, idRange.Max)) + } +} + +func showRPKIASIdentifiers(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionASList, cert.Extensions) + if count > 0 { + result.WriteString(" sbgp-autonomousSysNum:") + showCritical(result, critical) + showASIDs(result, cert.RPKIASNumbers, "Autonomous System Numbers") + showASIDs(result, cert.RPKIRoutingDomainIDs, "Routing Domain Identifiers") + } +} +func showCTPoison(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionCTPoison, cert.Extensions) + if count > 0 { + result.WriteString(" RFC6962 Pre-Certificate Poison:") + showCritical(result, critical) + result.WriteString(" .....\n") + } +} + +func showCTSCT(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := 
OIDInExtensions(x509.OIDExtensionCTSCT, cert.Extensions) + if count > 0 { + result.WriteString(" RFC6962 Certificate Transparency SCT:") + showCritical(result, critical) + for i, sctData := range cert.SCTList.SCTList { + result.WriteString(fmt.Sprintf(" SCT [%d]:\n", i)) + var sct ct.SignedCertificateTimestamp + _, err := tls.Unmarshal(sctData.Val, &sct) + if err != nil { + appendHexData(result, sctData.Val, 16, " ") + result.WriteString("\n") + continue + } + result.WriteString(fmt.Sprintf(" Version: %d\n", sct.SCTVersion)) + result.WriteString(fmt.Sprintf(" LogID: %s\n", base64.StdEncoding.EncodeToString(sct.LogID.KeyID[:]))) + result.WriteString(fmt.Sprintf(" Timestamp: %d\n", sct.Timestamp)) + result.WriteString(fmt.Sprintf(" Signature: %s\n", sct.Signature.Algorithm)) + result.WriteString(" Signature:\n") + appendHexData(result, sct.Signature.Signature, 16, " ") + result.WriteString("\n") + } + } +} + +func showCTLogSTHInfo(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509ext.OIDExtensionCTSTH, cert.Extensions) + if count > 0 { + result.WriteString(" Certificate Transparency STH:") + showCritical(result, critical) + sthInfo, err := x509ext.LogSTHInfoFromCert(cert) + if err != nil { + result.WriteString(" Failed to decode STH:\n") + return + } + result.WriteString(fmt.Sprintf(" LogURL: %s\n", string(sthInfo.LogURL))) + result.WriteString(fmt.Sprintf(" Version: %d\n", sthInfo.Version)) + result.WriteString(fmt.Sprintf(" TreeSize: %d\n", sthInfo.TreeSize)) + result.WriteString(fmt.Sprintf(" Timestamp: %d\n", sthInfo.Timestamp)) + result.WriteString(" RootHash:\n") + appendHexData(result, sthInfo.SHA256RootHash[:], 16, " ") + result.WriteString("\n") + result.WriteString(fmt.Sprintf(" TreeHeadSignature: %s\n", sthInfo.TreeHeadSignature.Algorithm)) + appendHexData(result, sthInfo.TreeHeadSignature.Signature, 16, " ") + result.WriteString("\n") + } +} + +func showUnhandledExtensions(result *bytes.Buffer, cert *x509.Certificate) { + for _, ext := range cert.Extensions { + // Skip extensions that are already cracked out + if oidAlreadyPrinted(ext.Id) { + continue + } + result.WriteString(fmt.Sprintf(" %v:", ext.Id)) + showCritical(result, ext.Critical) + appendHexData(result, ext.Value, 16, " ") + result.WriteString("\n") + } +} + +func showSignature(result *bytes.Buffer, cert *x509.Certificate) { + result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", cert.SignatureAlgorithm)) + appendHexData(result, cert.Signature, 18, " ") + result.WriteString("\n") +} + +// TODO(drysdale): remove this once all standard OIDs are parsed and printed. 
+func oidAlreadyPrinted(oid asn1.ObjectIdentifier) bool { + if oid.Equal(x509.OIDExtensionSubjectKeyId) || + oid.Equal(x509.OIDExtensionKeyUsage) || + oid.Equal(x509.OIDExtensionExtendedKeyUsage) || + oid.Equal(x509.OIDExtensionAuthorityKeyId) || + oid.Equal(x509.OIDExtensionBasicConstraints) || + oid.Equal(x509.OIDExtensionSubjectAltName) || + oid.Equal(x509.OIDExtensionCertificatePolicies) || + oid.Equal(x509.OIDExtensionNameConstraints) || + oid.Equal(x509.OIDExtensionCRLDistributionPoints) || + oid.Equal(x509.OIDExtensionAuthorityInfoAccess) || + oid.Equal(x509.OIDExtensionSubjectInfoAccess) || + oid.Equal(x509.OIDExtensionIPPrefixList) || + oid.Equal(x509.OIDExtensionASList) || + oid.Equal(x509.OIDExtensionCTPoison) || + oid.Equal(x509.OIDExtensionCTSCT) || + oid.Equal(x509ext.OIDExtensionCTSTH) { + return true + } + return false +} + +// CertificateFromPEM takes a certificate in PEM format and returns the +// corresponding x509.Certificate object. +func CertificateFromPEM(pemBytes []byte) (*x509.Certificate, error) { + block, rest := pem.Decode(pemBytes) + if len(rest) != 0 { + return nil, errors.New("trailing data found after PEM block") + } + if block == nil { + return nil, errors.New("PEM block is nil") + } + if block.Type != "CERTIFICATE" { + return nil, errors.New("PEM block is not a CERTIFICATE") + } + return x509.ParseCertificate(block.Bytes) +} + +// CertificatesFromPEM parses one or more certificates from the given PEM data. +// The PEM certificates must be concatenated. This function can be used for +// parsing PEM-formatted certificate chains, but does not verify that the +// resulting chain is a valid certificate chain. +func CertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) { + var chain []*x509.Certificate + for { + var block *pem.Block + block, pemBytes = pem.Decode(pemBytes) + if block == nil { + return chain, nil + } + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("PEM block is not a CERTIFICATE") + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, errors.New("failed to parse certificate") + } + chain = append(chain, cert) + } +} + +// ParseSCTsFromSCTList parses each of the SCTs contained within an SCT list. +func ParseSCTsFromSCTList(sctList *x509.SignedCertificateTimestampList) ([]*ct.SignedCertificateTimestamp, error) { + var scts []*ct.SignedCertificateTimestamp + for i, data := range sctList.SCTList { + sct, err := ExtractSCT(&data) + if err != nil { + return nil, fmt.Errorf("error extracting SCT number %d: %s", i, err) + } + scts = append(scts, sct) + } + return scts, nil +} + +// ExtractSCT deserializes an SCT from a TLS-encoded SCT. +func ExtractSCT(sctData *x509.SerializedSCT) (*ct.SignedCertificateTimestamp, error) { + if sctData == nil { + return nil, errors.New("SCT is nil") + } + var sct ct.SignedCertificateTimestamp + if rest, err := tls.Unmarshal(sctData.Val, &sct); err != nil { + return nil, fmt.Errorf("error parsing SCT: %s", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("extra data (%d bytes) after serialized SCT", len(rest)) + } + return &sct, nil +} + +// MarshalSCTsIntoSCTList serializes SCTs into SCT list. 
+func MarshalSCTsIntoSCTList(scts []*ct.SignedCertificateTimestamp) (*x509.SignedCertificateTimestampList, error) { + var sctList x509.SignedCertificateTimestampList + sctList.SCTList = []x509.SerializedSCT{} + for i, sct := range scts { + if sct == nil { + return nil, fmt.Errorf("SCT number %d is nil", i) + } + encd, err := tls.Marshal(*sct) + if err != nil { + return nil, fmt.Errorf("error serializing SCT number %d: %s", i, err) + } + sctData := x509.SerializedSCT{Val: encd} + sctList.SCTList = append(sctList.SCTList, sctData) + } + return &sctList, nil +} + +var pemCertificatePrefix = []byte("-----BEGIN CERTIFICATE") + +// ParseSCTsFromCertificate parses any SCTs that are embedded in the +// certificate provided. The certificate bytes provided can be either DER or +// PEM, provided the PEM data starts with the PEM block marker (i.e. has no +// leading text). +func ParseSCTsFromCertificate(certBytes []byte) ([]*ct.SignedCertificateTimestamp, error) { + var cert *x509.Certificate + var err error + if bytes.HasPrefix(certBytes, pemCertificatePrefix) { + cert, err = CertificateFromPEM(certBytes) + } else { + cert, err = x509.ParseCertificate(certBytes) + } + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + return ParseSCTsFromSCTList(&cert.SCTList) +} diff --git a/vendor/github.com/google/go-containerregistry/LICENSE b/vendor/github.com/google/go-containerregistry/LICENSE new file mode 100644 index 000000000000..7a4a3ea2424c --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/vendor/github.com/google/go-containerregistry/pkg/name/README.md new file mode 100644 index 000000000000..4889b8446a4c --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/README.md @@ -0,0 +1,3 @@ +# `name` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name) diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/vendor/github.com/google/go-containerregistry/pkg/name/check.go new file mode 100644 index 000000000000..e9a240a3e5ab --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/check.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "strings" + "unicode/utf8" +) + +// stripRunesFn returns a function which returns -1 (i.e. a value which +// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise. +func stripRunesFn(runes string) func(rune) rune { + return func(r rune) rune { + if strings.ContainsRune(runes, r) { + return -1 + } + return r + } +} + +// checkElement checks a given named element matches character and length restrictions. +// Returns true if the given element adheres to the given restrictions, false otherwise. +func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error { + numRunes := utf8.RuneCountInString(element) + if (numRunes < minRunes) || (maxRunes < numRunes) { + return newErrBadName("%s must be between %d and %d characters in length: %s", name, minRunes, maxRunes, element) + } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 { + return newErrBadName("%s can only contain the characters `%s`: %s", name, allowedRunes, element) + } + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go new file mode 100644 index 000000000000..5b8eb4ff4651 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -0,0 +1,133 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + // nolint: depguard + _ "crypto/sha256" // Recommended by go-digest. + "encoding" + "encoding/json" + "strings" + + "github.com/opencontainers/go-digest" +) + +const digestDelim = "@" + +// Digest stores a digest name in a structured form. +type Digest struct { + Repository + digest string + original string +} + +var _ Reference = (*Digest)(nil) +var _ encoding.TextMarshaler = (*Digest)(nil) +var _ encoding.TextUnmarshaler = (*Digest)(nil) +var _ json.Marshaler = (*Digest)(nil) +var _ json.Unmarshaler = (*Digest)(nil) + +// Context implements Reference. +func (d Digest) Context() Repository { + return d.Repository +} + +// Identifier implements Reference. +func (d Digest) Identifier() string { + return d.DigestStr() +} + +// DigestStr returns the digest component of the Digest. +func (d Digest) DigestStr() string { + return d.digest +} + +// Name returns the name from which the Digest was derived. +func (d Digest) Name() string { + return d.Repository.Name() + digestDelim + d.DigestStr() +} + +// String returns the original input string. 
+func (d Digest) String() string { + return d.original +} + +// MarshalJSON formats the digest into a string for JSON serialization. +func (d Digest) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// UnmarshalJSON parses a JSON string into a Digest. +func (d *Digest) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewDigest(s) + if err != nil { + return err + } + *d = n + return nil +} + +// MarshalText formats the digest into a string for text serialization. +func (d Digest) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses a text string into a Digest. +func (d *Digest) UnmarshalText(data []byte) error { + n, err := NewDigest(string(data)) + if err != nil { + return err + } + *d = n + return nil +} + +// NewDigest returns a new Digest representing the given name. +func NewDigest(name string, opts ...Option) (Digest, error) { + // Split on "@" + parts := strings.Split(name, digestDelim) + if len(parts) != 2 { + return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) + } + base := parts[0] + dig := parts[1] + prefix := digest.Canonical.String() + ":" + if !strings.HasPrefix(dig, prefix) { + return Digest{}, newErrBadName("unsupported digest algorithm: %s", dig) + } + hex := strings.TrimPrefix(dig, prefix) + if err := digest.Canonical.Validate(hex); err != nil { + return Digest{}, err + } + + tag, err := NewTag(base, opts...) + if err == nil { + base = tag.Repository.Name() + } + + repo, err := NewRepository(base, opts...) + if err != nil { + return Digest{}, err + } + return Digest{ + Repository: repo, + digest: dig, + original: name, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go new file mode 100644 index 000000000000..b294794dc12f --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package name defines structured types for representing image references. +// +// What's in a name? For image references, not nearly enough! +// +// Image references look a lot like URLs, but they differ in that they don't +// contain the scheme (http or https), they can end with a :tag or a @digest +// (the latter being validated), and they perform defaulting for missing +// components. +// +// Since image references don't contain the scheme, we do our best to infer +// if we use http or https from the given hostname. We allow http fallback for +// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in +// ".local", or is in the "private" address space per RFC 1918. For everything +// else, we assume https only. To override this heuristic, use the Insecure +// option. 
+// +// Image references with a digest signal to us that we should verify the content +// of the image matches the digest. E.g. when pulling a Digest reference, we'll +// calculate the sha256 of the manifest returned by the registry and error out +// if it doesn't match what we asked for. +// +// For defaulting, we interpret "ubuntu" as +// "index.docker.io/library/ubuntu:latest" because we add the missing repo +// "library", the missing registry "index.docker.io", and the missing tag +// "latest". To disable this defaulting, use the StrictValidation option. This +// is useful e.g. to only allow image references that explicitly set a tag or +// digest, so that you don't accidentally pull "latest". +package name diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go new file mode 100644 index 000000000000..bf004ffcfb80 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "errors" + "fmt" +) + +// ErrBadName is an error for when a bad docker name is supplied. +type ErrBadName struct { + info string +} + +func (e *ErrBadName) Error() string { + return e.info +} + +// Is reports whether target is an error of type ErrBadName +func (e *ErrBadName) Is(target error) bool { + var berr *ErrBadName + return errors.As(target, &berr) +} + +// newErrBadName returns a ErrBadName which returns the given formatted string from Error(). +func newErrBadName(fmtStr string, args ...any) *ErrBadName { + return &ErrBadName{fmt.Sprintf(fmtStr, args...)} +} + +// IsErrBadName returns true if the given error is an ErrBadName. +// +// Deprecated: Use errors.Is. +func IsErrBadName(err error) bool { + var berr *ErrBadName + return errors.As(err, &berr) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/vendor/github.com/google/go-containerregistry/pkg/name/options.go new file mode 100644 index 000000000000..d14fedcdadf1 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/options.go @@ -0,0 +1,83 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +const ( + // DefaultRegistry is the registry name that will be used if no registry + // provided and the default is not overridden. 
+ DefaultRegistry = "index.docker.io" + defaultRegistryAlias = "docker.io" + + // DefaultTag is the tag name that will be used if no tag provided and the + // default is not overridden. + DefaultTag = "latest" +) + +type options struct { + strict bool // weak by default + insecure bool // secure by default + defaultRegistry string + defaultTag string +} + +func makeOptions(opts ...Option) options { + opt := options{ + defaultRegistry: DefaultRegistry, + defaultTag: DefaultTag, + } + for _, o := range opts { + o(&opt) + } + return opt +} + +// Option is a functional option for name parsing. +type Option func(*options) + +// StrictValidation is an Option that requires image references to be fully +// specified; i.e. no defaulting for registry (dockerhub), repo (library), +// or tag (latest). +func StrictValidation(opts *options) { + opts.strict = true +} + +// WeakValidation is an Option that sets defaults when parsing names, see +// StrictValidation. +func WeakValidation(opts *options) { + opts.strict = false +} + +// Insecure is an Option that allows image references to be fetched without TLS. +func Insecure(opts *options) { + opts.insecure = true +} + +// OptionFn is a function that returns an option. +type OptionFn func() Option + +// WithDefaultRegistry sets the default registry that will be used if one is not +// provided. +func WithDefaultRegistry(r string) Option { + return func(opts *options) { + opts.defaultRegistry = r + } +} + +// WithDefaultTag sets the default tag that will be used if one is not provided. +func WithDefaultTag(t string) Option { + return func(opts *options) { + opts.defaultTag = t + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go new file mode 100644 index 000000000000..0a0486772302 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -0,0 +1,75 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "fmt" +) + +// Reference defines the interface that consumers use when they can +// take either a tag or a digest. +type Reference interface { + fmt.Stringer + + // Context accesses the Repository context of the reference. + Context() Repository + + // Identifier accesses the type-specific portion of the reference. + Identifier() string + + // Name is the fully-qualified reference name. + Name() string + + // Scope is the scope needed to access this reference. + Scope(string) string +} + +// ParseReference parses the string as a reference, either by tag or digest. +func ParseReference(s string, opts ...Option) (Reference, error) { + if t, err := NewTag(s, opts...); err == nil { + return t, nil + } + if d, err := NewDigest(s, opts...); err == nil { + return d, nil + } + return nil, newErrBadName("could not parse reference: %s", s) +} + +type stringConst string + +// MustParseReference behaves like ParseReference, but panics instead of +// returning an error. 
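The options defined above compose with ParseReference; a brief sketch (illustrative only, not part of the vendored file; registry.example.com is a made-up host) of strict validation and overriding the defaults:

    package main

    import (
        "fmt"

        "github.com/google/go-containerregistry/pkg/name"
    )

    func main() {
        // Illustrative sketch, not part of the vendored sources.
        // StrictValidation rejects references that would need defaulting.
        if _, err := name.ParseReference("ubuntu", name.StrictValidation); err != nil {
            fmt.Println("rejected:", err)
        }

        // WithDefaultRegistry / WithDefaultTag substitute the defaults used in
        // place of index.docker.io and latest.
        ref, err := name.ParseReference("app",
            name.WithDefaultRegistry("registry.example.com"),
            name.WithDefaultTag("stable"),
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(ref.Name()) // registry.example.com/app:stable
    }
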
It's intended for use in tests, or when a value is +// expected to be valid at code authoring time. +// +// To discourage its use in scenarios where the value is not known at code +// authoring time, it must be passed a string constant: +// +// const str = "valid/string" +// MustParseReference(str) +// MustParseReference("another/valid/string") +// MustParseReference(str + "/and/more") +// +// These will not compile: +// +// var str = "valid/string" +// MustParseReference(str) +// MustParseReference(strings.Join([]string{"valid", "string"}, "/")) +func MustParseReference(s stringConst, opts ...Option) Reference { + ref, err := ParseReference(string(s), opts...) + if err != nil { + panic(err) + } + return ref +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go new file mode 100644 index 000000000000..5e6b6e62a040 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -0,0 +1,179 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "encoding" + "encoding/json" + "net" + "net/url" + "path" + "regexp" + "strings" +) + +// Detect more complex forms of local references. +var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) + +// Detect the loopback IP (127.0.0.1) +var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) + +// Detect the loopback IPV6 (::1) +var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1")) + +// Registry stores a docker registry name in a structured form. +type Registry struct { + insecure bool + registry string +} + +var _ encoding.TextMarshaler = (*Registry)(nil) +var _ encoding.TextUnmarshaler = (*Registry)(nil) +var _ json.Marshaler = (*Registry)(nil) +var _ json.Unmarshaler = (*Registry)(nil) + +// RegistryStr returns the registry component of the Registry. +func (r Registry) RegistryStr() string { + return r.registry +} + +// Name returns the name from which the Registry was derived. +func (r Registry) Name() string { + return r.RegistryStr() +} + +func (r Registry) String() string { + return r.Name() +} + +// Repo returns a Repository in the Registry with the given name. +func (r Registry) Repo(repo ...string) Repository { + return Repository{Registry: r, repository: path.Join(repo...)} +} + +// Scope returns the scope required to access the registry. +func (r Registry) Scope(string) string { + // The only resource under 'registry' is 'catalog'. 
http://goo.gl/N9cN9Z + return "registry:catalog:*" +} + +func (r Registry) isRFC1918() bool { + ipStr := strings.Split(r.Name(), ":")[0] + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { + _, block, _ := net.ParseCIDR(cidr) + if block.Contains(ip) { + return true + } + } + return false +} + +// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. +func (r Registry) Scheme() string { + if r.insecure { + return "http" + } + if r.isRFC1918() { + return "http" + } + if strings.HasPrefix(r.Name(), "localhost:") { + return "http" + } + if reLocal.MatchString(r.Name()) { + return "http" + } + if reLoopback.MatchString(r.Name()) { + return "http" + } + if reipv6Loopback.MatchString(r.Name()) { + return "http" + } + return "https" +} + +func checkRegistry(name string) error { + // Per RFC 3986, registries (authorities) are required to be prefixed with "//" + // url.Host == hostname[:port] == authority + if url, err := url.Parse("//" + name); err != nil || url.Host != name { + return newErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) + } + return nil +} + +// NewRegistry returns a Registry based on the given name. +// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. +func NewRegistry(name string, opts ...Option) (Registry, error) { + opt := makeOptions(opts...) + if opt.strict && len(name) == 0 { + return Registry{}, newErrBadName("strict validation requires the registry to be explicitly defined") + } + + if err := checkRegistry(name); err != nil { + return Registry{}, err + } + + if name == "" { + name = opt.defaultRegistry + } + // Rewrite "docker.io" to "index.docker.io". + // See: https://github.com/google/go-containerregistry/issues/68 + if name == defaultRegistryAlias { + name = DefaultRegistry + } + + return Registry{registry: name, insecure: opt.insecure}, nil +} + +// NewInsecureRegistry returns an Insecure Registry based on the given name. +// +// Deprecated: Use the Insecure Option with NewRegistry instead. +func NewInsecureRegistry(name string, opts ...Option) (Registry, error) { + opts = append(opts, Insecure) + return NewRegistry(name, opts...) +} + +// MarshalJSON formats the Registry into a string for JSON serialization. +func (r Registry) MarshalJSON() ([]byte, error) { return json.Marshal(r.String()) } + +// UnmarshalJSON parses a JSON string into a Registry. +func (r *Registry) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewRegistry(s) + if err != nil { + return err + } + *r = n + return nil +} + +// MarshalText formats the registry into a string for text serialization. +func (r Registry) MarshalText() ([]byte, error) { return []byte(r.String()), nil } + +// UnmarshalText parses a text string into a Registry. +func (r *Registry) UnmarshalText(data []byte) error { + n, err := NewRegistry(string(data)) + if err != nil { + return err + } + *r = n + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go new file mode 100644 index 000000000000..290797575ee1 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go @@ -0,0 +1,158 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "encoding" + "encoding/json" + "fmt" + "strings" +) + +const ( + defaultNamespace = "library" + repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" + regRepoDelimiter = "/" +) + +// Repository stores a docker repository name in a structured form. +type Repository struct { + Registry + repository string +} + +var _ encoding.TextMarshaler = (*Repository)(nil) +var _ encoding.TextUnmarshaler = (*Repository)(nil) +var _ json.Marshaler = (*Repository)(nil) +var _ json.Unmarshaler = (*Repository)(nil) + +// See https://docs.docker.com/docker-hub/official_repos +func hasImplicitNamespace(repo string, reg Registry) bool { + return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry +} + +// RepositoryStr returns the repository component of the Repository. +func (r Repository) RepositoryStr() string { + if hasImplicitNamespace(r.repository, r.Registry) { + return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) + } + return r.repository +} + +// Name returns the name from which the Repository was derived. +func (r Repository) Name() string { + regName := r.Registry.Name() + if regName != "" { + return regName + regRepoDelimiter + r.RepositoryStr() + } + // TODO: As far as I can tell, this is unreachable. + return r.RepositoryStr() +} + +func (r Repository) String() string { + return r.Name() +} + +// Scope returns the scope required to perform the given action on the registry. +// TODO(jonjohnsonjr): consider moving scopes to a separate package. +func (r Repository) Scope(action string) string { + return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) +} + +func checkRepository(repository string) error { + return checkElement("repository", repository, repositoryChars, 2, 255) +} + +// NewRepository returns a new Repository representing the given name, according to the given strictness. +func NewRepository(name string, opts ...Option) (Repository, error) { + opt := makeOptions(opts...) + if len(name) == 0 { + return Repository{}, newErrBadName("a repository name must be specified") + } + + var registry string + repo := name + parts := strings.SplitN(name, regRepoDelimiter, 2) + if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { + // The first part of the repository is treated as the registry domain + // iff it contains a '.' or ':' character, otherwise it is all repository + // and the domain defaults to Docker Hub. + registry = parts[0] + repo = parts[1] + } + + if err := checkRepository(repo); err != nil { + return Repository{}, err + } + + reg, err := NewRegistry(registry, opts...) + if err != nil { + return Repository{}, err + } + if hasImplicitNamespace(repo, reg) && opt.strict { + return Repository{}, newErrBadName("strict validation requires the full repository path (missing 'library')") + } + return Repository{reg, repo}, nil +} + +// Tag returns a Tag in this Repository. 
+func (r Repository) Tag(identifier string) Tag { + t := Tag{ + tag: identifier, + Repository: r, + } + t.original = t.Name() + return t +} + +// Digest returns a Digest in this Repository. +func (r Repository) Digest(identifier string) Digest { + d := Digest{ + digest: identifier, + Repository: r, + } + d.original = d.Name() + return d +} + +// MarshalJSON formats the Repository into a string for JSON serialization. +func (r Repository) MarshalJSON() ([]byte, error) { return json.Marshal(r.String()) } + +// UnmarshalJSON parses a JSON string into a Repository. +func (r *Repository) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewRepository(s) + if err != nil { + return err + } + *r = n + return nil +} + +// MarshalText formats the repository name into a string for text serialization. +func (r Repository) MarshalText() ([]byte, error) { return []byte(r.String()), nil } + +// UnmarshalText parses a text string into a Repository. +func (r *Repository) UnmarshalText(data []byte) error { + n, err := NewRepository(string(data)) + if err != nil { + return err + } + *r = n + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go new file mode 100644 index 000000000000..cfa923f59d85 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go @@ -0,0 +1,146 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "encoding" + "encoding/json" + "strings" +) + +const ( + // TODO(dekkagaijin): use the docker/distribution regexes for validation. + tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" + tagDelim = ":" +) + +// Tag stores a docker tag name in a structured form. +type Tag struct { + Repository + tag string + original string +} + +var _ Reference = (*Tag)(nil) +var _ encoding.TextMarshaler = (*Tag)(nil) +var _ encoding.TextUnmarshaler = (*Tag)(nil) +var _ json.Marshaler = (*Tag)(nil) +var _ json.Unmarshaler = (*Tag)(nil) + +// Context implements Reference. +func (t Tag) Context() Repository { + return t.Repository +} + +// Identifier implements Reference. +func (t Tag) Identifier() string { + return t.TagStr() +} + +// TagStr returns the tag component of the Tag. +func (t Tag) TagStr() string { + return t.tag +} + +// Name returns the name from which the Tag was derived. +func (t Tag) Name() string { + return t.Repository.Name() + tagDelim + t.TagStr() +} + +// String returns the original input string. +func (t Tag) String() string { + return t.original +} + +// Scope returns the scope required to perform the given action on the tag. 
+func (t Tag) Scope(action string) string { + return t.Repository.Scope(action) +} + +func checkTag(name string) error { + return checkElement("tag", name, tagChars, 1, 128) +} + +// NewTag returns a new Tag representing the given name, according to the given strictness. +func NewTag(name string, opts ...Option) (Tag, error) { + opt := makeOptions(opts...) + base := name + tag := "" + + // Split on ":" + parts := strings.Split(name, tagDelim) + // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. + if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { + base = strings.Join(parts[:len(parts)-1], tagDelim) + tag = parts[len(parts)-1] + if tag == "" { + return Tag{}, newErrBadName("%s must specify a tag name after the colon", name) + } + } + + // We don't require a tag, but if we get one check it's valid, + // even when not being strict. + // If we are being strict, we want to validate the tag regardless in case + // it's empty. + if tag != "" || opt.strict { + if err := checkTag(tag); err != nil { + return Tag{}, err + } + } + + if tag == "" { + tag = opt.defaultTag + } + + repo, err := NewRepository(base, opts...) + if err != nil { + return Tag{}, err + } + return Tag{ + Repository: repo, + tag: tag, + original: name, + }, nil +} + +// MarshalJSON formats the Tag into a string for JSON serialization. +func (t Tag) MarshalJSON() ([]byte, error) { return json.Marshal(t.String()) } + +// UnmarshalJSON parses a JSON string into a Tag. +func (t *Tag) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewTag(s) + if err != nil { + return err + } + *t = n + return nil +} + +// MarshalText formats the tag into a string for text serialization. +func (t Tag) MarshalText() ([]byte, error) { return []byte(t.String()), nil } + +// UnmarshalText parses a text string into a Tag. 
+func (t *Tag) UnmarshalText(data []byte) error { + n, err := NewTag(string(data)) + if err != nil { + return err + } + *t = n + return nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel new file mode 100644 index 000000000000..d71991e6e8db --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") + +package(default_visibility = ["//visibility:public"]) + +filegroup( + name = "options_proto_files", + srcs = [ + "annotations.proto", + "openapiv2.proto", + ], +) + +go_library( + name = "options", + embed = [":options_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options", +) + +proto_library( + name = "options_proto", + srcs = [ + "annotations.proto", + "openapiv2.proto", + ], + deps = [ + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:struct_proto", + ], +) + +go_proto_library( + name = "options_go_proto", + compilers = ["//:go_apiv2"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options", + proto = ":options_proto", +) + +alias( + name = "go_default_library", + actual = ":options", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go new file mode 100644 index 000000000000..738c9754a615 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go @@ -0,0 +1,269 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.0 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/annotations.proto + +//go:build !protoopaque + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Swagger)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger", + Tag: "bytes,1042,opt,name=openapiv2_swagger", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation", + Tag: "bytes,1042,opt,name=openapiv2_operation", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema", + Tag: "bytes,1042,opt,name=openapiv2_schema", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*EnumSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum", + Tag: "bytes,1042,opt,name=openapiv2_enum", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*Tag)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag", + Tag: "bytes,1042,opt,name=openapiv2_tag", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*JSONSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field", + Tag: "bytes,1042,opt,name=openapiv2_field", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042; + E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042; + E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042; + E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. 
It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042; + E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042; + E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042; + E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5] +) + +var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75, + 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74, + 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 
0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, + 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions + (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions + (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema +} +var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{ + 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions + 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions + 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions + 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions + 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions + 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions + 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger + 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation + 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 6, // [6:12] is the sub-list for extension type_name + 0, // [0:6] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() } +func file_protoc_gen_openapiv2_options_annotations_proto_init() { + if File_protoc_gen_openapiv2_options_annotations_proto != nil { + return + } + 
file_protoc_gen_openapiv2_options_openapiv2_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 6, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs, + ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes, + }.Build() + File_protoc_gen_openapiv2_options_annotations_proto = out.File + file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil + file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto new file mode 100644 index 000000000000..aecc5e709c32 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_openapiv2.options; + +import "google/protobuf/descriptor.proto"; +import "protoc-gen-openapiv2/options/openapiv2.proto"; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; + +extend google.protobuf.FileOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Swagger openapiv2_swagger = 1042; +} +extend google.protobuf.MethodOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Operation openapiv2_operation = 1042; +} +extend google.protobuf.MessageOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Schema openapiv2_schema = 1042; +} +extend google.protobuf.EnumOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + EnumSchema openapiv2_enum = 1042; +} +extend google.protobuf.ServiceOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Tag openapiv2_tag = 1042; +} +extend google.protobuf.FieldOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. 
+ JSONSchema openapiv2_field = 1042; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go new file mode 100644 index 000000000000..b570167836dc --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go @@ -0,0 +1,269 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.0 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/annotations.proto + +//go:build protoopaque + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Swagger)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger", + Tag: "bytes,1042,opt,name=openapiv2_swagger", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation", + Tag: "bytes,1042,opt,name=openapiv2_operation", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema", + Tag: "bytes,1042,opt,name=openapiv2_schema", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*EnumSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum", + Tag: "bytes,1042,opt,name=openapiv2_enum", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*Tag)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag", + Tag: "bytes,1042,opt,name=openapiv2_tag", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*JSONSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field", + Tag: "bytes,1042,opt,name=openapiv2_field", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042; + E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. 
+var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042; + E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042; + E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042; + E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042; + E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. 
+ // + // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042; + E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5] +) + +var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 
0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75, + 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74, + 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, + 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions + (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions + (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions + 
(*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema +} +var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{ + 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions + 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions + 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions + 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions + 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions + 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions + 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger + 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation + 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 6, // [6:12] is the sub-list for extension type_name + 0, // [0:6] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() } +func file_protoc_gen_openapiv2_options_annotations_proto_init() { + if File_protoc_gen_openapiv2_options_annotations_proto != nil { + return + } + file_protoc_gen_openapiv2_options_openapiv2_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 6, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs, + ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes, + }.Build() + File_protoc_gen_openapiv2_options_annotations_proto = out.File + file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil + file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml new file mode 100644 index 000000000000..07dfb958f1e6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml @@ -0,0 +1,7 @@ +version: v2 +plugins: + - remote: buf.build/protocolbuffers/go:v1.36.0 + out: . + opt: + - paths=source_relative + - default_api_level=API_HYBRID diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go new file mode 100644 index 000000000000..3a34e664e0a9 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go @@ -0,0 +1,4263 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.0 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/openapiv2.proto + +//go:build !protoopaque + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. +type Scheme int32 + +const ( + Scheme_UNKNOWN Scheme = 0 + Scheme_HTTP Scheme = 1 + Scheme_HTTPS Scheme = 2 + Scheme_WS Scheme = 3 + Scheme_WSS Scheme = 4 +) + +// Enum value maps for Scheme. +var ( + Scheme_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HTTP", + 2: "HTTPS", + 3: "WS", + 4: "WSS", + } + Scheme_value = map[string]int32{ + "UNKNOWN": 0, + "HTTP": 1, + "HTTPS": 2, + "WS": 3, + "WSS": 4, + } +) + +func (x Scheme) Enum() *Scheme { + p := new(Scheme) + *p = x + return p +} + +func (x Scheme) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Scheme) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor() +} + +func (Scheme) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0] +} + +func (x Scheme) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Type` is a supported HTTP header type. +// See https://swagger.io/specification/v2/#parameterType. +type HeaderParameter_Type int32 + +const ( + HeaderParameter_UNKNOWN HeaderParameter_Type = 0 + HeaderParameter_STRING HeaderParameter_Type = 1 + HeaderParameter_NUMBER HeaderParameter_Type = 2 + HeaderParameter_INTEGER HeaderParameter_Type = 3 + HeaderParameter_BOOLEAN HeaderParameter_Type = 4 +) + +// Enum value maps for HeaderParameter_Type. 
+var ( + HeaderParameter_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STRING", + 2: "NUMBER", + 3: "INTEGER", + 4: "BOOLEAN", + } + HeaderParameter_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STRING": 1, + "NUMBER": 2, + "INTEGER": 3, + "BOOLEAN": 4, + } +) + +func (x HeaderParameter_Type) Enum() *HeaderParameter_Type { + p := new(HeaderParameter_Type) + *p = x + return p +} + +func (x HeaderParameter_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor() +} + +func (HeaderParameter_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1] +} + +func (x HeaderParameter_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type JSONSchema_JSONSchemaSimpleTypes int32 + +const ( + JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0 + JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1 + JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2 + JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3 + JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4 + JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5 + JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6 + JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7 +) + +// Enum value maps for JSONSchema_JSONSchemaSimpleTypes. +var ( + JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARRAY", + 2: "BOOLEAN", + 3: "INTEGER", + 4: "NULL", + 5: "NUMBER", + 6: "OBJECT", + 7: "STRING", + } + JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{ + "UNKNOWN": 0, + "ARRAY": 1, + "BOOLEAN": 2, + "INTEGER": 3, + "NULL": 4, + "NUMBER": 5, + "OBJECT": 6, + "STRING": 7, + } +) + +func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes { + p := new(JSONSchema_JSONSchemaSimpleTypes) + *p = x + return p +} + +func (x JSONSchema_JSONSchemaSimpleTypes) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor() +} + +func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2] +} + +func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The type of the security scheme. Valid values are "basic", +// "apiKey" or "oauth2". +type SecurityScheme_Type int32 + +const ( + SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0 + SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1 + SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2 + SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3 +) + +// Enum value maps for SecurityScheme_Type. 
+var ( + SecurityScheme_Type_name = map[int32]string{ + 0: "TYPE_INVALID", + 1: "TYPE_BASIC", + 2: "TYPE_API_KEY", + 3: "TYPE_OAUTH2", + } + SecurityScheme_Type_value = map[string]int32{ + "TYPE_INVALID": 0, + "TYPE_BASIC": 1, + "TYPE_API_KEY": 2, + "TYPE_OAUTH2": 3, + } +) + +func (x SecurityScheme_Type) Enum() *SecurityScheme_Type { + p := new(SecurityScheme_Type) + *p = x + return p +} + +func (x SecurityScheme_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor() +} + +func (SecurityScheme_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3] +} + +func (x SecurityScheme_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The location of the API key. Valid values are "query" or "header". +type SecurityScheme_In int32 + +const ( + SecurityScheme_IN_INVALID SecurityScheme_In = 0 + SecurityScheme_IN_QUERY SecurityScheme_In = 1 + SecurityScheme_IN_HEADER SecurityScheme_In = 2 +) + +// Enum value maps for SecurityScheme_In. +var ( + SecurityScheme_In_name = map[int32]string{ + 0: "IN_INVALID", + 1: "IN_QUERY", + 2: "IN_HEADER", + } + SecurityScheme_In_value = map[string]int32{ + "IN_INVALID": 0, + "IN_QUERY": 1, + "IN_HEADER": 2, + } +) + +func (x SecurityScheme_In) Enum() *SecurityScheme_In { + p := new(SecurityScheme_In) + *p = x + return p +} + +func (x SecurityScheme_In) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor() +} + +func (SecurityScheme_In) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4] +} + +func (x SecurityScheme_In) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The flow used by the OAuth2 security scheme. Valid values are +// "implicit", "password", "application" or "accessCode". +type SecurityScheme_Flow int32 + +const ( + SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0 + SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1 + SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2 + SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3 + SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4 +) + +// Enum value maps for SecurityScheme_Flow. 
+var ( + SecurityScheme_Flow_name = map[int32]string{ + 0: "FLOW_INVALID", + 1: "FLOW_IMPLICIT", + 2: "FLOW_PASSWORD", + 3: "FLOW_APPLICATION", + 4: "FLOW_ACCESS_CODE", + } + SecurityScheme_Flow_value = map[string]int32{ + "FLOW_INVALID": 0, + "FLOW_IMPLICIT": 1, + "FLOW_PASSWORD": 2, + "FLOW_APPLICATION": 3, + "FLOW_ACCESS_CODE": 4, + } +) + +func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow { + p := new(SecurityScheme_Flow) + *p = x + return p +} + +func (x SecurityScheme_Flow) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor() +} + +func (SecurityScheme_Flow) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5] +} + +func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +type Swagger struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". 
If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"` + // Additional external documentation. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Swagger) Reset() { + *x = Swagger{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Swagger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Swagger) ProtoMessage() {} + +func (x *Swagger) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Swagger) GetSwagger() string { + if x != nil { + return x.Swagger + } + return "" +} + +func (x *Swagger) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *Swagger) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *Swagger) GetBasePath() string { + if x != nil { + return x.BasePath + } + return "" +} + +func (x *Swagger) GetSchemes() []Scheme { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Swagger) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Swagger) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Swagger) GetResponses() map[string]*Response { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.SecurityDefinitions + } + return nil +} + +func (x *Swagger) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Swagger) GetTags() []*Tag { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Swagger) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Swagger) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Swagger) SetSwagger(v string) { + x.Swagger = v +} + +func (x *Swagger) SetInfo(v *Info) { + x.Info = v +} + +func (x *Swagger) SetHost(v string) { + x.Host = v +} + +func (x *Swagger) SetBasePath(v string) { + x.BasePath = v +} + +func (x *Swagger) SetSchemes(v []Scheme) { + x.Schemes = v +} + +func (x *Swagger) SetConsumes(v []string) { + x.Consumes = v +} + +func (x *Swagger) SetProduces(v []string) { + x.Produces = v +} + +func (x *Swagger) SetResponses(v map[string]*Response) { + x.Responses = v +} + +func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) { + x.SecurityDefinitions = v +} + +func (x *Swagger) SetSecurity(v []*SecurityRequirement) { + x.Security = v +} + +func (x *Swagger) SetTags(v []*Tag) { + x.Tags = v +} + +func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Swagger) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Swagger) HasInfo() bool { + if x == nil { + return false + } + return x.Info != nil +} + +func (x *Swagger) HasSecurityDefinitions() bool { + if x == nil { + return false + } + return x.SecurityDefinitions != nil +} + +func (x *Swagger) 
HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Swagger) ClearInfo() { + x.Info = nil +} + +func (x *Swagger) ClearSecurityDefinitions() { + x.SecurityDefinitions = nil +} + +func (x *Swagger) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Swagger_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info *Info + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + Responses map[string]*Response + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag + // Additional external documentation. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Swagger_builder) Build() *Swagger { + m0 := &Swagger{} + b, x := &b0, m0 + _, _ = b, x + x.Swagger = b.Swagger + x.Info = b.Info + x.Host = b.Host + x.BasePath = b.BasePath + x.Schemes = b.Schemes + x.Consumes = b.Consumes + x.Produces = b.Produces + x.Responses = b.Responses + x.SecurityDefinitions = b.SecurityDefinitions + x.Security = b.Security + x.Tags = b.Tags + x.ExternalDocs = b.ExternalDocs + x.Extensions = b.Extensions + return m0 +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +type Operation struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + // The list of possible responses as they are returned from executing this + // operation. + Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The transfer protocol for the operation. 
Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + // Declares this operation to be deprecated. Usage of the declared operation + // should be refrained. Default value is false. + Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Operation) Reset() { + *x = Operation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Operation) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Operation) GetResponses() map[string]*Response { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Operation) GetSchemes() []Scheme { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { 
+ if x != nil { + return x.Security + } + return nil +} + +func (x *Operation) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Operation) GetParameters() *Parameters { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Operation) SetTags(v []string) { + x.Tags = v +} + +func (x *Operation) SetSummary(v string) { + x.Summary = v +} + +func (x *Operation) SetDescription(v string) { + x.Description = v +} + +func (x *Operation) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Operation) SetOperationId(v string) { + x.OperationId = v +} + +func (x *Operation) SetConsumes(v []string) { + x.Consumes = v +} + +func (x *Operation) SetProduces(v []string) { + x.Produces = v +} + +func (x *Operation) SetResponses(v map[string]*Response) { + x.Responses = v +} + +func (x *Operation) SetSchemes(v []Scheme) { + x.Schemes = v +} + +func (x *Operation) SetDeprecated(v bool) { + x.Deprecated = v +} + +func (x *Operation) SetSecurity(v []*SecurityRequirement) { + x.Security = v +} + +func (x *Operation) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Operation) SetParameters(v *Parameters) { + x.Parameters = v +} + +func (x *Operation) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Operation) HasParameters() bool { + if x == nil { + return false + } + return x.Parameters != nil +} + +func (x *Operation) ClearExternalDocs() { + x.ExternalDocs = nil +} + +func (x *Operation) ClearParameters() { + x.Parameters = nil +} + +type Operation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + OperationId string + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Consumes []string + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Produces []string + // The list of possible responses as they are returned from executing this + // operation. + Responses map[string]*Response + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + Schemes []Scheme + // Declares this operation to be deprecated. Usage of the declared operation + // should be refrained. 
Default value is false. + Deprecated bool + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + Security []*SecurityRequirement + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters *Parameters +} + +func (b0 Operation_builder) Build() *Operation { + m0 := &Operation{} + b, x := &b0, m0 + _, _ = b, x + x.Tags = b.Tags + x.Summary = b.Summary + x.Description = b.Description + x.ExternalDocs = b.ExternalDocs + x.OperationId = b.OperationId + x.Consumes = b.Consumes + x.Produces = b.Produces + x.Responses = b.Responses + x.Schemes = b.Schemes + x.Deprecated = b.Deprecated + x.Security = b.Security + x.Extensions = b.Extensions + x.Parameters = b.Parameters + return m0 +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object. +// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. +// See: https://swagger.io/specification/v2/#parameter-object +type Parameters struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Headers` is one or more HTTP header parameter. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Parameters) Reset() { + *x = Parameters{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Parameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Parameters) ProtoMessage() {} + +func (x *Parameters) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Parameters) GetHeaders() []*HeaderParameter { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Parameters) SetHeaders(v []*HeaderParameter) { + x.Headers = v +} + +type Parameters_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Headers` is one or more HTTP header parameter. 
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter +} + +func (b0 Parameters_builder) Build() *Parameters { + m0 := &Parameters{} + b, x := &b0, m0 + _, _ = b, x + x.Headers = b.Headers + return m0 +} + +// `HeaderParameter` a HTTP header parameter. +// See: https://swagger.io/specification/v2/#parameter-object +type HeaderParameter struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Name` is the header name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // `Description` is a short description of the header. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"` + // `Format` The extending format for the previously mentioned type. + Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"` + // `Required` indicates if the header is optional + Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeaderParameter) Reset() { + *x = HeaderParameter{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameter) ProtoMessage() {} + +func (x *HeaderParameter) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *HeaderParameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *HeaderParameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *HeaderParameter) GetType() HeaderParameter_Type { + if x != nil { + return x.Type + } + return HeaderParameter_UNKNOWN +} + +func (x *HeaderParameter) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *HeaderParameter) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *HeaderParameter) SetName(v string) { + x.Name = v +} + +func (x *HeaderParameter) SetDescription(v string) { + x.Description = v +} + +func (x *HeaderParameter) SetType(v HeaderParameter_Type) { + x.Type = v +} + +func (x *HeaderParameter) SetFormat(v string) { + x.Format = v +} + +func (x *HeaderParameter) SetRequired(v bool) { + x.Required = v +} + +type HeaderParameter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Name` is the header name. + Name string + // `Description` is a short description of the header. + Description string + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. 
+ Type HeaderParameter_Type + // `Format` The extending format for the previously mentioned type. + Format string + // `Required` indicates if the header is optional + Required bool +} + +func (b0 HeaderParameter_builder) Build() *HeaderParameter { + m0 := &HeaderParameter{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Description = b.Description + x.Type = b.Type + x.Format = b.Format + x.Required = b.Required + return m0 +} + +// `Header` is a representation of OpenAPI v2 specification's Header object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject +type Header struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Description` is a short description of the header. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // `Format` The extending format for the previously mentioned type. + Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"` + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Header) Reset() { + *x = Header{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Header) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Header) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Header) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *Header) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *Header) SetDescription(v string) { + x.Description = v +} + +func (x *Header) SetType(v string) { + x.Type = v +} + +func (x *Header) SetFormat(v string) { + x.Format = v +} + +func (x *Header) SetDefault(v string) { + x.Default = v +} + +func (x *Header) SetPattern(v string) { + x.Pattern = v +} + +type Header_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the header. + Description string + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". 
The "array" type is not supported. + Type string + // `Format` The extending format for the previously mentioned type. + Format string + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string +} + +func (b0 Header_builder) Build() *Header { + m0 := &Header{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Type = b.Type + x.Format = b.Format + x.Default = b.Default + x.Pattern = b.Pattern + return m0 +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +type Response struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Response) Reset() { + *x = Response{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Response) GetSchema() *Schema { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Response) GetHeaders() map[string]*Header { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Response) GetExamples() map[string]string { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Response) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Response) SetDescription(v string) { + x.Description = v +} + +func (x *Response) SetSchema(v *Schema) { + x.Schema = v +} + +func (x *Response) SetHeaders(v map[string]*Header) { + x.Headers = v +} + +func (x *Response) SetExamples(v map[string]string) { + x.Examples = v +} + +func (x *Response) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Response) HasSchema() bool { + if x == nil { + return false + } + return x.Schema != nil +} + +func (x *Response) ClearSchema() { + x.Schema = nil +} + +type Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + Description string + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Response_builder) Build() *Response { + m0 := &Response{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Schema = b.Schema + x.Headers = b.Headers + x.Examples = b.Examples + x.Extensions = b.Extensions + return m0 +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// ... +// }; +type Info struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The title of the application. + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + // A short description of the application. GFM syntax can be used for rich + // text representation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The Terms of Service for the API. + TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + // The contact information for the exposed API. + Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` + // The license information for the exposed API. + License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` + // Provides the version of the application API (not to be confused + // with the specification version). + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Info) Reset() { + *x = Info{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService + } + return "" +} + +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.License + } + return nil +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Info) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Info) SetTitle(v string) { + x.Title = v +} + +func (x *Info) SetDescription(v string) { + x.Description = v 
+} + +func (x *Info) SetTermsOfService(v string) { + x.TermsOfService = v +} + +func (x *Info) SetContact(v *Contact) { + x.Contact = v +} + +func (x *Info) SetLicense(v *License) { + x.License = v +} + +func (x *Info) SetVersion(v string) { + x.Version = v +} + +func (x *Info) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Info) HasContact() bool { + if x == nil { + return false + } + return x.Contact != nil +} + +func (x *Info) HasLicense() bool { + if x == nil { + return false + } + return x.License != nil +} + +func (x *Info) ClearContact() { + x.Contact = nil +} + +func (x *Info) ClearLicense() { + x.License = nil +} + +type Info_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The title of the application. + Title string + // A short description of the application. GFM syntax can be used for rich + // text representation. + Description string + // The Terms of Service for the API. + TermsOfService string + // The contact information for the exposed API. + Contact *Contact + // The license information for the exposed API. + License *License + // Provides the version of the application API (not to be confused + // with the specification version). + Version string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Info_builder) Build() *Info { + m0 := &Info{} + b, x := &b0, m0 + _, _ = b, x + x.Title = b.Title + x.Description = b.Description + x.TermsOfService = b.TermsOfService + x.Contact = b.Contact + x.License = b.License + x.Version = b.Version + x.Extensions = b.Extensions + return m0 +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... +// }; +type Contact struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // The email address of the contact person/organization. MUST be in the format + // of an email address. 
+ Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Contact) Reset() { + *x = Contact{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Contact) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *Contact) SetName(v string) { + x.Name = v +} + +func (x *Contact) SetUrl(v string) { + x.Url = v +} + +func (x *Contact) SetEmail(v string) { + x.Email = v +} + +type Contact_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The identifying name of the contact person/organization. + Name string + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string + // The email address of the contact person/organization. MUST be in the format + // of an email address. + Email string +} + +func (b0 Contact_builder) Build() *Contact { + m0 := &Contact{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Url = b.Url + x.Email = b.Email + return m0 +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... +// }; +type License struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The license name used for the API. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL to the license used for the API. MUST be in the format of a URL. 
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *License) Reset() { + *x = License{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *License) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *License) SetName(v string) { + x.Name = v +} + +func (x *License) SetUrl(v string) { + x.Url = v +} + +type License_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The license name used for the API. + Name string + // A URL to the license used for the API. MUST be in the format of a URL. + Url string +} + +func (b0 License_builder) Build() *License { + m0 := &License{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Url = b.Url + return m0 +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... +// }; +type ExternalDocumentation struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL for the target documentation. Value MUST be in the format + // of a URL. 
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalDocumentation) Reset() { + *x = ExternalDocumentation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalDocumentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocumentation) ProtoMessage() {} + +func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalDocumentation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ExternalDocumentation) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *ExternalDocumentation) SetDescription(v string) { + x.Description = v +} + +func (x *ExternalDocumentation) SetUrl(v string) { + x.Url = v +} + +type ExternalDocumentation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string + // The URL for the target documentation. Value MUST be in the format + // of a URL. + Url string +} + +func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation { + m0 := &ExternalDocumentation{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Url = b.Url + return m0 +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +type Schema struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"` + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. 
+ Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Schema) Reset() { + *x = Schema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Schema) GetJsonSchema() *JSONSchema { + if x != nil { + return x.JsonSchema + } + return nil +} + +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.Discriminator + } + return "" +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *Schema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() string { + if x != nil { + return x.Example + } + return "" +} + +func (x *Schema) SetJsonSchema(v *JSONSchema) { + x.JsonSchema = v +} + +func (x *Schema) SetDiscriminator(v string) { + x.Discriminator = v +} + +func (x *Schema) SetReadOnly(v bool) { + x.ReadOnly = v +} + +func (x *Schema) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Schema) SetExample(v string) { + x.Example = v +} + +func (x *Schema) HasJsonSchema() bool { + if x == nil { + return false + } + return x.JsonSchema != nil +} + +func (x *Schema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Schema) ClearJsonSchema() { + x.JsonSchema = nil +} + +func (x *Schema) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Schema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + JsonSchema *JSONSchema + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + Example string +} + +func (b0 Schema_builder) Build() *Schema { + m0 := &Schema{} + b, x := &b0, m0 + _, _ = b, x + x.JsonSchema = b.JsonSchema + x.Discriminator = b.Discriminator + x.ReadOnly = b.ReadOnly + x.ExternalDocs = b.ExternalDocs + x.Example = b.Example + return m0 +} + +// `EnumSchema` is subset of fields from the OpenAPI v2 specification's Schema object. 
+// Only fields that are applicable to Enums are included +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = { +// ... +// title: "MyEnum"; +// description:"This is my nice enum"; +// example: "ZERO"; +// required: true; +// ... +// }; +type EnumSchema struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A short description of the schema. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + // The title of the schema. + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"` + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnumSchema) Reset() { + *x = EnumSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnumSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumSchema) ProtoMessage() {} + +func (x *EnumSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EnumSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *EnumSchema) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *EnumSchema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *EnumSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *EnumSchema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *EnumSchema) GetExample() string { + if x != nil { + return x.Example + } + return "" +} + +func (x *EnumSchema) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +func (x *EnumSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *EnumSchema) SetDescription(v string) { + x.Description = v +} + +func (x *EnumSchema) SetDefault(v string) { + x.Default = v +} + +func (x *EnumSchema) SetTitle(v string) { + x.Title = v +} + +func (x *EnumSchema) SetRequired(v bool) { + x.Required = v +} + +func (x *EnumSchema) SetReadOnly(v bool) { + x.ReadOnly = v +} + +func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *EnumSchema) SetExample(v string) { + x.Example = v +} + +func (x *EnumSchema) SetRef(v string) { + x.Ref = v +} + +func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *EnumSchema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *EnumSchema) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type EnumSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the schema. + Description string + Default string + // The title of the schema. + Title string + Required bool + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + Example string + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. 
+ Ref string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 EnumSchema_builder) Build() *EnumSchema { + m0 := &EnumSchema{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Default = b.Default + x.Title = b.Title + x.Required = b.Required + x.ReadOnly = b.ReadOnly + x.ExternalDocs = b.ExternalDocs + x.Example = b.Example + x.Ref = b.Ref + x.Extensions = b.Extensions + return m0 +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// Example: +// +// message SimpleMessage { +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +// json_schema: { +// title: "SimpleMessage" +// description: "A simple message." +// required: ["id"] +// } +// }; +// +// // Id represents the message identifier. +// string id = 1; [ +// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { +// description: "The unique identifier of the simple message." +// }]; +// } +type JSONSchema struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + // The title of the schema. + Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"` + // A short description of the schema. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"` + ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. + // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject + Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + // Maximum represents an inclusive upper limit for a numeric instance. The + // value of MUST be a number, + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + // minimum represents an inclusive lower limit for a numeric instance. 
The + // value of MUST be a number, + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"` + // Items in 'array' must be unique. + Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"` + Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"` + // `Format` + Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"` + // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1 + Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"` + // Additional field level properties used when generating the OpenAPI v2 file. + FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema) Reset() { + *x = JSONSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema) ProtoMessage() {} + +func (x *JSONSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +func (x *JSONSchema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *JSONSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *JSONSchema) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *JSONSchema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *JSONSchema) GetExample() string { + if x != nil { + return x.Example + } + return "" +} + +func (x *JSONSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *JSONSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *JSONSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *JSONSchema) GetMaxLength() uint64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *JSONSchema) GetMinLength() uint64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *JSONSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *JSONSchema) GetMaxItems() uint64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *JSONSchema) GetMinItems() uint64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *JSONSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *JSONSchema) GetMaxProperties() uint64 { + if x != nil { + return x.MaxProperties + } + return 0 +} + +func (x *JSONSchema) GetMinProperties() uint64 { + if x != nil { + return x.MinProperties + } + return 0 +} + +func (x *JSONSchema) GetRequired() []string { + if x != nil { + return x.Required + } + return nil +} + +func (x *JSONSchema) GetArray() []string { + if x != nil { + return x.Array + } + return nil +} + +func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes { + if x != nil { + return x.Type + } + return nil +} + +func (x *JSONSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *JSONSchema) GetEnum() []string { + if x != nil { + return x.Enum + } + return nil +} + +func (x *JSONSchema) GetFieldConfiguration() 
*JSONSchema_FieldConfiguration { + if x != nil { + return x.FieldConfiguration + } + return nil +} + +func (x *JSONSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *JSONSchema) SetRef(v string) { + x.Ref = v +} + +func (x *JSONSchema) SetTitle(v string) { + x.Title = v +} + +func (x *JSONSchema) SetDescription(v string) { + x.Description = v +} + +func (x *JSONSchema) SetDefault(v string) { + x.Default = v +} + +func (x *JSONSchema) SetReadOnly(v bool) { + x.ReadOnly = v +} + +func (x *JSONSchema) SetExample(v string) { + x.Example = v +} + +func (x *JSONSchema) SetMultipleOf(v float64) { + x.MultipleOf = v +} + +func (x *JSONSchema) SetMaximum(v float64) { + x.Maximum = v +} + +func (x *JSONSchema) SetExclusiveMaximum(v bool) { + x.ExclusiveMaximum = v +} + +func (x *JSONSchema) SetMinimum(v float64) { + x.Minimum = v +} + +func (x *JSONSchema) SetExclusiveMinimum(v bool) { + x.ExclusiveMinimum = v +} + +func (x *JSONSchema) SetMaxLength(v uint64) { + x.MaxLength = v +} + +func (x *JSONSchema) SetMinLength(v uint64) { + x.MinLength = v +} + +func (x *JSONSchema) SetPattern(v string) { + x.Pattern = v +} + +func (x *JSONSchema) SetMaxItems(v uint64) { + x.MaxItems = v +} + +func (x *JSONSchema) SetMinItems(v uint64) { + x.MinItems = v +} + +func (x *JSONSchema) SetUniqueItems(v bool) { + x.UniqueItems = v +} + +func (x *JSONSchema) SetMaxProperties(v uint64) { + x.MaxProperties = v +} + +func (x *JSONSchema) SetMinProperties(v uint64) { + x.MinProperties = v +} + +func (x *JSONSchema) SetRequired(v []string) { + x.Required = v +} + +func (x *JSONSchema) SetArray(v []string) { + x.Array = v +} + +func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) { + x.Type = v +} + +func (x *JSONSchema) SetFormat(v string) { + x.Format = v +} + +func (x *JSONSchema) SetEnum(v []string) { + x.Enum = v +} + +func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) { + x.FieldConfiguration = v +} + +func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *JSONSchema) HasFieldConfiguration() bool { + if x == nil { + return false + } + return x.FieldConfiguration != nil +} + +func (x *JSONSchema) ClearFieldConfiguration() { + x.FieldConfiguration = nil +} + +type JSONSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string + // The title of the schema. + Title string + // A short description of the schema. + Description string + Default string + ReadOnly bool + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. + // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject + Example string + MultipleOf float64 + // Maximum represents an inclusive upper limit for a numeric instance. 
The + // value of MUST be a number, + Maximum float64 + ExclusiveMaximum bool + // minimum represents an inclusive lower limit for a numeric instance. The + // value of MUST be a number, + Minimum float64 + ExclusiveMinimum bool + MaxLength uint64 + MinLength uint64 + Pattern string + MaxItems uint64 + MinItems uint64 + UniqueItems bool + MaxProperties uint64 + MinProperties uint64 + Required []string + // Items in 'array' must be unique. + Array []string + Type []JSONSchema_JSONSchemaSimpleTypes + // `Format` + Format string + // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1 + Enum []string + // Additional field level properties used when generating the OpenAPI v2 file. + FieldConfiguration *JSONSchema_FieldConfiguration + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 JSONSchema_builder) Build() *JSONSchema { + m0 := &JSONSchema{} + b, x := &b0, m0 + _, _ = b, x + x.Ref = b.Ref + x.Title = b.Title + x.Description = b.Description + x.Default = b.Default + x.ReadOnly = b.ReadOnly + x.Example = b.Example + x.MultipleOf = b.MultipleOf + x.Maximum = b.Maximum + x.ExclusiveMaximum = b.ExclusiveMaximum + x.Minimum = b.Minimum + x.ExclusiveMinimum = b.ExclusiveMinimum + x.MaxLength = b.MaxLength + x.MinLength = b.MinLength + x.Pattern = b.Pattern + x.MaxItems = b.MaxItems + x.MinItems = b.MinItems + x.UniqueItems = b.UniqueItems + x.MaxProperties = b.MaxProperties + x.MinProperties = b.MinProperties + x.Required = b.Required + x.Array = b.Array + x.Type = b.Type + x.Format = b.Format + x.Enum = b.Enum + x.FieldConfiguration = b.FieldConfiguration + x.Extensions = b.Extensions + return m0 +} + +// `Tag` is a representation of OpenAPI v2 specification's Tag object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +type Tag struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A short description for the tag. GFM syntax can be used for rich text + // representation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Additional external documentation for this tag. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Tag) Reset() { + *x = Tag{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Tag) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Tag) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Tag) SetName(v string) { + x.Name = v +} + +func (x *Tag) SetDescription(v string) { + x.Description = v +} + +func (x *Tag) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Tag) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Tag) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Tag) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Tag_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + Name string + // A short description for the tag. GFM syntax can be used for rich text + // representation. + Description string + // Additional external documentation for this tag. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Tag_builder) Build() *Tag { + m0 := &Tag{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Description = b.Description + x.ExternalDocs = b.ExternalDocs + x.Extensions = b.Extensions + return m0 +} + +// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +// Security Definitions object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject +// +// A declaration of the security schemes available to be used in the +// specification. This does not enforce the security schemes on the operations +// and only serves to provide the relevant details for each scheme. +type SecurityDefinitions struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A single security scheme definition, mapping a "name" to the scheme it + // defines. 
+ Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { + if x != nil { + return x.Security + } + return nil +} + +func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) { + x.Security = v +} + +type SecurityDefinitions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + Security map[string]*SecurityScheme +} + +func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions { + m0 := &SecurityDefinitions{} + b, x := &b0, m0 + _, _ = b, x + x.Security = b.Security + return m0 +} + +// `SecurityScheme` is a representation of OpenAPI v2 specification's +// Security Scheme object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject +// +// Allows the definition of a security scheme that can be used by the +// operations. Supported schemes are basic authentication, an API key (either as +// a header or as a query parameter) and OAuth2's common flows (implicit, +// password, application and access code). +type SecurityScheme struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"` + // A short description for security scheme. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"` + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"` + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. 
+ AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityScheme) Reset() { + *x = SecurityScheme{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityScheme) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityScheme) ProtoMessage() {} + +func (x *SecurityScheme) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityScheme) GetType() SecurityScheme_Type { + if x != nil { + return x.Type + } + return SecurityScheme_TYPE_INVALID +} + +func (x *SecurityScheme) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SecurityScheme) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecurityScheme) GetIn() SecurityScheme_In { + if x != nil { + return x.In + } + return SecurityScheme_IN_INVALID +} + +func (x *SecurityScheme) GetFlow() SecurityScheme_Flow { + if x != nil { + return x.Flow + } + return SecurityScheme_FLOW_INVALID +} + +func (x *SecurityScheme) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *SecurityScheme) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *SecurityScheme) GetScopes() *Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *SecurityScheme) SetType(v SecurityScheme_Type) { + x.Type = v +} + +func (x *SecurityScheme) SetDescription(v string) { + x.Description = v +} + +func (x *SecurityScheme) SetName(v string) { + x.Name = v +} + +func (x *SecurityScheme) SetIn(v SecurityScheme_In) { + x.In = v +} + +func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) { + x.Flow = v +} + +func (x *SecurityScheme) SetAuthorizationUrl(v string) { + x.AuthorizationUrl = v +} + +func (x *SecurityScheme) SetTokenUrl(v string) { + x.TokenUrl = v +} + +func (x *SecurityScheme) SetScopes(v *Scopes) { + x.Scopes = v +} + +func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *SecurityScheme) HasScopes() bool { 
+ if x == nil { + return false + } + return x.Scopes != nil +} + +func (x *SecurityScheme) ClearScopes() { + x.Scopes = nil +} + +type SecurityScheme_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type + // A short description for security scheme. + Description string + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + AuthorizationUrl string + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 SecurityScheme_builder) Build() *SecurityScheme { + m0 := &SecurityScheme{} + b, x := &b0, m0 + _, _ = b, x + x.Type = b.Type + x.Description = b.Description + x.Name = b.Name + x.In = b.In + x.Flow = b.Flow + x.AuthorizationUrl = b.AuthorizationUrl + x.TokenUrl = b.TokenUrl + x.Scopes = b.Scopes + x.Extensions = b.Extensions + return m0 +} + +// `SecurityRequirement` is a representation of OpenAPI v2 specification's +// Security Requirement object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject +// +// Lists the required security schemes to execute this operation. The object can +// have multiple security schemes declared in it which are all required (that +// is, there is a logical AND between the schemes). +// +// The name used for each property MUST correspond to a security scheme +// declared in the Security Definitions. +type SecurityRequirement struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. 
+ SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue { + if x != nil { + return x.SecurityRequirement + } + return nil +} + +func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) { + x.SecurityRequirement = v +} + +type SecurityRequirement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. + SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue +} + +func (b0 SecurityRequirement_builder) Build() *SecurityRequirement { + m0 := &SecurityRequirement{} + b, x := &b0, m0 + _, _ = b, x + x.SecurityRequirement = b.SecurityRequirement + return m0 +} + +// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject +// +// Lists the available scopes for an OAuth2 security scheme. +type Scopes struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Maps between a name of a scope to a short description of it (as the value + // of the property). 
+ Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Scopes) Reset() { + *x = Scopes{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Scopes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Scopes) ProtoMessage() {} + +func (x *Scopes) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Scopes) GetScope() map[string]string { + if x != nil { + return x.Scope + } + return nil +} + +func (x *Scopes) SetScope(v map[string]string) { + x.Scope = v +} + +type Scopes_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Maps between a name of a scope to a short description of it (as the value + // of the property). + Scope map[string]string +} + +func (b0 Scopes_builder) Build() *Scopes { + m0 := &Scopes{} + b, x := &b0, m0 + _, _ = b, x + x.Scope = b.Scope + return m0 +} + +// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. +// These properties are not defined by OpenAPIv2, but they are used to control the generation. +type JSONSchema_FieldConfiguration struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. Use this to avoid having auto generated path parameter names + // for overlapping paths. + PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema_FieldConfiguration) Reset() { + *x = JSONSchema_FieldConfiguration{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema_FieldConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema_FieldConfiguration) ProtoMessage() {} + +func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema_FieldConfiguration) GetPathParamName() string { + if x != nil { + return x.PathParamName + } + return "" +} + +func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) { + x.PathParamName = v +} + +type JSONSchema_FieldConfiguration_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. 
Use this to avoid having auto generated path parameter names + // for overlapping paths. + PathParamName string +} + +func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration { + m0 := &JSONSchema_FieldConfiguration{} + b, x := &b0, m0 + _, _ = b, x + x.PathParamName = b.PathParamName + return m0 +} + +// If the security scheme is of type "oauth2", then the value is a list of +// scope names required for the execution. For other security scheme types, +// the array MUST be empty. +type SecurityRequirement_SecurityRequirementValue struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityRequirement_SecurityRequirementValue) Reset() { + *x = SecurityRequirement_SecurityRequirementValue{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement_SecurityRequirementValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {} + +func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string { + if x != nil { + return x.Scope + } + return nil +} + +func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) { + x.Scope = v +} + +type SecurityRequirement_SecurityRequirementValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Scope []string +} + +func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue { + m0 := &SecurityRequirement_SecurityRequirementValue{} + b, x := &b0, m0 + _, _ = b, x + x.Scope = b.Scope + return m0 +} + +var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67, + 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 
0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07, + 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a, + 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 
0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, + 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, + 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, + 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, + 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 
0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, + 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, + 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, + 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, + 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 
0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a, + 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 
0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, + 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, + 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, + 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 
0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18, + 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70, + 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65, + 0x6c, 0x64, 
0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04, + 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e, + 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a, + 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, + 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, + 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, + 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 
0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10, + 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c, + 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, + 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50, + 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, + 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04, + 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 
0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, + 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06, + 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42, + 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{ + (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme + (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + 
(*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters + (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header + (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response + (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info + (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact + (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License + (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes + nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + (*structpb.Value)(nil), // 41: google.protobuf.Value +} +var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{ + 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info + 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + 22, // 4: 
grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters + 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact + 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License + 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + 33, // 28: 
grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes + 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value + 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value + 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header + 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value + 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value + 38, // 52: 
grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + 53, // [53:53] is the sub-list for method output_type + 53, // [53:53] is the sub-list for method input_type + 53, // [53:53] is the sub-list for extension type_name + 53, // [53:53] is the sub-list for extension extendee + 0, // [0:53] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() } +func file_protoc_gen_openapiv2_options_openapiv2_proto_init() { + if File_protoc_gen_openapiv2_options_openapiv2_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc, + NumEnums: 6, + NumMessages: 35, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs, + EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes, + MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes, + }.Build() + File_protoc_gen_openapiv2_options_openapiv2_proto = out.File + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto new file mode 100644 index 000000000000..5313f0818aea --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto @@ -0,0 +1,759 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_openapiv2.options; + +import "google/protobuf/struct.proto"; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. +enum Scheme { + UNKNOWN = 0; + HTTP = 1; + HTTPS = 2; + WS = 3; + WSS = 4; +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +// +message Swagger { + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + string swagger = 1; + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info info = 2; + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. 
If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + string host = 3; + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + string base_path = 4; + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + repeated Scheme schemes = 5; + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + repeated string consumes = 6; + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + repeated string produces = 7; + // field 8 is reserved for 'paths'. + reserved 8; + // field 9 is reserved for 'definitions', which at this time are already + // exposed as and customizable as proto messages. + reserved 9; + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + map<string, Response> responses = 10; + // Security scheme definitions that can be used across the specification. + SecurityDefinitions security_definitions = 11; + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + repeated SecurityRequirement security = 12; + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + repeated Tag tags = 13; + // Additional external documentation. + ExternalDocumentation external_docs = 14; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 15; +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +message Operation { + // A list of tags for API documentation control.
Tags can be used for logical + // grouping of operations by resources or any other qualifier. + repeated string tags = 1; + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + string summary = 2; + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + string description = 3; + // Additional external documentation for this operation. + ExternalDocumentation external_docs = 4; + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + string operation_id = 5; + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + repeated string consumes = 6; + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + repeated string produces = 7; + // field 8 is reserved for 'parameters'. + reserved 8; + // The list of possible responses as they are returned from executing this + // operation. + map<string, Response> responses = 9; + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + repeated Scheme schemes = 10; + // Declares this operation to be deprecated. Usage of the declared operation + // should be refrained. Default value is false. + bool deprecated = 11; + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + repeated SecurityRequirement security = 12; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 13; + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters parameters = 14; +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object. +// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. +// See: https://swagger.io/specification/v2/#parameter-object +message Parameters { + // `Headers` is one or more HTTP header parameter. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + repeated HeaderParameter headers = 1; +} + +// `HeaderParameter` a HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object +message HeaderParameter { + // `Type` is a supported HTTP header type. + // See https://swagger.io/specification/v2/#parameterType. + enum Type { + UNKNOWN = 0; + STRING = 1; + NUMBER = 2; + INTEGER = 3; + BOOLEAN = 4; + } + + // `Name` is the header name. + string name = 1; + // `Description` is a short description of the header. + string description = 2; + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type type = 3; + // `Format` The extending format for the previously mentioned type. + string format = 4; + // `Required` indicates if the header is optional + bool required = 5; + // field 6 is reserved for 'items', but in OpenAPI-specific way. + reserved 6; + // field 7 is reserved `Collection Format`. Determines the format of the array if type array is used. + reserved 7; +} + +// `Header` is a representation of OpenAPI v2 specification's Header object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject +// +message Header { + // `Description` is a short description of the header. + string description = 1; + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + string type = 2; + // `Format` The extending format for the previously mentioned type. + string format = 3; + // field 4 is reserved for 'items', but in OpenAPI-specific way. + reserved 4; + // field 5 is reserved `Collection Format` Determines the format of the array if type array is used. + reserved 5; + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + string default = 6; + // field 7 is reserved for 'maximum'. + reserved 7; + // field 8 is reserved for 'exclusiveMaximum'. + reserved 8; + // field 9 is reserved for 'minimum'. + reserved 9; + // field 10 is reserved for 'exclusiveMinimum'. + reserved 10; + // field 11 is reserved for 'maxLength'. + reserved 11; + // field 12 is reserved for 'minLength'. + reserved 12; + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + string pattern = 13; + // field 14 is reserved for 'maxItems'. + reserved 14; + // field 15 is reserved for 'minItems'. + reserved 15; + // field 16 is reserved for 'uniqueItems'. + reserved 16; + // field 17 is reserved for 'enum'. + reserved 17; + // field 18 is reserved for 'multipleOf'. + reserved 18; +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +// +message Response { + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + string description = 1; + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema schema = 2; + // `Headers` A list of headers that are sent with the response. 
+ // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + map<string, Header> headers = 3; + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + map<string, string> examples = 4; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 5; +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// ... +// }; +// +message Info { + // The title of the application. + string title = 1; + // A short description of the application. GFM syntax can be used for rich + // text representation. + string description = 2; + // The Terms of Service for the API. + string terms_of_service = 3; + // The contact information for the exposed API. + Contact contact = 4; + // The license information for the exposed API. + License license = 5; + // Provides the version of the application API (not to be confused + // with the specification version). + string version = 6; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 7; +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... +// }; +// +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. MUST be in the format of a + // URL. + string url = 2; + // The email address of the contact person/organization. MUST be in the format + // of an email address. + string email = 3; +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... +// }; +// +message License { + // The license name used for the API. + string name = 1; + // A URL to the license used for the API. MUST be in the format of a URL.
+ string url = 2; +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... +// }; +// +message ExternalDocumentation { + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + string description = 1; + // The URL for the target documentation. Value MUST be in the format + // of a URL. + string url = 2; +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +message Schema { + JSONSchema json_schema = 1; + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + string discriminator = 2; + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + bool read_only = 3; + // field 4 is reserved for 'xml'. + reserved 4; + // Additional external documentation for this schema. + ExternalDocumentation external_docs = 5; + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + string example = 6; +} + +// `EnumSchema` is subset of fields from the OpenAPI v2 specification's Schema object. +// Only fields that are applicable to Enums are included +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = { +// ... +// title: "MyEnum"; +// description:"This is my nice enum"; +// example: "ZERO"; +// required: true; +// ... +// }; +// +message EnumSchema { + // A short description of the schema. + string description = 1; + string default = 2; + // The title of the schema. + string title = 3; + bool required = 4; + bool read_only = 5; + // Additional external documentation for this schema. + ExternalDocumentation external_docs = 6; + string example = 7; + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // `ref: ".google.protobuf.Timestamp"`. + string ref = 8; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 9; +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// Example: +// +// message SimpleMessage { +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +// json_schema: { +// title: "SimpleMessage" +// description: "A simple message." +// required: ["id"] +// } +// }; +// +// // Id represents the message identifier. +// string id = 1; [ +// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { +// description: "The unique identifier of the simple message." +// }]; +// } +// +message JSONSchema { + // field 1 is reserved for '$id', omitted from OpenAPI v2. + reserved 1; + // field 2 is reserved for '$schema', omitted from OpenAPI v2. + reserved 2; + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // `ref: ".google.protobuf.Timestamp"`. + string ref = 3; + // field 4 is reserved for '$comment', omitted from OpenAPI v2. + reserved 4; + // The title of the schema. + string title = 5; + // A short description of the schema. + string description = 6; + string default = 7; + bool read_only = 8; + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. + // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject + string example = 9; + double multiple_of = 10; + // Maximum represents an inclusive upper limit for a numeric instance. The + // value of MUST be a number, + double maximum = 11; + bool exclusive_maximum = 12; + // minimum represents an inclusive lower limit for a numeric instance. The + // value of MUST be a number, + double minimum = 13; + bool exclusive_minimum = 14; + uint64 max_length = 15; + uint64 min_length = 16; + string pattern = 17; + // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2. + reserved 18; + // field 19 is reserved for 'items', but in OpenAPI-specific way. + // TODO(ivucica): add 'items'? + reserved 19; + uint64 max_items = 20; + uint64 min_items = 21; + bool unique_items = 22; + // field 23 is reserved for 'contains', omitted from OpenAPI v2. + reserved 23; + uint64 max_properties = 24; + uint64 min_properties = 25; + repeated string required = 26; + // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific + // way. TODO(ivucica): add 'additionalProperties'? + reserved 27; + // field 28 is reserved for 'definitions', omitted from OpenAPI v2. + reserved 28; + // field 29 is reserved for 'properties', but in OpenAPI-specific way. + // TODO(ivucica): add 'additionalProperties'?
+ reserved 29; + // following fields are reserved, as the properties have been omitted from + // OpenAPI v2: + // patternProperties, dependencies, propertyNames, const + reserved 30 to 33; + // Items in 'array' must be unique. + repeated string array = 34; + + enum JSONSchemaSimpleTypes { + UNKNOWN = 0; + ARRAY = 1; + BOOLEAN = 2; + INTEGER = 3; + NULL = 4; + NUMBER = 5; + OBJECT = 6; + STRING = 7; + } + + repeated JSONSchemaSimpleTypes type = 35; + // `Format` + string format = 36; + // following fields are reserved, as the properties have been omitted from + // OpenAPI v2: contentMediaType, contentEncoding, if, then, else + reserved 37 to 41; + // field 42 is reserved for 'allOf', but in OpenAPI-specific way. + // TODO(ivucica): add 'allOf'? + reserved 42; + // following fields are reserved, as the properties have been omitted from + // OpenAPI v2: + // anyOf, oneOf, not + reserved 43 to 45; + // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1 + repeated string enum = 46; + + // Additional field level properties used when generating the OpenAPI v2 file. + FieldConfiguration field_configuration = 1001; + + // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. + // These properties are not defined by OpenAPIv2, but they are used to control the generation. + message FieldConfiguration { + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. Use this to avoid having auto generated path parameter names + // for overlapping paths. + string path_param_name = 47; + } + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 48; +} + +// `Tag` is a representation of OpenAPI v2 specification's Tag object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +// +message Tag { + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + string name = 1; + // A short description for the tag. GFM syntax can be used for rich text + // representation. + string description = 2; + // Additional external documentation for this tag. + ExternalDocumentation external_docs = 3; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 4; +} + +// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +// Security Definitions object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject +// +// A declaration of the security schemes available to be used in the +// specification. This does not enforce the security schemes on the operations +// and only serves to provide the relevant details for each scheme. +message SecurityDefinitions { + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + map<string, SecurityScheme> security = 1; +} + +// `SecurityScheme` is a representation of OpenAPI v2 specification's +// Security Scheme object.
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject +// +// Allows the definition of a security scheme that can be used by the +// operations. Supported schemes are basic authentication, an API key (either as +// a header or as a query parameter) and OAuth2's common flows (implicit, +// password, application and access code). +message SecurityScheme { + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + enum Type { + TYPE_INVALID = 0; + TYPE_BASIC = 1; + TYPE_API_KEY = 2; + TYPE_OAUTH2 = 3; + } + + // The location of the API key. Valid values are "query" or "header". + enum In { + IN_INVALID = 0; + IN_QUERY = 1; + IN_HEADER = 2; + } + + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + enum Flow { + FLOW_INVALID = 0; + FLOW_IMPLICIT = 1; + FLOW_PASSWORD = 2; + FLOW_APPLICATION = 3; + FLOW_ACCESS_CODE = 4; + } + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type type = 1; + // A short description for security scheme. + string description = 2; + // The name of the header or query parameter to be used. + // Valid for apiKey. + string name = 3; + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In in = 4; + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow flow = 5; + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + string authorization_url = 6; + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + string token_url = 7; + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes scopes = 8; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 9; +} + +// `SecurityRequirement` is a representation of OpenAPI v2 specification's +// Security Requirement object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject +// +// Lists the required security schemes to execute this operation. The object can +// have multiple security schemes declared in it which are all required (that +// is, there is a logical AND between the schemes). +// +// The name used for each property MUST correspond to a security scheme +// declared in the Security Definitions. +message SecurityRequirement { + // If the security scheme is of type "oauth2", then the value is a list of + // scope names required for the execution. For other security scheme types, + // the array MUST be empty. + message SecurityRequirementValue { + repeated string scope = 1; + } + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. + map<string, SecurityRequirementValue> security_requirement = 1; +} + +// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject +// +// Lists the available scopes for an OAuth2 security scheme. +message Scopes { + // Maps between a name of a scope to a short description of it (as the value + // of the property). + map<string, string> scope = 1; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go new file mode 100644 index 000000000000..1f0e0c26916d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go @@ -0,0 +1,4055 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.0 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/openapiv2.proto + +//go:build protoopaque + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. +type Scheme int32 + +const ( + Scheme_UNKNOWN Scheme = 0 + Scheme_HTTP Scheme = 1 + Scheme_HTTPS Scheme = 2 + Scheme_WS Scheme = 3 + Scheme_WSS Scheme = 4 +) + +// Enum value maps for Scheme. +var ( + Scheme_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HTTP", + 2: "HTTPS", + 3: "WS", + 4: "WSS", + } + Scheme_value = map[string]int32{ + "UNKNOWN": 0, + "HTTP": 1, + "HTTPS": 2, + "WS": 3, + "WSS": 4, + } +) + +func (x Scheme) Enum() *Scheme { + p := new(Scheme) + *p = x + return p +} + +func (x Scheme) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Scheme) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor() +} + +func (Scheme) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0] +} + +func (x Scheme) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Type` is a supported HTTP header type. +// See https://swagger.io/specification/v2/#parameterType. +type HeaderParameter_Type int32 + +const ( + HeaderParameter_UNKNOWN HeaderParameter_Type = 0 + HeaderParameter_STRING HeaderParameter_Type = 1 + HeaderParameter_NUMBER HeaderParameter_Type = 2 + HeaderParameter_INTEGER HeaderParameter_Type = 3 + HeaderParameter_BOOLEAN HeaderParameter_Type = 4 +) + +// Enum value maps for HeaderParameter_Type.
+var ( + HeaderParameter_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STRING", + 2: "NUMBER", + 3: "INTEGER", + 4: "BOOLEAN", + } + HeaderParameter_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STRING": 1, + "NUMBER": 2, + "INTEGER": 3, + "BOOLEAN": 4, + } +) + +func (x HeaderParameter_Type) Enum() *HeaderParameter_Type { + p := new(HeaderParameter_Type) + *p = x + return p +} + +func (x HeaderParameter_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor() +} + +func (HeaderParameter_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1] +} + +func (x HeaderParameter_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type JSONSchema_JSONSchemaSimpleTypes int32 + +const ( + JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0 + JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1 + JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2 + JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3 + JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4 + JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5 + JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6 + JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7 +) + +// Enum value maps for JSONSchema_JSONSchemaSimpleTypes. +var ( + JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARRAY", + 2: "BOOLEAN", + 3: "INTEGER", + 4: "NULL", + 5: "NUMBER", + 6: "OBJECT", + 7: "STRING", + } + JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{ + "UNKNOWN": 0, + "ARRAY": 1, + "BOOLEAN": 2, + "INTEGER": 3, + "NULL": 4, + "NUMBER": 5, + "OBJECT": 6, + "STRING": 7, + } +) + +func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes { + p := new(JSONSchema_JSONSchemaSimpleTypes) + *p = x + return p +} + +func (x JSONSchema_JSONSchemaSimpleTypes) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor() +} + +func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2] +} + +func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The type of the security scheme. Valid values are "basic", +// "apiKey" or "oauth2". +type SecurityScheme_Type int32 + +const ( + SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0 + SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1 + SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2 + SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3 +) + +// Enum value maps for SecurityScheme_Type. 
+var ( + SecurityScheme_Type_name = map[int32]string{ + 0: "TYPE_INVALID", + 1: "TYPE_BASIC", + 2: "TYPE_API_KEY", + 3: "TYPE_OAUTH2", + } + SecurityScheme_Type_value = map[string]int32{ + "TYPE_INVALID": 0, + "TYPE_BASIC": 1, + "TYPE_API_KEY": 2, + "TYPE_OAUTH2": 3, + } +) + +func (x SecurityScheme_Type) Enum() *SecurityScheme_Type { + p := new(SecurityScheme_Type) + *p = x + return p +} + +func (x SecurityScheme_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor() +} + +func (SecurityScheme_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3] +} + +func (x SecurityScheme_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The location of the API key. Valid values are "query" or "header". +type SecurityScheme_In int32 + +const ( + SecurityScheme_IN_INVALID SecurityScheme_In = 0 + SecurityScheme_IN_QUERY SecurityScheme_In = 1 + SecurityScheme_IN_HEADER SecurityScheme_In = 2 +) + +// Enum value maps for SecurityScheme_In. +var ( + SecurityScheme_In_name = map[int32]string{ + 0: "IN_INVALID", + 1: "IN_QUERY", + 2: "IN_HEADER", + } + SecurityScheme_In_value = map[string]int32{ + "IN_INVALID": 0, + "IN_QUERY": 1, + "IN_HEADER": 2, + } +) + +func (x SecurityScheme_In) Enum() *SecurityScheme_In { + p := new(SecurityScheme_In) + *p = x + return p +} + +func (x SecurityScheme_In) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor() +} + +func (SecurityScheme_In) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4] +} + +func (x SecurityScheme_In) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The flow used by the OAuth2 security scheme. Valid values are +// "implicit", "password", "application" or "accessCode". +type SecurityScheme_Flow int32 + +const ( + SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0 + SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1 + SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2 + SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3 + SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4 +) + +// Enum value maps for SecurityScheme_Flow. 
+var ( + SecurityScheme_Flow_name = map[int32]string{ + 0: "FLOW_INVALID", + 1: "FLOW_IMPLICIT", + 2: "FLOW_PASSWORD", + 3: "FLOW_APPLICATION", + 4: "FLOW_ACCESS_CODE", + } + SecurityScheme_Flow_value = map[string]int32{ + "FLOW_INVALID": 0, + "FLOW_IMPLICIT": 1, + "FLOW_PASSWORD": 2, + "FLOW_APPLICATION": 3, + "FLOW_ACCESS_CODE": 4, + } +) + +func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow { + p := new(SecurityScheme_Flow) + *p = x + return p +} + +func (x SecurityScheme_Flow) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor() +} + +func (SecurityScheme_Flow) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5] +} + +func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +type Swagger struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + xxx_hidden_Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + xxx_hidden_Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + xxx_hidden_BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + xxx_hidden_Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + xxx_hidden_Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + xxx_hidden_Tags *[]*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache +} + +func (x *Swagger) Reset() { + *x = Swagger{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Swagger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Swagger) ProtoMessage() {} + +func (x *Swagger) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Swagger) GetSwagger() string { + if x != nil { + return x.xxx_hidden_Swagger + } + return "" +} + +func (x *Swagger) GetInfo() *Info { + if x != nil { + return x.xxx_hidden_Info + } + return nil +} + +func (x *Swagger) GetHost() string { + if x != nil { + return x.xxx_hidden_Host + } + return "" +} + +func (x *Swagger) GetBasePath() string { + if x != nil { + return x.xxx_hidden_BasePath + } + return "" +} + +func (x *Swagger) GetSchemes() []Scheme { + if x != nil { + return x.xxx_hidden_Schemes + } + return nil +} + +func (x *Swagger) GetConsumes() []string { + if x != nil { + return x.xxx_hidden_Consumes + } + return nil +} + +func (x *Swagger) GetProduces() []string { + if x != nil { + return x.xxx_hidden_Produces + } + return nil +} + +func (x *Swagger) GetResponses() map[string]*Response { + if x != nil { + return x.xxx_hidden_Responses + } + return nil +} + +func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.xxx_hidden_SecurityDefinitions + } + return nil +} + +func (x *Swagger) GetSecurity() []*SecurityRequirement { + if x != nil { + if x.xxx_hidden_Security != nil { + return *x.xxx_hidden_Security + } + } + return nil +} + +func (x *Swagger) GetTags() []*Tag { + if x != nil { + if x.xxx_hidden_Tags != nil { + return *x.xxx_hidden_Tags + } + } + return nil +} + +func (x *Swagger) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Swagger) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Swagger) SetSwagger(v string) { + x.xxx_hidden_Swagger = v +} + +func (x *Swagger) SetInfo(v *Info) { + x.xxx_hidden_Info = v +} + +func (x *Swagger) SetHost(v string) { + x.xxx_hidden_Host = v +} + +func (x *Swagger) SetBasePath(v string) { + x.xxx_hidden_BasePath = v +} + +func (x *Swagger) SetSchemes(v []Scheme) { + x.xxx_hidden_Schemes = v +} + +func (x *Swagger) SetConsumes(v []string) { + x.xxx_hidden_Consumes = v +} + +func (x *Swagger) SetProduces(v []string) { + x.xxx_hidden_Produces = v +} + +func (x *Swagger) SetResponses(v map[string]*Response) { + x.xxx_hidden_Responses = v +} + +func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) { + x.xxx_hidden_SecurityDefinitions = v +} + +func (x *Swagger) SetSecurity(v []*SecurityRequirement) { + x.xxx_hidden_Security = &v +} + +func (x *Swagger) SetTags(v []*Tag) { + x.xxx_hidden_Tags = &v +} + +func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Swagger) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Swagger) HasInfo() bool { + if x == nil { + return false + } + return x.xxx_hidden_Info != nil +} + +func (x *Swagger) HasSecurityDefinitions() bool { + if x == nil { + return false + } + 
return x.xxx_hidden_SecurityDefinitions != nil +} + +func (x *Swagger) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Swagger) ClearInfo() { + x.xxx_hidden_Info = nil +} + +func (x *Swagger) ClearSecurityDefinitions() { + x.xxx_hidden_SecurityDefinitions = nil +} + +func (x *Swagger) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Swagger_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info *Info + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + Responses map[string]*Response + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag + // Additional external documentation. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Swagger_builder) Build() *Swagger { + m0 := &Swagger{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Swagger = b.Swagger + x.xxx_hidden_Info = b.Info + x.xxx_hidden_Host = b.Host + x.xxx_hidden_BasePath = b.BasePath + x.xxx_hidden_Schemes = b.Schemes + x.xxx_hidden_Consumes = b.Consumes + x.xxx_hidden_Produces = b.Produces + x.xxx_hidden_Responses = b.Responses + x.xxx_hidden_SecurityDefinitions = b.SecurityDefinitions + x.xxx_hidden_Security = &b.Security + x.xxx_hidden_Tags = &b.Tags + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +type Operation struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + xxx_hidden_Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + xxx_hidden_Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + xxx_hidden_Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Operation) Reset() { + *x = Operation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() 
protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.xxx_hidden_Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.xxx_hidden_Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.xxx_hidden_OperationId + } + return "" +} + +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.xxx_hidden_Consumes + } + return nil +} + +func (x *Operation) GetProduces() []string { + if x != nil { + return x.xxx_hidden_Produces + } + return nil +} + +func (x *Operation) GetResponses() map[string]*Response { + if x != nil { + return x.xxx_hidden_Responses + } + return nil +} + +func (x *Operation) GetSchemes() []Scheme { + if x != nil { + return x.xxx_hidden_Schemes + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.xxx_hidden_Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + if x.xxx_hidden_Security != nil { + return *x.xxx_hidden_Security + } + } + return nil +} + +func (x *Operation) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Operation) GetParameters() *Parameters { + if x != nil { + return x.xxx_hidden_Parameters + } + return nil +} + +func (x *Operation) SetTags(v []string) { + x.xxx_hidden_Tags = v +} + +func (x *Operation) SetSummary(v string) { + x.xxx_hidden_Summary = v +} + +func (x *Operation) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Operation) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Operation) SetOperationId(v string) { + x.xxx_hidden_OperationId = v +} + +func (x *Operation) SetConsumes(v []string) { + x.xxx_hidden_Consumes = v +} + +func (x *Operation) SetProduces(v []string) { + x.xxx_hidden_Produces = v +} + +func (x *Operation) SetResponses(v map[string]*Response) { + x.xxx_hidden_Responses = v +} + +func (x *Operation) SetSchemes(v []Scheme) { + x.xxx_hidden_Schemes = v +} + +func (x *Operation) SetDeprecated(v bool) { + x.xxx_hidden_Deprecated = v +} + +func (x *Operation) SetSecurity(v []*SecurityRequirement) { + x.xxx_hidden_Security = &v +} + +func (x *Operation) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Operation) SetParameters(v *Parameters) { + x.xxx_hidden_Parameters = v +} + +func (x *Operation) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Operation) HasParameters() bool { + if x == nil { + return false + } + return x.xxx_hidden_Parameters != nil +} + +func (x *Operation) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +func (x *Operation) ClearParameters() { + x.xxx_hidden_Parameters = nil +} + +type Operation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + OperationId string + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Consumes []string + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Produces []string + // The list of possible responses as they are returned from executing this + // operation. + Responses map[string]*Response + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + Schemes []Scheme + // Declares this operation to be deprecated. Usage of the declared operation + // should be refrained. Default value is false. + Deprecated bool + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + Security []*SecurityRequirement + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters *Parameters +} + +func (b0 Operation_builder) Build() *Operation { + m0 := &Operation{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Tags = b.Tags + x.xxx_hidden_Summary = b.Summary + x.xxx_hidden_Description = b.Description + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_OperationId = b.OperationId + x.xxx_hidden_Consumes = b.Consumes + x.xxx_hidden_Produces = b.Produces + x.xxx_hidden_Responses = b.Responses + x.xxx_hidden_Schemes = b.Schemes + x.xxx_hidden_Deprecated = b.Deprecated + x.xxx_hidden_Security = &b.Security + x.xxx_hidden_Extensions = b.Extensions + x.xxx_hidden_Parameters = b.Parameters + return m0 +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object. 
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. +// See: https://swagger.io/specification/v2/#parameter-object +type Parameters struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Headers *[]*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Parameters) Reset() { + *x = Parameters{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Parameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Parameters) ProtoMessage() {} + +func (x *Parameters) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Parameters) GetHeaders() []*HeaderParameter { + if x != nil { + if x.xxx_hidden_Headers != nil { + return *x.xxx_hidden_Headers + } + } + return nil +} + +func (x *Parameters) SetHeaders(v []*HeaderParameter) { + x.xxx_hidden_Headers = &v +} + +type Parameters_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Headers` is one or more HTTP header parameter. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter +} + +func (b0 Parameters_builder) Build() *Parameters { + m0 := &Parameters{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Headers = &b.Headers + return m0 +} + +// `HeaderParameter` a HTTP header parameter. 
+// See: https://swagger.io/specification/v2/#parameter-object +type HeaderParameter struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeaderParameter) Reset() { + *x = HeaderParameter{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameter) ProtoMessage() {} + +func (x *HeaderParameter) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *HeaderParameter) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *HeaderParameter) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *HeaderParameter) GetType() HeaderParameter_Type { + if x != nil { + return x.xxx_hidden_Type + } + return HeaderParameter_UNKNOWN +} + +func (x *HeaderParameter) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *HeaderParameter) GetRequired() bool { + if x != nil { + return x.xxx_hidden_Required + } + return false +} + +func (x *HeaderParameter) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *HeaderParameter) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *HeaderParameter) SetType(v HeaderParameter_Type) { + x.xxx_hidden_Type = v +} + +func (x *HeaderParameter) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *HeaderParameter) SetRequired(v bool) { + x.xxx_hidden_Required = v +} + +type HeaderParameter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Name` is the header name. + Name string + // `Description` is a short description of the header. + Description string + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type HeaderParameter_Type + // `Format` The extending format for the previously mentioned type. + Format string + // `Required` indicates if the header is optional + Required bool +} + +func (b0 HeaderParameter_builder) Build() *HeaderParameter { + m0 := &HeaderParameter{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Required = b.Required + return m0 +} + +// `Header` is a representation of OpenAPI v2 specification's Header object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject +type Header struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Header) Reset() { + *x = Header{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Header) GetType() string { + if x != nil { + return x.xxx_hidden_Type + } + return "" +} + +func (x *Header) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *Header) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *Header) GetPattern() string { + if x != nil { + return x.xxx_hidden_Pattern + } + return "" +} + +func (x *Header) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Header) SetType(v string) { + x.xxx_hidden_Type = v +} + +func (x *Header) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *Header) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *Header) SetPattern(v string) { + x.xxx_hidden_Pattern = v +} + +type Header_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the header. + Description string + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + Type string + // `Format` The extending format for the previously mentioned type. + Format string + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string +} + +func (b0 Header_builder) Build() *Header { + m0 := &Header{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Default = b.Default + x.xxx_hidden_Pattern = b.Pattern + return m0 +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +type Response struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + xxx_hidden_Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Response) Reset() { + *x = Response{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Response) GetSchema() *Schema { + if x != nil { + return x.xxx_hidden_Schema + } + return nil +} + +func (x *Response) GetHeaders() map[string]*Header { + if x != nil { + return x.xxx_hidden_Headers + } + return nil +} + +func (x *Response) GetExamples() map[string]string { + if x != nil { + return x.xxx_hidden_Examples + } + return nil +} + +func (x *Response) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Response) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Response) SetSchema(v *Schema) { + x.xxx_hidden_Schema = v +} + +func (x *Response) SetHeaders(v map[string]*Header) { + x.xxx_hidden_Headers = v +} + +func (x *Response) SetExamples(v map[string]string) { + x.xxx_hidden_Examples = v +} + +func (x *Response) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Response) HasSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_Schema != nil +} + +func (x *Response) ClearSchema() { + x.xxx_hidden_Schema = nil +} + +type Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + Description string + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema + // `Headers` A list of headers that are sent with the response. 
+ // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Response_builder) Build() *Response { + m0 := &Response{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Schema = b.Schema + x.xxx_hidden_Headers = b.Headers + x.xxx_hidden_Examples = b.Examples + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// ... +// }; +type Info struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + xxx_hidden_Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` + xxx_hidden_License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` + xxx_hidden_Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Info) Reset() { + *x = Info{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.xxx_hidden_TermsOfService + } + return "" +} + 
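These messages are generated with the opaque API, so callers construct them through the *_builder value types and read them back through getters rather than touching the hidden fields directly. A minimal sketch of that flow, using only the Info and Contact builders and accessors generated in this file, and assuming the vendored package imports as github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options (the import path itself is not shown in this hunk):

package main

import (
	"fmt"

	// Import path assumed; this hunk only shows the generated file body.
	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
)

func main() {
	// Builders are plain value types; Build copies their exported fields
	// into the hidden (xxx_hidden_*) fields of the message.
	info := options.Info_builder{
		Title:   "Echo API",
		Version: "1.0",
		Contact: options.Contact_builder{
			Name:  "gRPC-Gateway project",
			Url:   "https://github.com/grpc-ecosystem/grpc-gateway",
			Email: "none@example.com",
		}.Build(),
	}.Build()

	// Reads go through getters; message-typed fields also expose Has*/Clear*.
	fmt.Println(info.GetTitle(), info.HasContact())
}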
+func (x *Info) GetContact() *Contact { + if x != nil { + return x.xxx_hidden_Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.xxx_hidden_License + } + return nil +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.xxx_hidden_Version + } + return "" +} + +func (x *Info) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Info) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *Info) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Info) SetTermsOfService(v string) { + x.xxx_hidden_TermsOfService = v +} + +func (x *Info) SetContact(v *Contact) { + x.xxx_hidden_Contact = v +} + +func (x *Info) SetLicense(v *License) { + x.xxx_hidden_License = v +} + +func (x *Info) SetVersion(v string) { + x.xxx_hidden_Version = v +} + +func (x *Info) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Info) HasContact() bool { + if x == nil { + return false + } + return x.xxx_hidden_Contact != nil +} + +func (x *Info) HasLicense() bool { + if x == nil { + return false + } + return x.xxx_hidden_License != nil +} + +func (x *Info) ClearContact() { + x.xxx_hidden_Contact = nil +} + +func (x *Info) ClearLicense() { + x.xxx_hidden_License = nil +} + +type Info_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The title of the application. + Title string + // A short description of the application. GFM syntax can be used for rich + // text representation. + Description string + // The Terms of Service for the API. + TermsOfService string + // The contact information for the exposed API. + Contact *Contact + // The license information for the exposed API. + License *License + // Provides the version of the application API (not to be confused + // with the specification version). + Version string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Info_builder) Build() *Info { + m0 := &Info{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Description = b.Description + x.xxx_hidden_TermsOfService = b.TermsOfService + x.xxx_hidden_Contact = b.Contact + x.xxx_hidden_License = b.License + x.xxx_hidden_Version = b.Version + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... 
+// }; +type Contact struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + xxx_hidden_Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Contact) Reset() { + *x = Contact{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Contact) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.xxx_hidden_Email + } + return "" +} + +func (x *Contact) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Contact) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +func (x *Contact) SetEmail(v string) { + x.xxx_hidden_Email = v +} + +type Contact_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The identifying name of the contact person/organization. + Name string + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string + // The email address of the contact person/organization. MUST be in the format + // of an email address. + Email string +} + +func (b0 Contact_builder) Build() *Contact { + m0 := &Contact{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Url = b.Url + x.xxx_hidden_Email = b.Email + return m0 +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... 
+// }; +type License struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *License) Reset() { + *x = License{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *License) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *License) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *License) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +type License_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The license name used for the API. + Name string + // A URL to the license used for the API. MUST be in the format of a URL. + Url string +} + +func (b0 License_builder) Build() *License { + m0 := &License{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Url = b.Url + return m0 +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... 
+// }; +type ExternalDocumentation struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalDocumentation) Reset() { + *x = ExternalDocumentation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalDocumentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocumentation) ProtoMessage() {} + +func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalDocumentation) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *ExternalDocumentation) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *ExternalDocumentation) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *ExternalDocumentation) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +type ExternalDocumentation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string + // The URL for the target documentation. Value MUST be in the format + // of a URL. + Url string +} + +func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation { + m0 := &ExternalDocumentation{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Url = b.Url + return m0 +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +type Schema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"` + xxx_hidden_Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Schema) Reset() { + *x = Schema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Schema) GetJsonSchema() *JSONSchema { + if x != nil { + return x.xxx_hidden_JsonSchema + } + return nil +} + +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.xxx_hidden_Discriminator + } + return "" +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *Schema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *Schema) SetJsonSchema(v *JSONSchema) { + x.xxx_hidden_JsonSchema = v +} + +func (x *Schema) SetDiscriminator(v string) { + x.xxx_hidden_Discriminator = v +} + +func (x *Schema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *Schema) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Schema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *Schema) HasJsonSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_JsonSchema != nil +} + +func (x *Schema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Schema) ClearJsonSchema() { + x.xxx_hidden_JsonSchema = nil +} + +func (x *Schema) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Schema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + JsonSchema *JSONSchema + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". 
This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + Example string +} + +func (b0 Schema_builder) Build() *Schema { + m0 := &Schema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_JsonSchema = b.JsonSchema + x.xxx_hidden_Discriminator = b.Discriminator + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Example = b.Example + return m0 +} + +// `EnumSchema` is subset of fields from the OpenAPI v2 specification's Schema object. +// Only fields that are applicable to Enums are included +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = { +// ... +// title: "MyEnum"; +// description:"This is my nice enum"; +// example: "ZERO"; +// required: true; +// ... +// }; +type EnumSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"` + xxx_hidden_Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnumSchema) Reset() { + *x = EnumSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnumSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumSchema) ProtoMessage() {} + +func (x *EnumSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EnumSchema) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *EnumSchema) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *EnumSchema) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *EnumSchema) GetRequired() bool { + if x != nil { + return 
x.xxx_hidden_Required + } + return false +} + +func (x *EnumSchema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *EnumSchema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *EnumSchema) GetRef() string { + if x != nil { + return x.xxx_hidden_Ref + } + return "" +} + +func (x *EnumSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *EnumSchema) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *EnumSchema) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *EnumSchema) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *EnumSchema) SetRequired(v bool) { + x.xxx_hidden_Required = v +} + +func (x *EnumSchema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *EnumSchema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *EnumSchema) SetRef(v string) { + x.xxx_hidden_Ref = v +} + +func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *EnumSchema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *EnumSchema) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type EnumSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the schema. + Description string + Default string + // The title of the schema. + Title string + Required bool + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + Example string + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 EnumSchema_builder) Build() *EnumSchema { + m0 := &EnumSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Default = b.Default + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Required = b.Required + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Example = b.Example + x.xxx_hidden_Ref = b.Ref + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// Example: +// +// message SimpleMessage { +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +// json_schema: { +// title: "SimpleMessage" +// description: "A simple message." +// required: ["id"] +// } +// }; +// +// // Id represents the message identifier. +// string id = 1; [ +// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { +// description: "The unique identifier of the simple message." +// }]; +// } +type JSONSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + xxx_hidden_Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + xxx_hidden_MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + xxx_hidden_Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + xxx_hidden_ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + xxx_hidden_Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + xxx_hidden_ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + xxx_hidden_MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + xxx_hidden_MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + xxx_hidden_Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + xxx_hidden_MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + xxx_hidden_MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + xxx_hidden_UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + xxx_hidden_MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + xxx_hidden_MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + xxx_hidden_Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"` + xxx_hidden_Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"` + xxx_hidden_Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Enum []string 
`protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"` + xxx_hidden_FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema) Reset() { + *x = JSONSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema) ProtoMessage() {} + +func (x *JSONSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema) GetRef() string { + if x != nil { + return x.xxx_hidden_Ref + } + return "" +} + +func (x *JSONSchema) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *JSONSchema) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *JSONSchema) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *JSONSchema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *JSONSchema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *JSONSchema) GetMultipleOf() float64 { + if x != nil { + return x.xxx_hidden_MultipleOf + } + return 0 +} + +func (x *JSONSchema) GetMaximum() float64 { + if x != nil { + return x.xxx_hidden_Maximum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.xxx_hidden_ExclusiveMaximum + } + return false +} + +func (x *JSONSchema) GetMinimum() float64 { + if x != nil { + return x.xxx_hidden_Minimum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.xxx_hidden_ExclusiveMinimum + } + return false +} + +func (x *JSONSchema) GetMaxLength() uint64 { + if x != nil { + return x.xxx_hidden_MaxLength + } + return 0 +} + +func (x *JSONSchema) GetMinLength() uint64 { + if x != nil { + return x.xxx_hidden_MinLength + } + return 0 +} + +func (x *JSONSchema) GetPattern() string { + if x != nil { + return x.xxx_hidden_Pattern + } + return "" +} + +func (x *JSONSchema) GetMaxItems() uint64 { + if x != nil { + return x.xxx_hidden_MaxItems + } + return 0 +} + +func (x *JSONSchema) GetMinItems() uint64 { + if x != nil { + return x.xxx_hidden_MinItems + } + return 0 +} + +func (x *JSONSchema) GetUniqueItems() bool { + if x != nil { + return x.xxx_hidden_UniqueItems + } + return false +} + +func (x *JSONSchema) GetMaxProperties() uint64 { + if x != nil { + return x.xxx_hidden_MaxProperties + } + return 0 +} + +func (x *JSONSchema) GetMinProperties() uint64 { + if x != nil { + return x.xxx_hidden_MinProperties + } + return 0 +} + +func (x *JSONSchema) GetRequired() []string { + if x != nil { + return x.xxx_hidden_Required + } + return nil +} + +func (x *JSONSchema) GetArray() []string { + if x != nil { + 
return x.xxx_hidden_Array + } + return nil +} + +func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes { + if x != nil { + return x.xxx_hidden_Type + } + return nil +} + +func (x *JSONSchema) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *JSONSchema) GetEnum() []string { + if x != nil { + return x.xxx_hidden_Enum + } + return nil +} + +func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration { + if x != nil { + return x.xxx_hidden_FieldConfiguration + } + return nil +} + +func (x *JSONSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *JSONSchema) SetRef(v string) { + x.xxx_hidden_Ref = v +} + +func (x *JSONSchema) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *JSONSchema) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *JSONSchema) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *JSONSchema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *JSONSchema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *JSONSchema) SetMultipleOf(v float64) { + x.xxx_hidden_MultipleOf = v +} + +func (x *JSONSchema) SetMaximum(v float64) { + x.xxx_hidden_Maximum = v +} + +func (x *JSONSchema) SetExclusiveMaximum(v bool) { + x.xxx_hidden_ExclusiveMaximum = v +} + +func (x *JSONSchema) SetMinimum(v float64) { + x.xxx_hidden_Minimum = v +} + +func (x *JSONSchema) SetExclusiveMinimum(v bool) { + x.xxx_hidden_ExclusiveMinimum = v +} + +func (x *JSONSchema) SetMaxLength(v uint64) { + x.xxx_hidden_MaxLength = v +} + +func (x *JSONSchema) SetMinLength(v uint64) { + x.xxx_hidden_MinLength = v +} + +func (x *JSONSchema) SetPattern(v string) { + x.xxx_hidden_Pattern = v +} + +func (x *JSONSchema) SetMaxItems(v uint64) { + x.xxx_hidden_MaxItems = v +} + +func (x *JSONSchema) SetMinItems(v uint64) { + x.xxx_hidden_MinItems = v +} + +func (x *JSONSchema) SetUniqueItems(v bool) { + x.xxx_hidden_UniqueItems = v +} + +func (x *JSONSchema) SetMaxProperties(v uint64) { + x.xxx_hidden_MaxProperties = v +} + +func (x *JSONSchema) SetMinProperties(v uint64) { + x.xxx_hidden_MinProperties = v +} + +func (x *JSONSchema) SetRequired(v []string) { + x.xxx_hidden_Required = v +} + +func (x *JSONSchema) SetArray(v []string) { + x.xxx_hidden_Array = v +} + +func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) { + x.xxx_hidden_Type = v +} + +func (x *JSONSchema) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *JSONSchema) SetEnum(v []string) { + x.xxx_hidden_Enum = v +} + +func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) { + x.xxx_hidden_FieldConfiguration = v +} + +func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *JSONSchema) HasFieldConfiguration() bool { + if x == nil { + return false + } + return x.xxx_hidden_FieldConfiguration != nil +} + +func (x *JSONSchema) ClearFieldConfiguration() { + x.xxx_hidden_FieldConfiguration = nil +} + +type JSONSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. 
+ // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string + // The title of the schema. + Title string + // A short description of the schema. + Description string + Default string + ReadOnly bool + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. + // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject + Example string + MultipleOf float64 + // Maximum represents an inclusive upper limit for a numeric instance. The + // value of MUST be a number, + Maximum float64 + ExclusiveMaximum bool + // minimum represents an inclusive lower limit for a numeric instance. The + // value of MUST be a number, + Minimum float64 + ExclusiveMinimum bool + MaxLength uint64 + MinLength uint64 + Pattern string + MaxItems uint64 + MinItems uint64 + UniqueItems bool + MaxProperties uint64 + MinProperties uint64 + Required []string + // Items in 'array' must be unique. + Array []string + Type []JSONSchema_JSONSchemaSimpleTypes + // `Format` + Format string + // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1 + Enum []string + // Additional field level properties used when generating the OpenAPI v2 file. + FieldConfiguration *JSONSchema_FieldConfiguration + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 JSONSchema_builder) Build() *JSONSchema { + m0 := &JSONSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Ref = b.Ref + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Default = b.Default + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_Example = b.Example + x.xxx_hidden_MultipleOf = b.MultipleOf + x.xxx_hidden_Maximum = b.Maximum + x.xxx_hidden_ExclusiveMaximum = b.ExclusiveMaximum + x.xxx_hidden_Minimum = b.Minimum + x.xxx_hidden_ExclusiveMinimum = b.ExclusiveMinimum + x.xxx_hidden_MaxLength = b.MaxLength + x.xxx_hidden_MinLength = b.MinLength + x.xxx_hidden_Pattern = b.Pattern + x.xxx_hidden_MaxItems = b.MaxItems + x.xxx_hidden_MinItems = b.MinItems + x.xxx_hidden_UniqueItems = b.UniqueItems + x.xxx_hidden_MaxProperties = b.MaxProperties + x.xxx_hidden_MinProperties = b.MinProperties + x.xxx_hidden_Required = b.Required + x.xxx_hidden_Array = b.Array + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Enum = b.Enum + x.xxx_hidden_FieldConfiguration = b.FieldConfiguration + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Tag` is a representation of OpenAPI v2 specification's Tag object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +type Tag struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Tag) Reset() { + *x = Tag{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Tag) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Tag) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Tag) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Tag) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Tag) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Tag) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Tag) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Tag) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Tag_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + Name string + // A short description for the tag. GFM syntax can be used for rich text + // representation. + Description string + // Additional external documentation for this tag. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Tag_builder) Build() *Tag { + m0 := &Tag{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Description = b.Description + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +// Security Definitions object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject +// +// A declaration of the security schemes available to be used in the +// specification. This does not enforce the security schemes on the operations +// and only serves to provide the relevant details for each scheme. +type SecurityDefinitions struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { + if x != nil { + return x.xxx_hidden_Security + } + return nil +} + +func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) { + x.xxx_hidden_Security = v +} + +type SecurityDefinitions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + Security map[string]*SecurityScheme +} + +func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions { + m0 := &SecurityDefinitions{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Security = b.Security + return m0 +} + +// `SecurityScheme` is a representation of OpenAPI v2 specification's +// Security Scheme object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject +// +// Allows the definition of a security scheme that can be used by the +// operations. Supported schemes are basic authentication, an API key (either as +// a header or as a query parameter) and OAuth2's common flows (implicit, +// password, application and access code). 
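A minimal sketch of declaring such a scheme with the SecurityScheme and SecurityDefinitions builders generated below, under the same import-path assumption as above; the enum constant names SecurityScheme_TYPE_API_KEY and SecurityScheme_IN_HEADER are assumed from the usual generated naming, since only the *_INVALID zero values are visible in this hunk:

package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
)

func main() {
	// An API-key scheme carried in a request header; constant names assumed.
	scheme := options.SecurityScheme_builder{
		Type:        options.SecurityScheme_TYPE_API_KEY,
		In:          options.SecurityScheme_IN_HEADER,
		Name:        "X-API-Key",
		Description: "API key passed in a request header.",
	}.Build()

	// SecurityDefinitions maps a scheme name to its definition.
	defs := options.SecurityDefinitions_builder{
		Security: map[string]*options.SecurityScheme{"ApiKeyAuth": scheme},
	}.Build()

	fmt.Println(scheme.GetName(), len(defs.GetSecurity()))
}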
+type SecurityScheme struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"` + xxx_hidden_Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"` + xxx_hidden_AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + xxx_hidden_TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + xxx_hidden_Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityScheme) Reset() { + *x = SecurityScheme{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityScheme) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityScheme) ProtoMessage() {} + +func (x *SecurityScheme) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityScheme) GetType() SecurityScheme_Type { + if x != nil { + return x.xxx_hidden_Type + } + return SecurityScheme_TYPE_INVALID +} + +func (x *SecurityScheme) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *SecurityScheme) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *SecurityScheme) GetIn() SecurityScheme_In { + if x != nil { + return x.xxx_hidden_In + } + return SecurityScheme_IN_INVALID +} + +func (x *SecurityScheme) GetFlow() SecurityScheme_Flow { + if x != nil { + return x.xxx_hidden_Flow + } + return SecurityScheme_FLOW_INVALID +} + +func (x *SecurityScheme) GetAuthorizationUrl() string { + if x != nil { + return x.xxx_hidden_AuthorizationUrl + } + return "" +} + +func (x *SecurityScheme) GetTokenUrl() string { + if x != nil { + return x.xxx_hidden_TokenUrl + } + return "" +} + +func (x *SecurityScheme) GetScopes() *Scopes { + if x != nil { + return x.xxx_hidden_Scopes + } + return nil +} + +func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *SecurityScheme) SetType(v SecurityScheme_Type) { + x.xxx_hidden_Type = v +} + +func (x *SecurityScheme) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *SecurityScheme) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *SecurityScheme) SetIn(v 
SecurityScheme_In) { + x.xxx_hidden_In = v +} + +func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) { + x.xxx_hidden_Flow = v +} + +func (x *SecurityScheme) SetAuthorizationUrl(v string) { + x.xxx_hidden_AuthorizationUrl = v +} + +func (x *SecurityScheme) SetTokenUrl(v string) { + x.xxx_hidden_TokenUrl = v +} + +func (x *SecurityScheme) SetScopes(v *Scopes) { + x.xxx_hidden_Scopes = v +} + +func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *SecurityScheme) HasScopes() bool { + if x == nil { + return false + } + return x.xxx_hidden_Scopes != nil +} + +func (x *SecurityScheme) ClearScopes() { + x.xxx_hidden_Scopes = nil +} + +type SecurityScheme_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type + // A short description for security scheme. + Description string + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + AuthorizationUrl string + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 SecurityScheme_builder) Build() *SecurityScheme { + m0 := &SecurityScheme{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Name = b.Name + x.xxx_hidden_In = b.In + x.xxx_hidden_Flow = b.Flow + x.xxx_hidden_AuthorizationUrl = b.AuthorizationUrl + x.xxx_hidden_TokenUrl = b.TokenUrl + x.xxx_hidden_Scopes = b.Scopes + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `SecurityRequirement` is a representation of OpenAPI v2 specification's +// Security Requirement object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject +// +// Lists the required security schemes to execute this operation. The object can +// have multiple security schemes declared in it which are all required (that +// is, there is a logical AND between the schemes). +// +// The name used for each property MUST correspond to a security scheme +// declared in the Security Definitions. 
+type SecurityRequirement struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue { + if x != nil { + return x.xxx_hidden_SecurityRequirement + } + return nil +} + +func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) { + x.xxx_hidden_SecurityRequirement = v +} + +type SecurityRequirement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. + SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue +} + +func (b0 SecurityRequirement_builder) Build() *SecurityRequirement { + m0 := &SecurityRequirement{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SecurityRequirement = b.SecurityRequirement + return m0 +} + +// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject +// +// Lists the available scopes for an OAuth2 security scheme. 
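For an oauth2 scheme, the Scopes message below carries the map from scope name to a short description. A small sketch using its builder and getter, under the same import-path assumption as the earlier examples:

package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
)

func main() {
	// Scope names map to short human-readable descriptions.
	scopes := options.Scopes_builder{
		Scope: map[string]string{
			"read":  "Grants read access",
			"write": "Grants write access",
		},
	}.Build()

	fmt.Println(scopes.GetScope()["read"])
}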
+type Scopes struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Scopes) Reset() { + *x = Scopes{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Scopes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Scopes) ProtoMessage() {} + +func (x *Scopes) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Scopes) GetScope() map[string]string { + if x != nil { + return x.xxx_hidden_Scope + } + return nil +} + +func (x *Scopes) SetScope(v map[string]string) { + x.xxx_hidden_Scope = v +} + +type Scopes_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Maps between a name of a scope to a short description of it (as the value + // of the property). + Scope map[string]string +} + +func (b0 Scopes_builder) Build() *Scopes { + m0 := &Scopes{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Scope = b.Scope + return m0 +} + +// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. +// These properties are not defined by OpenAPIv2, but they are used to control the generation. +type JSONSchema_FieldConfiguration struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema_FieldConfiguration) Reset() { + *x = JSONSchema_FieldConfiguration{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema_FieldConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema_FieldConfiguration) ProtoMessage() {} + +func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema_FieldConfiguration) GetPathParamName() string { + if x != nil { + return x.xxx_hidden_PathParamName + } + return "" +} + +func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) { + x.xxx_hidden_PathParamName = v +} + +type JSONSchema_FieldConfiguration_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. Use this to avoid having auto generated path parameter names + // for overlapping paths. 
+ PathParamName string +} + +func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration { + m0 := &JSONSchema_FieldConfiguration{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_PathParamName = b.PathParamName + return m0 +} + +// If the security scheme is of type "oauth2", then the value is a list of +// scope names required for the execution. For other security scheme types, +// the array MUST be empty. +type SecurityRequirement_SecurityRequirementValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityRequirement_SecurityRequirementValue) Reset() { + *x = SecurityRequirement_SecurityRequirementValue{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement_SecurityRequirementValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {} + +func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string { + if x != nil { + return x.xxx_hidden_Scope + } + return nil +} + +func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) { + x.xxx_hidden_Scope = v +} + +type SecurityRequirement_SecurityRequirementValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Scope []string +} + +func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue { + m0 := &SecurityRequirement_SecurityRequirementValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Scope = b.Scope + return m0 +} + +var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67, + 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 
0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07, + 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a, + 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 
0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 
0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, + 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, + 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, + 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, + 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, + 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, + 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, + 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, + 0x72, 0x6c, 0x12, 0x14, 
0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a, + 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 
0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, + 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, + 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, + 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 
0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18, + 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70, + 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65, + 
0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04, + 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e, + 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a, + 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67, + 0x12, 0x12, 0x0a, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, + 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, + 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, + 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 
0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10, + 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c, + 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, + 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50, + 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, + 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04, + 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, + 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06, + 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42, + 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{ + (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme + (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: 
grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters + (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header + (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response + (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info + (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact + (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License + (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes + nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + (*structpb.Value)(nil), // 41: google.protobuf.Value +} +var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{ + 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info + 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + 22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters + 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact + 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License + 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + 33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes + 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value + 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value + 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header + 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value + 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value + 38, // 52: 
grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + 53, // [53:53] is the sub-list for method output_type + 53, // [53:53] is the sub-list for method input_type + 53, // [53:53] is the sub-list for extension type_name + 53, // [53:53] is the sub-list for extension extendee + 0, // [0:53] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() } +func file_protoc_gen_openapiv2_options_openapiv2_proto_init() { + if File_protoc_gen_openapiv2_options_openapiv2_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc, + NumEnums: 6, + NumMessages: 35, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs, + EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes, + MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes, + }.Build() + File_protoc_gen_openapiv2_options_openapiv2_proto = out.File + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index a65d88eb8658..04b4bebf3d55 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -27,6 +27,7 @@ go_library( "//internal/httprule", "//utilities", "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//grpclog", "@org_golang_google_grpc//health/grpc_health_v1", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 2f2b342431d6..00b2228a1de5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -201,13 +201,13 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM if timeout != 0 { ctx, _ = context.WithTimeout(ctx, timeout) } - if len(pairs) == 0 { - return ctx, nil, nil - } md := metadata.Pairs(pairs...) for _, mda := range mux.metadataAnnotators { md = metadata.Join(md, mda(ctx, req)) } + if len(md) == 0 { + return ctx, nil, nil + } return ctx, md, nil } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 8376d1e0efd5..3d07063007d5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -66,7 +66,7 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { var ( // protoMessageType is stored to prevent constant lookup of the same type at runtime. 
- protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() + protoMessageType = reflect.TypeFor[proto.Message]() ) // marshalNonProto marshals a non-message field of a protobuf message. @@ -325,9 +325,9 @@ type protoEnum interface { EnumDescriptor() ([]byte, []int) } -var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() +var typeProtoEnum = reflect.TypeFor[protoEnum]() -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() +var typeProtoMessage = reflect.TypeFor[proto.Message]() // Delimiter for newline encoded JSON streams. func (j *JSONPb) Delimiter() []byte { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 19255ec441e6..3eb161671732 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/health/grpc_health_v1" @@ -281,12 +282,19 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, ) { _, outboundMarshaler := MarshalerForRequest(s, r) + annotatedContext, err := AnnotateContext(r.Context(), s, r, grpc_health_v1.Health_Check_FullMethodName, WithHTTPPathPattern(endpointPath)) + if err != nil { + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } - resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ + var md ServerMetadata + resp, err := healthCheckClient.Check(annotatedContext, &grpc_health_v1.HealthCheckRequest{ Service: r.URL.Query().Get("service"), - }) + }, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD)) + annotatedContext = NewServerMetadataContext(annotatedContext, md) if err != nil { - s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) return } @@ -300,7 +308,7 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin err = status.Error(codes.NotFound, resp.String()) } - s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) return } diff --git a/vendor/github.com/in-toto/attestation/LICENSE b/vendor/github.com/in-toto/attestation/LICENSE new file mode 100644 index 000000000000..702a3365c066 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/LICENSE @@ -0,0 +1,13 @@ +Copyright 2021 in-toto Developers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
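The marshal_jsonpb.go hunk above swaps the pre-generics reflect.TypeOf((*T)(nil)).Elem() idiom for reflect.TypeFor, available since Go 1.22. A minimal, self-contained sketch of the equivalence, assuming a Go 1.22+ toolchain; the exampleIface interface is hypothetical and merely stands in for proto.Message / protoEnum from the patched file:

package main

import (
	"fmt"
	"reflect"
)

// exampleIface is a placeholder interface standing in for proto.Message in the hunk above.
type exampleIface interface {
	Example()
}

func main() {
	// Pre-Go 1.22 idiom: take the type of a nil *exampleIface pointer, then unwrap it.
	oldT := reflect.TypeOf((*exampleIface)(nil)).Elem()
	// Go 1.22+ idiom used by the patch: resolve the interface type directly.
	newT := reflect.TypeFor[exampleIface]()
	fmt.Println(oldT == newT) // true: both describe the same interface type
}

Both expressions yield the identical reflect.Type, so the change is a readability-only modernization with no behavioral effect on the marshaler.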
diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go new file mode 100644 index 000000000000..ae912f0d1e24 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go @@ -0,0 +1,128 @@ +/* +Wrapper APIs for in-toto attestation ResourceDescriptor protos. +*/ + +package v1 + +import ( + "encoding/hex" + "errors" + "fmt" +) + +var ( + ErrIncorrectDigestLength = errors.New("digest has incorrect length") + ErrInvalidDigestEncoding = errors.New("digest is not valid hex-encoded string") + ErrRDRequiredField = errors.New("at least one of name, URI, or digest are required") +) + +type HashAlgorithm string + +const ( + AlgorithmMD5 HashAlgorithm = "md5" + AlgorithmSHA1 HashAlgorithm = "sha1" + AlgorithmSHA224 HashAlgorithm = "sha224" + AlgorithmSHA512_224 HashAlgorithm = "sha512_224" + AlgorithmSHA256 HashAlgorithm = "sha256" + AlgorithmSHA512_256 HashAlgorithm = "sha512_256" + AlgorithmSHA384 HashAlgorithm = "sha384" + AlgorithmSHA512 HashAlgorithm = "sha512" + AlgorithmSHA3_224 HashAlgorithm = "sha3_224" + AlgorithmSHA3_256 HashAlgorithm = "sha3_256" + AlgorithmSHA3_384 HashAlgorithm = "sha3_384" + AlgorithmSHA3_512 HashAlgorithm = "sha3_512" + AlgorithmGitBlob HashAlgorithm = "gitBlob" + AlgorithmGitCommit HashAlgorithm = "gitCommit" + AlgorithmGitTag HashAlgorithm = "gitTag" + AlgorithmGitTree HashAlgorithm = "gitTree" + AlgorithmDirHash HashAlgorithm = "dirHash" +) + +// HashAlgorithms indexes the known algorithms in a dictionary +// by their string value +var HashAlgorithms = map[string]HashAlgorithm{ + "md5": AlgorithmMD5, + "sha1": AlgorithmSHA1, + "sha224": AlgorithmSHA224, + "sha512_224": AlgorithmSHA512_224, + "sha256": AlgorithmSHA256, + "sha512_256": AlgorithmSHA512_256, + "sha384": AlgorithmSHA384, + "sha512": AlgorithmSHA512, + "sha3_224": AlgorithmSHA3_224, + "sha3_256": AlgorithmSHA3_256, + "sha3_384": AlgorithmSHA3_384, + "sha3_512": AlgorithmSHA3_512, + "gitBlob": AlgorithmGitBlob, + "gitCommit": AlgorithmGitCommit, + "gitTag": AlgorithmGitTag, + "gitTree": AlgorithmGitTree, + "dirHash": AlgorithmDirHash, +} + +// HexLength returns the expected length of an algorithm's hash when hexencoded +func (algo HashAlgorithm) HexLength() int { + switch algo { + case AlgorithmMD5: + return 16 + case AlgorithmSHA1, AlgorithmGitBlob, AlgorithmGitCommit, AlgorithmGitTag, AlgorithmGitTree: + return 20 + case AlgorithmSHA224, AlgorithmSHA512_224, AlgorithmSHA3_224: + return 28 + case AlgorithmSHA256, AlgorithmSHA512_256, AlgorithmSHA3_256, AlgorithmDirHash: + return 32 + case AlgorithmSHA384, AlgorithmSHA3_384: + return 48 + case AlgorithmSHA512, AlgorithmSHA3_512: + return 64 + default: + return 0 + } +} + +// String returns the hash algorithm name as a string +func (algo HashAlgorithm) String() string { + return string(algo) +} + +// Indicates if a given fixed-size hash algorithm is supported by default and returns the algorithm's +// digest size in bytes, if supported. We assume gitCommit and dirHash are aliases for sha1 and sha256, respectively. 
+// +// SHA digest sizes from https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// MD5 digest size from https://www.rfc-editor.org/rfc/rfc1321.html#section-1 +func isSupportedFixedSizeAlgorithm(algString string) (bool, int) { + algo := HashAlgorithm(algString) + return algo.HexLength() > 0, algo.HexLength() +} + +func (d *ResourceDescriptor) Validate() error { + // at least one of name, URI or digest are required + if d.GetName() == "" && d.GetUri() == "" && len(d.GetDigest()) == 0 { + return ErrRDRequiredField + } + + if len(d.GetDigest()) > 0 { + for alg, digest := range d.GetDigest() { + + // Per https://github.com/in-toto/attestation/blob/main/spec/v1/digest_set.md + // check encoding and length for supported algorithms; + // use of custom, unsupported algorithms is allowed and does not not generate validation errors. + supported, size := isSupportedFixedSizeAlgorithm(alg) + if supported { + // the in-toto spec expects a hex-encoded string in DigestSets for supported algorithms + hashBytes, err := hex.DecodeString(digest) + + if err != nil { + return fmt.Errorf("%w (%s: %s)", ErrInvalidDigestEncoding, alg, digest) + } + + // check the length of the digest + if len(hashBytes) != size { + return fmt.Errorf("%w: got %d bytes, want %d bytes (%s: %s)", ErrIncorrectDigestLength, len(hashBytes), size, alg, digest) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go new file mode 100644 index 000000000000..0dd94ea2634a --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -0,0 +1,195 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v4.24.4 +// source: in_toto_attestation/v1/resource_descriptor.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 ResourceDescriptor. +// https://github.com/in-toto/attestation/blob/main/spec/v1/resource_descriptor.md +// Validation of all fields is left to the users of this proto. +type ResourceDescriptor struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` + DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` + MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // Per the Struct protobuf spec, this type corresponds to + // a JSON Object, which is truly a map under the hood. 
+ // So, the Struct a) is still consistent with our specification for + // the `annotations` field, and b) has native support in some language + // bindings making their use easier in implementations. + // See: https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb#Struct + Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. +func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceDescriptor) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *ResourceDescriptor) GetDigest() map[string]string { + if x != nil { + return x.Digest + } + return nil +} + +func (x *ResourceDescriptor) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *ResourceDescriptor) GetDownloadLocation() string { + if x != nil { + return x.DownloadLocation + } + return "" +} + +func (x *ResourceDescriptor) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ResourceDescriptor) GetAnnotations() *structpb.Struct { + if x != nil { + return x.Annotations + } + return nil +} + +var File_in_toto_attestation_v1_resource_descriptor_proto protoreflect.FileDescriptor + +const file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = "" + + "\n" + + "0in_toto_attestation/v1/resource_descriptor.proto\x12\x16in_toto_attestation.v1\x1a\x1cgoogle/protobuf/struct.proto\"\xe6\x02\n" + + "\x12ResourceDescriptor\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x10\n" + + "\x03uri\x18\x02 \x01(\tR\x03uri\x12N\n" + + "\x06digest\x18\x03 \x03(\v26.in_toto_attestation.v1.ResourceDescriptor.DigestEntryR\x06digest\x12\x18\n" + + "\acontent\x18\x04 \x01(\fR\acontent\x12+\n" + + "\x11download_location\x18\x05 \x01(\tR\x10downloadLocation\x12\x1d\n" + + "\n" + + "media_type\x18\x06 \x01(\tR\tmediaType\x129\n" + + "\vannotations\x18\a \x01(\v2\x17.google.protobuf.StructR\vannotations\x1a9\n" + + "\vDigestEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01BG\n" + + "\x1fio.github.intoto.attestation.v1Z$github.com/in-toto/attestation/go/v1b\x06proto3" + +var ( + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData []byte +) + +func file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP() []byte { + 
file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc), len(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc))) + }) + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData +} + +var file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []any{ + (*ResourceDescriptor)(nil), // 0: in_toto_attestation.v1.ResourceDescriptor + nil, // 1: in_toto_attestation.v1.ResourceDescriptor.DigestEntry + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.ResourceDescriptor.digest:type_name -> in_toto_attestation.v1.ResourceDescriptor.DigestEntry + 2, // 1: in_toto_attestation.v1.ResourceDescriptor.annotations:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_resource_descriptor_proto_init() } +func file_in_toto_attestation_v1_resource_descriptor_proto_init() { + if File_in_toto_attestation_v1_resource_descriptor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc), len(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_resource_descriptor_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_resource_descriptor_proto = out.File + file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = nil + file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.go b/vendor/github.com/in-toto/attestation/go/v1/statement.go new file mode 100644 index 000000000000..f63d5f0d7475 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.go @@ -0,0 +1,50 @@ +/* +Wrapper APIs for in-toto attestation Statement layer protos. 
+*/ + +package v1 + +import "errors" + +const StatementTypeUri = "https://in-toto.io/Statement/v1" + +var ( + ErrInvalidStatementType = errors.New("wrong statement type") + ErrSubjectRequired = errors.New("at least one subject required") + ErrDigestRequired = errors.New("at least one digest required") + ErrPredicateTypeRequired = errors.New("predicate type required") + ErrPredicateRequired = errors.New("predicate object required") +) + +func (s *Statement) Validate() error { + if s.GetType() != StatementTypeUri { + return ErrInvalidStatementType + } + + if s.GetSubject() == nil || len(s.GetSubject()) == 0 { + return ErrSubjectRequired + } + + // check all resource descriptors in the subject + subject := s.GetSubject() + for _, rd := range subject { + if err := rd.Validate(); err != nil { + return err + } + + // v1 statements require the digest to be set in the subject + if len(rd.GetDigest()) == 0 { + return ErrDigestRequired + } + } + + if s.GetPredicateType() == "" { + return ErrPredicateTypeRequired + } + + if s.GetPredicate() == nil { + return ErrPredicateRequired + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go new file mode 100644 index 000000000000..bc76eaf26170 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v4.24.4 +// source: in_toto_attestation/v1/statement.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 Statement. +// https://github.com/in-toto/attestation/tree/main/spec/v1 +// Validation of all fields is left to the users of this proto. 
+type Statement struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Expected to always be "https://in-toto.io/Statement/v1" + Type string `protobuf:"bytes,1,opt,name=type,json=_type,proto3" json:"type,omitempty"` + Subject []*ResourceDescriptor `protobuf:"bytes,2,rep,name=subject,proto3" json:"subject,omitempty"` + PredicateType string `protobuf:"bytes,3,opt,name=predicate_type,json=predicateType,proto3" json:"predicate_type,omitempty"` + Predicate *structpb.Struct `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Statement) Reset() { + *x = Statement{} + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Statement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Statement) ProtoMessage() {} + +func (x *Statement) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Statement.ProtoReflect.Descriptor instead. +func (*Statement) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_statement_proto_rawDescGZIP(), []int{0} +} + +func (x *Statement) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Statement) GetSubject() []*ResourceDescriptor { + if x != nil { + return x.Subject + } + return nil +} + +func (x *Statement) GetPredicateType() string { + if x != nil { + return x.PredicateType + } + return "" +} + +func (x *Statement) GetPredicate() *structpb.Struct { + if x != nil { + return x.Predicate + } + return nil +} + +var File_in_toto_attestation_v1_statement_proto protoreflect.FileDescriptor + +const file_in_toto_attestation_v1_statement_proto_rawDesc = "" + + "\n" + + "&in_toto_attestation/v1/statement.proto\x12\x16in_toto_attestation.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a0in_toto_attestation/v1/resource_descriptor.proto\"\xc4\x01\n" + + "\tStatement\x12\x13\n" + + "\x04type\x18\x01 \x01(\tR\x05_type\x12D\n" + + "\asubject\x18\x02 \x03(\v2*.in_toto_attestation.v1.ResourceDescriptorR\asubject\x12%\n" + + "\x0epredicate_type\x18\x03 \x01(\tR\rpredicateType\x125\n" + + "\tpredicate\x18\x04 \x01(\v2\x17.google.protobuf.StructR\tpredicateBG\n" + + "\x1fio.github.intoto.attestation.v1Z$github.com/in-toto/attestation/go/v1b\x06proto3" + +var ( + file_in_toto_attestation_v1_statement_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_statement_proto_rawDescData []byte +) + +func file_in_toto_attestation_v1_statement_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_statement_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_statement_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_statement_proto_rawDesc), len(file_in_toto_attestation_v1_statement_proto_rawDesc))) + }) + return file_in_toto_attestation_v1_statement_proto_rawDescData +} + +var file_in_toto_attestation_v1_statement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_in_toto_attestation_v1_statement_proto_goTypes = []any{ + (*Statement)(nil), // 0: in_toto_attestation.v1.Statement + (*ResourceDescriptor)(nil), // 1: in_toto_attestation.v1.ResourceDescriptor + (*structpb.Struct)(nil), // 2: 
google.protobuf.Struct +} +var file_in_toto_attestation_v1_statement_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.Statement.subject:type_name -> in_toto_attestation.v1.ResourceDescriptor + 2, // 1: in_toto_attestation.v1.Statement.predicate:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_statement_proto_init() } +func file_in_toto_attestation_v1_statement_proto_init() { + if File_in_toto_attestation_v1_statement_proto != nil { + return + } + file_in_toto_attestation_v1_resource_descriptor_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_statement_proto_rawDesc), len(file_in_toto_attestation_v1_statement_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_statement_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_statement_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_statement_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_statement_proto = out.File + file_in_toto_attestation_v1_statement_proto_goTypes = nil + file_in_toto_attestation_v1_statement_proto_depIdxs = nil +} diff --git a/vendor/github.com/josharian/intern/README.md b/vendor/github.com/josharian/intern/README.md deleted file mode 100644 index ffc44b219b2f..000000000000 --- a/vendor/github.com/josharian/intern/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Docs: https://godoc.org/github.com/josharian/intern - -See also [Go issue 5160](https://golang.org/issue/5160). - -License: MIT diff --git a/vendor/github.com/josharian/intern/intern.go b/vendor/github.com/josharian/intern/intern.go deleted file mode 100644 index 7acb1fe90a11..000000000000 --- a/vendor/github.com/josharian/intern/intern.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package intern interns strings. -// Interning is best effort only. -// Interned strings may be removed automatically -// at any time without notification. -// All functions may be called concurrently -// with themselves and each other. -package intern - -import "sync" - -var ( - pool sync.Pool = sync.Pool{ - New: func() interface{} { - return make(map[string]string) - }, - } -) - -// String returns s, interned. -func String(s string) string { - m := pool.Get().(map[string]string) - c, ok := m[s] - if ok { - pool.Put(m) - return c - } - m[s] = s - pool.Put(m) - return s -} - -// Bytes returns b converted to a string, interned. 
-func Bytes(b []byte) string { - m := pool.Get().(map[string]string) - c, ok := m[string(b)] - if ok { - pool.Put(m) - return c - } - s := string(b) - m[s] = s - pool.Put(m) - return s -} diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE deleted file mode 100644 index fbff658f70d9..000000000000 --- a/vendor/github.com/mailru/easyjson/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go deleted file mode 100644 index 598a54af9dbf..000000000000 --- a/vendor/github.com/mailru/easyjson/buffer/pool.go +++ /dev/null @@ -1,278 +0,0 @@ -// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to -// reduce copying and to allow reuse of individual chunks. -package buffer - -import ( - "io" - "net" - "sync" -) - -// PoolConfig contains configuration for the allocation and reuse strategy. -type PoolConfig struct { - StartSize int // Minimum chunk size that is allocated. - PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. - MaxSize int // Maximum chunk size that will be allocated. -} - -var config = PoolConfig{ - StartSize: 128, - PooledSize: 512, - MaxSize: 32768, -} - -// Reuse pool: chunk size -> pool. -var buffers = map[int]*sync.Pool{} - -func initBuffers() { - for l := config.PooledSize; l <= config.MaxSize; l *= 2 { - buffers[l] = new(sync.Pool) - } -} - -func init() { - initBuffers() -} - -// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. -func Init(cfg PoolConfig) { - config = cfg - initBuffers() -} - -// putBuf puts a chunk to reuse pool if it can be reused. -func putBuf(buf []byte) { - size := cap(buf) - if size < config.PooledSize { - return - } - if c := buffers[size]; c != nil { - c.Put(buf[:0]) - } -} - -// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. -func getBuf(size int) []byte { - if size >= config.PooledSize { - if c := buffers[size]; c != nil { - v := c.Get() - if v != nil { - return v.([]byte) - } - } - } - return make([]byte, 0, size) -} - -// Buffer is a buffer optimized for serialization without extra copying. -type Buffer struct { - - // Buf is the current chunk that can be used for serialization. 
- Buf []byte - - toPool []byte - bufs [][]byte -} - -// EnsureSpace makes sure that the current chunk contains at least s free bytes, -// possibly creating a new chunk. -func (b *Buffer) EnsureSpace(s int) { - if cap(b.Buf)-len(b.Buf) < s { - b.ensureSpaceSlow(s) - } -} - -func (b *Buffer) ensureSpaceSlow(s int) { - l := len(b.Buf) - if l > 0 { - if cap(b.toPool) != cap(b.Buf) { - // Chunk was reallocated, toPool can be pooled. - putBuf(b.toPool) - } - if cap(b.bufs) == 0 { - b.bufs = make([][]byte, 0, 8) - } - b.bufs = append(b.bufs, b.Buf) - l = cap(b.toPool) * 2 - } else { - l = config.StartSize - } - - if l > config.MaxSize { - l = config.MaxSize - } - b.Buf = getBuf(l) - b.toPool = b.Buf -} - -// AppendByte appends a single byte to buffer. -func (b *Buffer) AppendByte(data byte) { - b.EnsureSpace(1) - b.Buf = append(b.Buf, data) -} - -// AppendBytes appends a byte slice to buffer. -func (b *Buffer) AppendBytes(data []byte) { - if len(data) <= cap(b.Buf)-len(b.Buf) { - b.Buf = append(b.Buf, data...) // fast path - } else { - b.appendBytesSlow(data) - } -} - -func (b *Buffer) appendBytesSlow(data []byte) { - for len(data) > 0 { - b.EnsureSpace(1) - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// AppendString appends a string to buffer. -func (b *Buffer) AppendString(data string) { - if len(data) <= cap(b.Buf)-len(b.Buf) { - b.Buf = append(b.Buf, data...) // fast path - } else { - b.appendStringSlow(data) - } -} - -func (b *Buffer) appendStringSlow(data string) { - for len(data) > 0 { - b.EnsureSpace(1) - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// Size computes the size of a buffer by adding sizes of every chunk. -func (b *Buffer) Size() int { - size := len(b.Buf) - for _, buf := range b.bufs { - size += len(buf) - } - return size -} - -// DumpTo outputs the contents of a buffer to a writer and resets the buffer. -func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { - bufs := net.Buffers(b.bufs) - if len(b.Buf) > 0 { - bufs = append(bufs, b.Buf) - } - n, err := bufs.WriteTo(w) - - for _, buf := range b.bufs { - putBuf(buf) - } - putBuf(b.toPool) - - b.bufs = nil - b.Buf = nil - b.toPool = nil - - return int(n), err -} - -// BuildBytes creates a single byte slice with all the contents of the buffer. Data is -// copied if it does not fit in a single chunk. You can optionally provide one byte -// slice as argument that it will try to reuse. -func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { - if len(b.bufs) == 0 { - ret := b.Buf - b.toPool = nil - b.Buf = nil - return ret - } - - var ret []byte - size := b.Size() - - // If we got a buffer as argument and it is big enough, reuse it. - if len(reuse) == 1 && cap(reuse[0]) >= size { - ret = reuse[0][:0] - } else { - ret = make([]byte, 0, size) - } - for _, buf := range b.bufs { - ret = append(ret, buf...) - putBuf(buf) - } - - ret = append(ret, b.Buf...) - putBuf(b.toPool) - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} - -type readCloser struct { - offset int - bufs [][]byte -} - -func (r *readCloser) Read(p []byte) (n int, err error) { - for _, buf := range r.bufs { - // Copy as much as we can. - x := copy(p[n:], buf[r.offset:]) - n += x // Increment how much we filled. - - // Did we empty the whole buffer? - if r.offset+x == len(buf) { - // On to the next buffer. 
- r.offset = 0 - r.bufs = r.bufs[1:] - - // We can release this buffer. - putBuf(buf) - } else { - r.offset += x - } - - if n == len(p) { - break - } - } - // No buffers left or nothing read? - if len(r.bufs) == 0 { - err = io.EOF - } - return -} - -func (r *readCloser) Close() error { - // Release all remaining buffers. - for _, buf := range r.bufs { - putBuf(buf) - } - // In case Close gets called multiple times. - r.bufs = nil - - return nil -} - -// ReadCloser creates an io.ReadCloser with all the contents of the buffer. -func (b *Buffer) ReadCloser() io.ReadCloser { - ret := &readCloser{0, append(b.bufs, b.Buf)} - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go deleted file mode 100644 index ff7b27c5b203..000000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go +++ /dev/null @@ -1,24 +0,0 @@ -// This file will only be included to the build if neither -// easyjson_nounsafe nor appengine build tag is set. See README notes -// for more details. - -//+build !easyjson_nounsafe -//+build !appengine - -package jlexer - -import ( - "reflect" - "unsafe" -) - -// bytesToStr creates a string pointing at the slice to avoid copying. -// -// Warning: the string returned by the function should be used with care, as the whole input data -// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data -// may be garbage-collected even when the string exists. -func bytesToStr(data []byte) string { - h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} - return *(*string)(unsafe.Pointer(&shdr)) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go deleted file mode 100644 index 864d1be67638..000000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go +++ /dev/null @@ -1,13 +0,0 @@ -// This file is included to the build if any of the buildtags below -// are defined. Refer to README notes for more details. - -//+build easyjson_nounsafe appengine - -package jlexer - -// bytesToStr creates a string normally from []byte -// -// Note that this method is roughly 1.5x slower than using the 'unsafe' method. -func bytesToStr(data []byte) string { - return string(data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go deleted file mode 100644 index e90ec40d05f5..000000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/error.go +++ /dev/null @@ -1,15 +0,0 @@ -package jlexer - -import "fmt" - -// LexerError implements the error interface and represents all possible errors that can be -// generated during parsing the JSON data. -type LexerError struct { - Reason string - Offset int - Data string -} - -func (l *LexerError) Error() string { - return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go deleted file mode 100644 index b5f5e2613293..000000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ /dev/null @@ -1,1244 +0,0 @@ -// Package jlexer contains a JSON lexer implementation. 
-// -// It is expected that it is mostly used with generated parser code, so the interface is tuned -// for a parser that knows what kind of data is expected. -package jlexer - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" - - "github.com/josharian/intern" -) - -// tokenKind determines type of a token. -type tokenKind byte - -const ( - tokenUndef tokenKind = iota // No token. - tokenDelim // Delimiter: one of '{', '}', '[' or ']'. - tokenString // A string literal, e.g. "abc\u1234" - tokenNumber // Number literal, e.g. 1.5e5 - tokenBool // Boolean literal: true or false. - tokenNull // null keyword. -) - -// token describes a single token: type, position in the input and value. -type token struct { - kind tokenKind // Type of a token. - - boolValue bool // Value if a boolean literal token. - byteValueCloned bool // true if byteValue was allocated and does not refer to original json body - byteValue []byte // Raw value of a token. - delimValue byte -} - -// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. -type Lexer struct { - Data []byte // Input data given to the lexer. - - start int // Start of the current token. - pos int // Current unscanned position in the input stream. - token token // Last scanned token, if token.kind != tokenUndef. - - firstElement bool // Whether current element is the first in array or an object. - wantSep byte // A comma or a colon character, which need to occur before a token. - - UseMultipleErrors bool // If we want to use multiple errors. - fatalError error // Fatal error occurred during lexing. It is usually a syntax error. - multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. -} - -// FetchToken scans the input for the next token. -func (r *Lexer) FetchToken() { - r.token.kind = tokenUndef - r.start = r.pos - - // Check if r.Data has r.pos element - // If it doesn't, it mean corrupted input data - if len(r.Data) < r.pos { - r.errParse("Unexpected end of data") - return - } - // Determine the type of a token by skipping whitespace and reading the - // first character. 
- for _, c := range r.Data[r.pos:] { - switch c { - case ':', ',': - if r.wantSep == c { - r.pos++ - r.start++ - r.wantSep = 0 - } else { - r.errSyntax() - } - - case ' ', '\t', '\r', '\n': - r.pos++ - r.start++ - - case '"': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenString - r.fetchString() - return - - case '{', '[': - if r.wantSep != 0 { - r.errSyntax() - } - r.firstElement = true - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '}', ']': - if !r.firstElement && (r.wantSep != ',') { - r.errSyntax() - } - r.wantSep = 0 - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': - if r.wantSep != 0 { - r.errSyntax() - } - r.token.kind = tokenNumber - r.fetchNumber() - return - - case 'n': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenNull - r.fetchNull() - return - - case 't': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = true - r.fetchTrue() - return - - case 'f': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = false - r.fetchFalse() - return - - default: - r.errSyntax() - return - } - } - r.fatalError = io.EOF - return -} - -// isTokenEnd returns true if the char can follow a non-delimiter token -func isTokenEnd(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' -} - -// fetchNull fetches and checks remaining bytes of null keyword. -func (r *Lexer) fetchNull() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'u' || - r.Data[r.pos-2] != 'l' || - r.Data[r.pos-1] != 'l' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchTrue fetches and checks remaining bytes of true keyword. -func (r *Lexer) fetchTrue() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'r' || - r.Data[r.pos-2] != 'u' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchFalse fetches and checks remaining bytes of false keyword. -func (r *Lexer) fetchFalse() { - r.pos += 5 - if r.pos > len(r.Data) || - r.Data[r.pos-4] != 'a' || - r.Data[r.pos-3] != 'l' || - r.Data[r.pos-2] != 's' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 5 - r.errSyntax() - } -} - -// fetchNumber scans a number literal token. -func (r *Lexer) fetchNumber() { - hasE := false - afterE := false - hasDot := false - - r.pos++ - for i, c := range r.Data[r.pos:] { - switch { - case c >= '0' && c <= '9': - afterE = false - case c == '.' && !hasDot: - hasDot = true - case (c == 'e' || c == 'E') && !hasE: - hasE = true - hasDot = true - afterE = true - case (c == '+' || c == '-') && afterE: - afterE = false - default: - r.pos += i - if !isTokenEnd(c) { - r.errSyntax() - } else { - r.token.byteValue = r.Data[r.start:r.pos] - } - return - } - } - - r.pos = len(r.Data) - r.token.byteValue = r.Data[r.start:] -} - -// findStringLen tries to scan into the string literal for ending quote char to determine required size. -// The size will be exact if no escapes are present and may be inexact if there are escaped chars. 
-func findStringLen(data []byte) (isValid bool, length int) { - for { - idx := bytes.IndexByte(data, '"') - if idx == -1 { - return false, len(data) - } - if idx == 0 || (idx > 0 && data[idx-1] != '\\') { - return true, length + idx - } - - // count \\\\\\\ sequences. even number of slashes means quote is not really escaped - cnt := 1 - for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' { - cnt++ - } - if cnt%2 == 0 { - return true, length + idx - } - - length += idx + 1 - data = data[idx+1:] - } -} - -// unescapeStringToken performs unescaping of string token. -// if no escaping is needed, original string is returned, otherwise - a new one allocated -func (r *Lexer) unescapeStringToken() (err error) { - data := r.token.byteValue - var unescapedData []byte - - for { - i := bytes.IndexByte(data, '\\') - if i == -1 { - break - } - - escapedRune, escapedBytes, err := decodeEscape(data[i:]) - if err != nil { - r.errParse(err.Error()) - return err - } - - if unescapedData == nil { - unescapedData = make([]byte, 0, len(r.token.byteValue)) - } - - var d [4]byte - s := utf8.EncodeRune(d[:], escapedRune) - unescapedData = append(unescapedData, data[:i]...) - unescapedData = append(unescapedData, d[:s]...) - - data = data[i+escapedBytes:] - } - - if unescapedData != nil { - r.token.byteValue = append(unescapedData, data...) - r.token.byteValueCloned = true - } - return -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - var val rune - for i := 2; i < len(s) && i < 6; i++ { - var v byte - c := s[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - v = c - '0' - case 'a', 'b', 'c', 'd', 'e', 'f': - v = c - 'a' + 10 - case 'A', 'B', 'C', 'D', 'E', 'F': - v = c - 'A' + 10 - default: - return -1 - } - - val <<= 4 - val |= rune(v) - } - return val -} - -// decodeEscape processes a single escape sequence and returns number of bytes processed. -func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) { - if len(data) < 2 { - return 0, 0, errors.New("incorrect escape symbol \\ at the end of token") - } - - c := data[1] - switch c { - case '"', '/', '\\': - return rune(c), 2, nil - case 'b': - return '\b', 2, nil - case 'f': - return '\f', 2, nil - case 'n': - return '\n', 2, nil - case 'r': - return '\r', 2, nil - case 't': - return '\t', 2, nil - case 'u': - rr := getu4(data) - if rr < 0 { - return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence") - } - - read := 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(data[read:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - read += 6 - rr = dec - } else { - rr = unicode.ReplacementChar - } - } - return rr, read, nil - } - - return 0, 0, errors.New("incorrectly escaped bytes") -} - -// fetchString scans a string literal token. -func (r *Lexer) fetchString() { - r.pos++ - data := r.Data[r.pos:] - - isValid, length := findStringLen(data) - if !isValid { - r.pos += length - r.errParse("unterminated string literal") - return - } - r.token.byteValue = data[:length] - r.pos += length + 1 // skip closing '"' as well -} - -// scanToken scans the next token if no token is currently available in the lexer. -func (r *Lexer) scanToken() { - if r.token.kind != tokenUndef || r.fatalError != nil { - return - } - - r.FetchToken() -} - -// consume resets the current token to allow scanning the next one. 
-func (r *Lexer) consume() { - r.token.kind = tokenUndef - r.token.byteValueCloned = false - r.token.delimValue = 0 -} - -// Ok returns true if no error (including io.EOF) was encountered during scanning. -func (r *Lexer) Ok() bool { - return r.fatalError == nil -} - -const maxErrorContextLen = 13 - -func (r *Lexer) errParse(what string) { - if r.fatalError == nil { - var str string - if len(r.Data)-r.pos <= maxErrorContextLen { - str = string(r.Data) - } else { - str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: what, - Offset: r.pos, - Data: str, - } - } -} - -func (r *Lexer) errSyntax() { - r.errParse("syntax error") -} - -func (r *Lexer) errInvalidToken(expected string) { - if r.fatalError != nil { - return - } - if r.UseMultipleErrors { - r.pos = r.start - r.consume() - r.SkipRecursive() - switch expected { - case "[": - r.token.delimValue = ']' - r.token.kind = tokenDelim - case "{": - r.token.delimValue = '}' - r.token.kind = tokenDelim - } - r.addNonfatalError(&LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - }) - return - } - - var str string - if len(r.token.byteValue) <= maxErrorContextLen { - str = string(r.token.byteValue) - } else { - str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.pos, - Data: str, - } -} - -func (r *Lexer) GetPos() int { - return r.pos -} - -// Delim consumes a token and verifies that it is the given delimiter. -func (r *Lexer) Delim(c byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() || r.token.delimValue != c { - r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. - r.errInvalidToken(string([]byte{c})) - } else { - r.consume() - } -} - -// IsDelim returns true if there was no scanning error and next token is the given delimiter. -func (r *Lexer) IsDelim(c byte) bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - return !r.Ok() || r.token.delimValue == c -} - -// Null verifies that the next token is null and consumes it. -func (r *Lexer) Null() { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenNull { - r.errInvalidToken("null") - } - r.consume() -} - -// IsNull returns true if the next token is a null keyword. -func (r *Lexer) IsNull() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - return r.Ok() && r.token.kind == tokenNull -} - -// Skip skips a single token. -func (r *Lexer) Skip() { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - r.consume() -} - -// SkipRecursive skips next array or object completely, or just skips a single token if not -// an array/object. -// -// Note: no syntax validation is performed on the skipped data. 
-func (r *Lexer) SkipRecursive() { - r.scanToken() - var start, end byte - startPos := r.start - - switch r.token.delimValue { - case '{': - start, end = '{', '}' - case '[': - start, end = '[', ']' - default: - r.consume() - return - } - - r.consume() - - level := 1 - inQuotes := false - wasEscape := false - - for i, c := range r.Data[r.pos:] { - switch { - case c == start && !inQuotes: - level++ - case c == end && !inQuotes: - level-- - if level == 0 { - r.pos += i + 1 - if !json.Valid(r.Data[startPos:r.pos]) { - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "skipped array/object json value is invalid", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } - } - return - } - case c == '\\' && inQuotes: - wasEscape = !wasEscape - continue - case c == '"' && inQuotes: - inQuotes = wasEscape - case c == '"': - inQuotes = true - } - wasEscape = false - } - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "EOF reached while skipping array/object or token", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } -} - -// Raw fetches the next item recursively as a data slice -func (r *Lexer) Raw() []byte { - r.SkipRecursive() - if !r.Ok() { - return nil - } - return r.Data[r.start:r.pos] -} - -// IsStart returns whether the lexer is positioned at the start -// of an input string. -func (r *Lexer) IsStart() bool { - return r.pos == 0 -} - -// Consumed reads all remaining bytes from the input, publishing an error if -// there is anything but whitespace remaining. -func (r *Lexer) Consumed() { - if r.pos > len(r.Data) || !r.Ok() { - return - } - - for _, c := range r.Data[r.pos:] { - if c != ' ' && c != '\t' && c != '\r' && c != '\n' { - r.AddError(&LexerError{ - Reason: "invalid character '" + string(c) + "' after top-level value", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - }) - return - } - - r.pos++ - r.start++ - } -} - -func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "", nil - } - if !skipUnescape { - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "", nil - } - } - - bytes := r.token.byteValue - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret, bytes -} - -// UnsafeString returns the string value if the token is a string literal. -// -// Warning: returned string may point to the input buffer, so the string should not outlive -// the input buffer. Intended pattern of usage is as an argument to a switch statement. -func (r *Lexer) UnsafeString() string { - ret, _ := r.unsafeString(false) - return ret -} - -// UnsafeBytes returns the byte slice if the token is a string literal. -func (r *Lexer) UnsafeBytes() []byte { - _, ret := r.unsafeString(false) - return ret -} - -// UnsafeFieldName returns current member name string token -func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { - ret, _ := r.unsafeString(skipUnescape) - return ret -} - -// String reads a string literal. 
-func (r *Lexer) String() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "" - } - var ret string - if r.token.byteValueCloned { - ret = bytesToStr(r.token.byteValue) - } else { - ret = string(r.token.byteValue) - } - r.consume() - return ret -} - -// StringIntern reads a string literal, and performs string interning on it. -func (r *Lexer) StringIntern() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "" - } - ret := intern.Bytes(r.token.byteValue) - r.consume() - return ret -} - -// Bytes reads a string literal and base64 decodes it into a byte slice. -func (r *Lexer) Bytes() []byte { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return nil - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return nil - } - ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) - n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) - if err != nil { - r.fatalError = &LexerError{ - Reason: err.Error(), - } - return nil - } - - r.consume() - return ret[:n] -} - -// Bool reads a true or false boolean keyword. -func (r *Lexer) Bool() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenBool { - r.errInvalidToken("bool") - return false - } - ret := r.token.boolValue - r.consume() - return ret -} - -func (r *Lexer) number() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenNumber { - r.errInvalidToken("number") - return "" - } - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret -} - -func (r *Lexer) Uint8() uint8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16() uint16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32() uint32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64() uint64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Uint() uint { - return uint(r.Uint64()) -} - -func (r *Lexer) Int8() int8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int8(n) -} - -func (r *Lexer) Int16() int16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := 
strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int16(n) -} - -func (r *Lexer) Int32() int32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int32(n) -} - -func (r *Lexer) Int64() int64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Int() int { - return int(r.Int64()) -} - -func (r *Lexer) Uint8Str() uint8 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16Str() uint16 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32Str() uint32 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64Str() uint64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) UintStr() uint { - return uint(r.Uint64Str()) -} - -func (r *Lexer) UintptrStr() uintptr { - return uintptr(r.Uint64Str()) -} - -func (r *Lexer) Int8Str() int8 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int8(n) -} - -func (r *Lexer) Int16Str() int16 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int16(n) -} - -func (r *Lexer) Int32Str() int32 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int32(n) -} - -func (r *Lexer) Int64Str() int64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) IntStr() int { - return int(r.Int64Str()) -} - -func (r *Lexer) Float32() float32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return float32(n) -} - -func (r *Lexer) Float32Str() float32 { - s, b := r.unsafeString(false) 
- if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return float32(n) -} - -func (r *Lexer) Float64() float64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Float64Str() float64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) Error() error { - return r.fatalError -} - -func (r *Lexer) AddError(e error) { - if r.fatalError == nil { - r.fatalError = e - } -} - -func (r *Lexer) AddNonFatalError(e error) { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - Reason: e.Error(), - }) -} - -func (r *Lexer) addNonfatalError(err *LexerError) { - if r.UseMultipleErrors { - // We don't want to add errors with the same offset. - if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { - return - } - r.multipleErrors = append(r.multipleErrors, err) - return - } - r.fatalError = err -} - -func (r *Lexer) GetNonFatalErrors() []*LexerError { - return r.multipleErrors -} - -// JsonNumber fetches and json.Number from 'encoding/json' package. -// Both int, float or string, contains them are valid values -func (r *Lexer) JsonNumber() json.Number { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() { - r.errInvalidToken("json.Number") - return json.Number("") - } - - switch r.token.kind { - case tokenString: - return json.Number(r.String()) - case tokenNumber: - return json.Number(r.Raw()) - case tokenNull: - r.Null() - return json.Number("") - default: - r.errSyntax() - return json.Number("") - } -} - -// Interface fetches an interface{} analogous to the 'encoding/json' package. -func (r *Lexer) Interface() interface{} { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() { - return nil - } - switch r.token.kind { - case tokenString: - return r.String() - case tokenNumber: - return r.Float64() - case tokenBool: - return r.Bool() - case tokenNull: - r.Null() - return nil - } - - if r.token.delimValue == '{' { - r.consume() - - ret := map[string]interface{}{} - for !r.IsDelim('}') { - key := r.String() - r.WantColon() - ret[key] = r.Interface() - r.WantComma() - } - r.Delim('}') - - if r.Ok() { - return ret - } else { - return nil - } - } else if r.token.delimValue == '[' { - r.consume() - - ret := []interface{}{} - for !r.IsDelim(']') { - ret = append(ret, r.Interface()) - r.WantComma() - } - r.Delim(']') - - if r.Ok() { - return ret - } else { - return nil - } - } - r.errSyntax() - return nil -} - -// WantComma requires a comma to be present before fetching next token. -func (r *Lexer) WantComma() { - r.wantSep = ',' - r.firstElement = false -} - -// WantColon requires a colon to be present before fetching next token. 
-func (r *Lexer) WantColon() { - r.wantSep = ':' - r.firstElement = false -} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go deleted file mode 100644 index 2c5b20105bb9..000000000000 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ /dev/null @@ -1,405 +0,0 @@ -// Package jwriter contains a JSON writer. -package jwriter - -import ( - "io" - "strconv" - "unicode/utf8" - - "github.com/mailru/easyjson/buffer" -) - -// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but -// Flags field in Writer is used to set and pass them around. -type Flags int - -const ( - NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. - NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. -) - -// Writer is a JSON writer. -type Writer struct { - Flags Flags - - Error error - Buffer buffer.Buffer - NoEscapeHTML bool -} - -// Size returns the size of the data that was written out. -func (w *Writer) Size() int { - return w.Buffer.Size() -} - -// DumpTo outputs the data to given io.Writer, resetting the buffer. -func (w *Writer) DumpTo(out io.Writer) (written int, err error) { - return w.Buffer.DumpTo(out) -} - -// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice -// as argument that it will try to reuse. -func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.BuildBytes(reuse...), nil -} - -// ReadCloser returns an io.ReadCloser that can be used to read the data. -// ReadCloser also resets the buffer. -func (w *Writer) ReadCloser() (io.ReadCloser, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.ReadCloser(), nil -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawByte(c byte) { - w.Buffer.AppendByte(c) -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawString(s string) { - w.Buffer.AppendString(s) -} - -// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for -// calling with results of MarshalJSON-like functions. -func (w *Writer) Raw(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.Buffer.AppendBytes(data) - default: - w.RawString("null") - } -} - -// RawText encloses raw binary data in quotes and appends in to the buffer. -// Useful for calling with results of MarshalText-like functions. 
-func (w *Writer) RawText(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.String(string(data)) - default: - w.RawString("null") - } -} - -// Base64Bytes appends data to the buffer after base64 encoding it -func (w *Writer) Base64Bytes(data []byte) { - if data == nil { - w.Buffer.AppendString("null") - return - } - w.Buffer.AppendByte('"') - w.base64(data) - w.Buffer.AppendByte('"') -} - -func (w *Writer) Uint8(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint16(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint32(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint64(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Int8(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int16(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int32(n int32) { - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int64(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Uint8Str(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint16Str(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint32Str(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintStr(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint64Str(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintptrStr(n uintptr) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int8Str(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int16Str(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int32Str(n int32) 
{ - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) IntStr(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int64Str(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float32(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) -} - -func (w *Writer) Float32Str(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float64(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) -} - -func (w *Writer) Float64Str(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Bool(v bool) { - w.Buffer.EnsureSpace(5) - if v { - w.Buffer.Buf = append(w.Buffer.Buf, "true"...) - } else { - w.Buffer.Buf = append(w.Buffer.Buf, "false"...) - } -} - -const chars = "0123456789abcdef" - -func getTable(falseValues ...int) [128]bool { - table := [128]bool{} - - for i := 0; i < 128; i++ { - table[i] = true - } - - for _, v := range falseValues { - table[v] = false - } - - return table -} - -var ( - htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') - htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') -) - -func (w *Writer) String(s string) { - w.Buffer.AppendByte('"') - - // Portions of the string that contain no escapes are appended as - // byte slices. 
- - p := 0 // last non-escape symbol - - escapeTable := &htmlEscapeTable - if w.NoEscapeHTML { - escapeTable = &htmlNoEscapeTable - } - - for i := 0; i < len(s); { - c := s[i] - - if c < utf8.RuneSelf { - if escapeTable[c] { - // single-width character, no escaping is required - i++ - continue - } - - w.Buffer.AppendString(s[p:i]) - switch c { - case '\t': - w.Buffer.AppendString(`\t`) - case '\r': - w.Buffer.AppendString(`\r`) - case '\n': - w.Buffer.AppendString(`\n`) - case '\\': - w.Buffer.AppendString(`\\`) - case '"': - w.Buffer.AppendString(`\"`) - default: - w.Buffer.AppendString(`\u00`) - w.Buffer.AppendByte(chars[c>>4]) - w.Buffer.AppendByte(chars[c&0xf]) - } - - i++ - p = i - continue - } - - // broken utf - runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) - if runeValue == utf8.RuneError && runeWidth == 1 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\ufffd`) - i++ - p = i - continue - } - - // jsonp stuff - tab separator and line separator - if runeValue == '\u2028' || runeValue == '\u2029' { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\u202`) - w.Buffer.AppendByte(chars[runeValue&0xf]) - i += runeWidth - p = i - continue - } - i += runeWidth - } - w.Buffer.AppendString(s[p:]) - w.Buffer.AppendByte('"') -} - -const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" -const padChar = '=' - -func (w *Writer) base64(in []byte) { - - if len(in) == 0 { - return - } - - w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) - - si := 0 - n := (len(in) / 3) * 3 - - for si < n { - // Convert 3x 8bit source bytes into 4 bytes - val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) - - si += 3 - } - - remain := len(in) - si - if remain == 0 { - return - } - - // Add the remaining small block - val := uint(in[si+0]) << 16 - if remain == 2 { - val |= uint(in[si+1]) << 8 - } - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) - - switch remain { - case 2: - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) - case 1: - w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) - } -} diff --git a/vendor/github.com/moby/policy-helpers/.gitignore b/vendor/github.com/moby/policy-helpers/.gitignore new file mode 100644 index 000000000000..d7d73bfed926 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/.gitignore @@ -0,0 +1,2 @@ +bin +vendor \ No newline at end of file diff --git a/vendor/github.com/moby/policy-helpers/.golangci.yml b/vendor/github.com/moby/policy-helpers/.golangci.yml new file mode 100644 index 000000000000..cda8c27fed14 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/.golangci.yml @@ -0,0 +1,130 @@ +version: "2" + +linters: + default: none + enable: + - bodyclose + - depguard + - durationcheck + - errname + - errorlint + - fatcontext + - forbidigo + - gocritic + - gosec + - govet + - importas + - ineffassign + - makezero + - misspell + - noctx + - nolintlint + - revive + - staticcheck + - testifylint + - unused + - usestdlibvars + - whitespace + settings: + depguard: + rules: + main: + deny: + - pkg: github.com/containerd/containerd/errdefs + desc: The containerd errdefs package was migrated to a separate module. Use github.com/containerd/errdefs instead. + - pkg: github.com/containerd/containerd/log + desc: The containerd log package was migrated to a separate module. Use github.com/containerd/log instead. 
+ - pkg: github.com/containerd/containerd/platforms + desc: The containerd platforms package was migrated to a separate module. Use github.com/containerd/platforms instead. + - pkg: io/ioutil + desc: The io/ioutil package has been deprecated. + forbidigo: + forbid: + - pattern: ^context\.WithCancel(# use context\.WithCancelCause instead)?$ + - pattern: ^context\.WithDeadline(# use context\.WithDeadline instead)?$ + - pattern: ^context\.WithTimeout(# use context\.WithTimeoutCause instead)?$ + - pattern: ^ctx\.Err(# use context\.Cause instead)?$ + - pattern: ^fmt\.Errorf(# use errors\.Errorf instead)?$ + - pattern: ^logrus\.(Trace|Debug|Info|Warn|Warning|Error|Fatal)(f|ln)?(# use bklog\.G or bklog\.L instead of logrus directly)?$ + - pattern: ^log\.G\(ctx\)\.(# use bklog import instead of shadowing stdlib)? + - pattern: ^log\.L\.(# use bklog import instead of shadowing stdlib)? + - pattern: ^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\. Be aware that DefaultSpec is for the local platform, so must be avoided when working cross-platform)?$ + gocritic: + disabled-checks: + - ifElseChain + - assignOp + - appendAssign + - singleCaseSwitch + gosec: + excludes: + - G101 + - G402 + - G504 + - G601 + - G115 + config: + G306: "0644" + govet: + enable: + - nilness + - unusedwrite + importas: + alias: + - pkg: github.com/containerd/errdefs + alias: cerrdefs + - pkg: github.com/opencontainers/image-spec/specs-go/v1 + alias: ocispecs + - pkg: github.com/opencontainers/go-digest + alias: digest + no-unaliased: true + staticcheck: + checks: + - all + testifylint: + disable: + - empty + - bool-compare + - len + - negative-positive + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - revive + text: stutters + - linters: + - revive + text: empty-block + - linters: + - revive + text: superfluous-else + - linters: + - revive + text: unused-parameter + - linters: + - revive + text: redefines-builtin-id + - linters: + - revive + text: if-return + paths: + - .*\.pb\.go$ + - examples + +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - .*\.pb\.go$ + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 diff --git a/vendor/github.com/moby/policy-helpers/Dockerfile b/vendor/github.com/moby/policy-helpers/Dockerfile new file mode 100644 index 000000000000..fc03f3e0ebcf --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/Dockerfile @@ -0,0 +1,43 @@ +# syntax=docker/dockerfile:1.19-labs + +ARG ALPINE_VERSION=3.22 +ARG ROOT_SIGNING_VERSION=main +ARG GOLANG_VERSION=1.25 +ARG XX_VERSION=1.8.0 +ARG DOCKER_HARDENED_IMAGES_KEYRING_VERSION=main + +FROM scratch AS sigstore-root-signing +ARG ROOT_SIGNING_VERSION +ADD https://www.github.com/sigstore/root-signing.git#${ROOT_SIGNING_VERSION} / + +FROM scratch AS tuf-root +COPY --from=sigstore-root-signing metadata/root.json metadata/snapshot.json metadata/timestamp.json metadata/targets.json / +COPY --parents --from=sigstore-root-signing targets/trusted_root.json / + +FROM alpine:${ALPINE_VERSION} AS validate-tuf-root +RUN --mount=type=bind,from=tuf-root,target=/a \ + --mount=type=bind,source=roots/tuf-root,target=/b \ + diff -ruN /a /b + +FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx + +FROM scratch AS dhi-keyring +ARG DOCKER_HARDENED_IMAGES_KEYRING_VERSION +ADD https://www.github.com/docker-hardened-images/keyring.git#${DOCKER_HARDENED_IMAGES_KEYRING_VERSION} / + +FROM scratch AS 
dhi-pubkey +COPY --from=dhi-keyring /publickey/dhi-latest.pub /dhi.pub + +FROM alpine:${ALPINE_VERSION} AS validate-dhi-pubkey +RUN --mount=type=bind,from=dhi-pubkey,target=/a \ + --mount=type=bind,source=roots/dhi,target=/b \ + diff -u /a/dhi-latest.pub /b/dhi.pub + +FROM --platform=$BUILDPLATFORM golang:${GOLANG_VERSION}-alpine${ALPINE_VERSION} AS build +COPY --from=xx / / +WORKDIR /go/src/github.com/moby/policy-helpers +ARG TARGETPLATFORM +RUN --mount=target=. xx-go build -o /out/policy-helper ./cmd/policy-helper + +FROM scratch AS binary +COPY --from=build /out/policy-helper / diff --git a/vendor/github.com/moby/policy-helpers/LICENSE b/vendor/github.com/moby/policy-helpers/LICENSE new file mode 100644 index 000000000000..f49a4e16e68b --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/moby/policy-helpers/README.md b/vendor/github.com/moby/policy-helpers/README.md new file mode 100644 index 000000000000..591cc7a42c03 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/README.md @@ -0,0 +1 @@ +Work in progress policy helpers for Moby & BuildKit. \ No newline at end of file diff --git a/vendor/github.com/moby/policy-helpers/docker-bake.hcl b/vendor/github.com/moby/policy-helpers/docker-bake.hcl new file mode 100644 index 000000000000..9fb6c9a083e7 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/docker-bake.hcl @@ -0,0 +1,116 @@ +variable "ROOT_SIGNING_VERSION" { + type = string + # default = "8842feefbb65effea46ff4a0f2b6aad91e685fe9" # expired root + # default = "9d8b5c5e3bed603c80b57fcc316b7a1af688c57e" # expired timestamp + default = "b72505e865a7c68bd75e03272fa66512bcb41bb1" + description = "The git commit hash of sigstore/root-signing to use for embedded roots." +} + +variable "DOCKER_HARDENED_IMAGES_KEYRING_VERSION" { + type = string + default = "04ae44966821da8e5cdcb4c51137dee69297161a" + description = "The git branch or commit hash of docker/hardened-images-keyring to use for DHI verification." 
+} + +target "tuf-root" { + target = "tuf-root" + output = [{ + type = "local", + dest = "roots/tuf-root" + }] + args = { + ROOT_SIGNING_VERSION = ROOT_SIGNING_VERSION + } +} + +target "validate-tuf-root" { + target = "validate-tuf-root" + output = [{ + type = "cacheonly" + }] + args = { + ROOT_SIGNING_VERSION = ROOT_SIGNING_VERSION + } +} + +group "validate-all" { + targets = ["lint", "lint-gopls", "validate-dockerfile", "validate-generated-files"] +} + +group "validate-generated-files" { + targets = ["validate-tuf-root"] +} + +target "lint" { + dockerfile = "./hack/dockerfiles/lint.Dockerfile" + output = ["type=cacheonly"] + args = { + GOLANGCI_FROM_SOURCE = "true" + } +} + +target "validate-dockerfile" { + matrix = { + dockerfile = [ + "Dockerfile", + "./hack/dockerfiles/lint.Dockerfile", + ] + } + name = "validate-dockerfile-${md5(dockerfile)}" + dockerfile = dockerfile + call = "check" +} + +target "lint-gopls" { + inherits = [ "lint" ] + target = "gopls-analyze" +} + +target "binary" { + target = "binary" + platforms = [ "local" ] + output = [{ + type = "local", + dest = "bin/" + }] +} + +target "_all_platforms" { + platforms = [ + "freebsd/amd64", + "linux/amd64", + "linux/arm64", + "linux/s390x", + "linux/ppc64le", + "linux/riscv64", + "windows/amd64", + "windows/arm64", + "darwin/amd64", + "darwin/arm64", + ] +} + +target "binary-all" { + inherits = [ "binary", "_all_platforms" ] +} + +target "dhi-pubkey" { + target = "dhi-pubkey" + output = [{ + type = "local", + dest = "roots/dhi" + }] + args = { + DOCKER_HARDENED_IMAGES_KEYRING_VERSION = DOCKER_HARDENED_IMAGES_KEYRING_VERSION + } +} + +target "validate-dhi-pubkey" { + target = "validate-dhi-pubkey" + output = [{ + type = "cacheonly" + }] + args = { + DOCKER_HARDENED_IMAGES_KEYRING_VERSION = DOCKER_HARDENED_IMAGES_KEYRING_VERSION + } +} diff --git a/vendor/github.com/moby/policy-helpers/errors.go b/vendor/github.com/moby/policy-helpers/errors.go new file mode 100644 index 000000000000..d60ae5a4b879 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/errors.go @@ -0,0 +1,21 @@ +package verifier + +import ( + "fmt" + + digest "github.com/opencontainers/go-digest" +) + +type NoSigChainError struct { + Target digest.Digest + HasAttestation bool +} + +var _ error = &NoSigChainError{} + +func (e *NoSigChainError) Error() string { + if e.HasAttestation { + return fmt.Sprintf("no signature found for image %s", e.Target) + } + return fmt.Sprintf("no provenance attestation found for image %s", e.Target) +} diff --git a/vendor/github.com/moby/policy-helpers/hashedrecordbundle.go b/vendor/github.com/moby/policy-helpers/hashedrecordbundle.go new file mode 100644 index 000000000000..823111d263e7 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/hashedrecordbundle.go @@ -0,0 +1,338 @@ +package verifier + +import ( + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "strconv" + "strings" + "time" + + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + v1common "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + "github.com/sigstore/sigstore-go/pkg/bundle" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tlog" + "github.com/sigstore/sigstore-go/pkg/verify" +) + +const ( + annotationCert = "dev.sigstore.cosign/certificate" + // annotationChain = "dev.sigstore.cosign/chain" + annotationSignature = "dev.cosignproject.cosign/signature" + annotationBundle = 
"dev.sigstore.cosign/bundle" +) + +// hashedRecordSignedEntity implements verify.SignedEntity using cosign oldbundle format. +type hashedRecordSignedEntity struct { + mfst *ocispecs.Manifest + cert verify.VerificationContent + sig *messageSignatureContent + isDHI bool +} + +var _ verify.SignedEntity = &hashedRecordSignedEntity{} +var _ verify.SignatureContent = &hashedRecordSignedEntity{} +var _ verify.VerificationContent = &hashedRecordSignedEntity{} + +func newHashedRecordSignedEntity(mfst *ocispecs.Manifest, isDHI bool) (verify.SignedEntity, error) { + if len(mfst.Layers) == 0 { + return nil, errors.New("no layers in manifest") + } + desc := mfst.Layers[0] + sigStr, ok := desc.Annotations[annotationSignature] + if !ok { + return nil, errors.New("no signature annotation found") + } + sig, err := base64.StdEncoding.DecodeString(sigStr) + if err != nil { + return nil, errors.Wrapf(err, "decode signature") + } + dgstBytest, err := hex.DecodeString(desc.Digest.Hex()) + if err != nil { + return nil, errors.Wrapf(err, "decode digest") + } + + hr := &hashedRecordSignedEntity{ + mfst: mfst, + sig: &messageSignatureContent{ + digest: dgstBytest, + signature: sig, + digestAlgorithm: desc.Digest.Algorithm().String(), + }, + isDHI: isDHI, + } + + if !isDHI { + certData := desc.Annotations[annotationCert] + if certData == "" { + return nil, errors.Errorf("no certificate annotation found") + } + block, _ := pem.Decode([]byte(certData)) + if block == nil { + return nil, errors.New("no PEM certificate found in annotation") + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, errors.WithStack(err) + } + hr.cert = bundle.NewCertificate(cert) + } + + return hr, nil +} + +func (d *hashedRecordSignedEntity) HasInclusionPromise() bool { + return true +} + +func (d *hashedRecordSignedEntity) HasInclusionProof() bool { + return true +} + +func (d *hashedRecordSignedEntity) SignatureContent() (verify.SignatureContent, error) { + return d, nil +} + +func (d *hashedRecordSignedEntity) Timestamps() ([][]byte, error) { + return nil, nil +} + +func (d *hashedRecordSignedEntity) TlogEntries() ([]*tlog.Entry, error) { + bundleBytes, ok := d.extractBundle() + if !ok { + return nil, nil + } + bundle, err := parseRekorBundle(bundleBytes) + if err != nil { + return nil, errors.Wrap(err, "parse rekor bundle") + } + logIDRaw, err := hex.DecodeString(bundle.LogID) + if err != nil { + return nil, errors.Wrap(err, "decode logID") + } + + tl, err := tlog.NewTlogEntry(&v1.TransparencyLogEntry{ + LogIndex: bundle.LogIndex, + LogId: &v1common.LogId{KeyId: logIDRaw}, + IntegratedTime: bundle.IntegratedTime, + CanonicalizedBody: bundle.Body, + KindVersion: &v1.KindVersion{ + Kind: "hashedrekord", + Version: "0.0.1", + }, + InclusionPromise: &v1.InclusionPromise{ + SignedEntryTimestamp: bundle.Signature, + }, + }) + if err != nil { + return nil, errors.Wrap(err, "create tlog entry") + } + return []*tlog.Entry{tl}, nil +} + +func (d *hashedRecordSignedEntity) VerificationContent() (verify.VerificationContent, error) { + return d, nil +} + +func (d *hashedRecordSignedEntity) Version() (string, error) { + return "v0.1", nil +} + +func (d *hashedRecordSignedEntity) Signature() []byte { + return d.sig.signature +} + +func (d *hashedRecordSignedEntity) EnvelopeContent() verify.EnvelopeContent { + return nil +} + +func (d *hashedRecordSignedEntity) MessageSignatureContent() verify.MessageSignatureContent { + return d.sig +} + +type messageSignatureContent struct { + digest []byte + digestAlgorithm string + 
signature []byte +} + +func (m *messageSignatureContent) Digest() []byte { + return m.digest +} + +func (m *messageSignatureContent) DigestAlgorithm() string { + return m.digestAlgorithm +} + +func (m *messageSignatureContent) Signature() []byte { + return m.signature +} + +// CompareKey traces parameters and returns false. +func (d *hashedRecordSignedEntity) CompareKey(k any, tm root.TrustedMaterial) bool { + if d.isDHI { + return (&bundle.PublicKey{}).CompareKey(k, tm) + } + if _, ok := k.(*x509.Certificate); !ok { + return false + } + return d.cert.CompareKey(k, tm) +} + +func (d *hashedRecordSignedEntity) ValidAtTime(t time.Time, tm root.TrustedMaterial) bool { + if d.isDHI { + return (&bundle.PublicKey{}).ValidAtTime(t, tm) + } + return d.cert.ValidAtTime(t, tm) +} + +func (d *hashedRecordSignedEntity) Certificate() *x509.Certificate { + if d.isDHI { + return nil + } + return d.cert.Certificate() +} + +func (d *hashedRecordSignedEntity) PublicKey() verify.PublicKeyProvider { + if d.isDHI { + return bundle.PublicKey{} + } + return d.cert.PublicKey() +} + +func (d *hashedRecordSignedEntity) extractBundle() ([]byte, bool) { + if len(d.mfst.Layers) == 0 { + return nil, false + } + desc := d.mfst.Layers[0] + bundleStr := desc.Annotations[annotationBundle] + if bundleStr == "" { + return nil, false + } + return []byte(bundleStr), true +} + +type rekorBundle struct { + Body []byte + Signature []byte + LogID string + IntegratedTime int64 + LogIndex int64 +} + +func parseRekorBundle(bundleBytes []byte) (*rekorBundle, error) { + var nb struct { + Content struct { + VerificationMaterial struct { + TlogEntries []struct { + LogIndex any `json:"logIndex"` + LogID struct { + KeyID string `json:"keyId"` + } `json:"logId"` + IntegratedTime any `json:"integratedTime"` + InclusionPromise struct { + SignedEntryTimestamp []byte `json:"signedEntryTimestamp"` + } `json:"inclusionPromise"` + CanonicalizedBody []byte `json:"canonicalizedBody"` + } `json:"tlogEntries"` + } `json:"verificationMaterial"` + } `json:"content"` + } + if json.Unmarshal(bundleBytes, &nb) == nil && len(nb.Content.VerificationMaterial.TlogEntries) > 0 { + e := nb.Content.VerificationMaterial.TlogEntries[0] + if len(e.CanonicalizedBody) != 0 && len(e.InclusionPromise.SignedEntryTimestamp) != 0 { + b := &rekorBundle{ + Body: e.CanonicalizedBody, + Signature: e.InclusionPromise.SignedEntryTimestamp, + LogID: strings.ToLower(e.LogID.KeyID), + } + + it, err1 := anyToInt64(e.IntegratedTime) + if err1 == nil { + b.IntegratedTime = it + } + li, err2 := anyToInt64(e.LogIndex) + if err2 == nil { + b.LogIndex = li + } + return b, nil + } + } + + // Fallback to older cosign bundle shape + var bundle struct { + SignedEntryTimestamp []byte `json:"SignedEntryTimestamp"` + Payload struct { + Body []byte `json:"body"` + LogID any `json:"logID"` + IntegratedTime any `json:"integratedTime"` + LogIndex any `json:"logIndex"` + } `json:"Payload"` + LogID any `json:"logID"` + IntegratedTime any `json:"integratedTime"` + LogIndex any `json:"logIndex"` + } + if err := json.Unmarshal(bundleBytes, &bundle); err != nil { + return nil, errors.Wrap(err, "parse bundle json") + } + + b := &rekorBundle{ + Body: bundle.Payload.Body, + Signature: bundle.SignedEntryTimestamp, + } + + // Prefer top-level fields when present; otherwise fall back to nested under Payload + // Handle string/number types + if s, ok := bundle.LogID.(string); ok { + b.LogID = s + } + if b.LogID == "" { + if s, ok := bundle.Payload.LogID.(string); ok { + b.LogID = s + } + } + if v, err := 
anyToInt64(bundle.IntegratedTime); err == nil { + b.IntegratedTime = v + } + if b.IntegratedTime == 0 { + if v, err := anyToInt64(bundle.Payload.IntegratedTime); err == nil { + b.IntegratedTime = v + } + } + if v, err := anyToInt64(bundle.LogIndex); err == nil { + b.LogIndex = v + } + if b.LogIndex == 0 { + if v, err := anyToInt64(bundle.Payload.LogIndex); err == nil { + b.LogIndex = v + } + } + return b, nil +} + +func anyToInt64(v any) (int64, error) { + switch t := v.(type) { + case nil: + return 0, errors.New("nil") + case float64: + return int64(t), nil + case json.Number: + return t.Int64() + case string: + if t == "" { + return 0, errors.New("empty") + } + return strconv.ParseInt(t, 10, 64) + case int64: + return t, nil + case int: + return int64(t), nil + default: + return 0, errors.Errorf("unsupported type %T", v) + } +} diff --git a/vendor/github.com/moby/policy-helpers/image/dhi.go b/vendor/github.com/moby/policy-helpers/image/dhi.go new file mode 100644 index 000000000000..87c34531ca0e --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/image/dhi.go @@ -0,0 +1,45 @@ +package image + +import ( + "context" + "strings" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/remotes" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type dhiKey struct{} + +func isDHIIndex(idx ocispecs.Index) bool { + for _, desc := range idx.Manifests { + if buildid, ok := desc.Annotations["com.docker.dhi.build.id"]; !ok || buildid == "" { + return false + } + } + return strings.HasPrefix(idx.Annotations["org.opencontainers.image.title"], "dhi/") +} + +func contextWithDHI(ctx context.Context) context.Context { + return context.WithValue(ctx, dhiKey{}, struct{}{}) +} + +func IsDHI(ctx context.Context) bool { + _, ok := ctx.Value(dhiKey{}).(struct{}) + return ok +} + +type dhiReferrersProvider struct { + ReferrersProvider +} + +func (d *dhiReferrersProvider) FetchReferrers(ctx context.Context, dgst digest.Digest, opts ...remotes.FetchReferrersOpt) ([]ocispecs.Descriptor, error) { + ctx = contextWithDHI(ctx) + return d.ReferrersProvider.FetchReferrers(ctx, dgst, opts...) 
+} + +func (d *dhiReferrersProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { + ctx = contextWithDHI(ctx) + return d.ReferrersProvider.ReaderAt(ctx, desc) +} diff --git a/vendor/github.com/moby/policy-helpers/image/resolve.go b/vendor/github.com/moby/policy-helpers/image/resolve.go new file mode 100644 index 000000000000..b7eb4ca07e97 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/image/resolve.go @@ -0,0 +1,239 @@ +package image + +import ( + "context" + "encoding/json" + "slices" + "sort" + "sync" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/remotes" + cerrdefs "github.com/containerd/errdefs" + "github.com/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ReferrersProvider interface { + content.Provider + remotes.ReferrersFetcher +} + +const ( + AnnotationDockerReferenceDigest = "vnd.docker.reference.digest" + AnnotationDockerReferenceType = "vnd.docker.reference.type" + AttestationManifestType = "attestation-manifest" +) + +const ( + ArtifactTypeCosignSignature = "application/vnd.dev.cosign.artifact.sig.v1+json" + ArtifactTypeSigstoreBundle = "application/vnd.dev.sigstore.bundle.v0.3+json" + ArtifactTypeInTotoJSON = "application/vnd.in-toto+json" + MediaTypeCosignSimpleSigning = "application/vnd.dev.cosign.simplesigning.v1+json" + SLSAProvenancePredicateType02 = "https://slsa.dev/provenance/v0.2" + SLSAProvenancePredicateType1 = "https://slsa.dev/provenance/v1" +) + +func resolveImageManifest(idx ocispecs.Index, platform ocispecs.Platform) (ocispecs.Descriptor, error) { + pMatcher := platforms.Only(platform) + + var descs []ocispecs.Descriptor + for _, d := range idx.Manifests { + // TODO: confirm handling of nested indexes + if !images.IsManifestType(d.MediaType) { + continue + } + if d.Platform == nil || pMatcher.Match(*d.Platform) { + descs = append(descs, d) + } + } + + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return pMatcher.Less(*descs[i].Platform, *descs[j].Platform) + }) + + if len(descs) == 0 { + return ocispecs.Descriptor{}, errors.Wrapf(cerrdefs.ErrNotFound, "no manifest for platform %+v", platforms.FormatAll(platform)) + } + return descs[0], nil +} + +type Manifest struct { + ocispecs.Descriptor + mu sync.Mutex + manifest *ocispecs.Manifest + data []byte +} + +type SignatureChain struct { + ImageManifest *Manifest + AttestationManifest *Manifest + SignatureManifest *Manifest + Provider content.Provider + DHI bool +} + +func (sc *SignatureChain) ManifestBytes(ctx context.Context, m *Manifest) ([]byte, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data != nil { + return m.data, nil + } + dt, err := ReadBlob(ctx, sc.Provider, m.Descriptor) + if err != nil { + return nil, err + } + m.data = dt + return dt, nil +} + +func (sc *SignatureChain) OCIManifest(ctx context.Context, m *Manifest) (*ocispecs.Manifest, error) { + m.mu.Lock() + if m.manifest != nil { + m.mu.Unlock() + return m.manifest, nil + } + m.mu.Unlock() + dt, err := sc.ManifestBytes(ctx, m) + if err != nil { + return nil, err + } + var manifest ocispecs.Manifest + if err := json.Unmarshal(dt, &manifest); err != nil { + return nil, errors.Wrapf(err, "unmarshaling manifest %s", m.Digest) + } + m.mu.Lock() + m.manifest = &manifest + 
m.mu.Unlock() + return &manifest, nil +} + +func ResolveSignatureChain(ctx context.Context, provider ReferrersProvider, desc ocispecs.Descriptor, platform *ocispecs.Platform) (*SignatureChain, error) { + if desc.MediaType != ocispecs.MediaTypeImageIndex { + return nil, errors.Errorf("expected image index descriptor, got %s", desc.MediaType) + } + + dt, err := ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + var index ocispecs.Index + if err := json.Unmarshal(dt, &index); err != nil { + return nil, errors.Wrapf(err, "unmarshaling image index") + } + + isDHI := isDHIIndex(index) + + if platform == nil { + p := platforms.Normalize(platforms.DefaultSpec()) + platform = &p + } + + manifestDesc, err := resolveImageManifest(index, *platform) + if err != nil { + return nil, errors.Wrapf(err, "resolving image manifest for platform %+v", platform) + } + + var attestationDesc *ocispecs.Descriptor + if isDHI { + provider = &dhiReferrersProvider{ReferrersProvider: provider} + allRefs, err := provider.FetchReferrers(ctx, manifestDesc.Digest, + remotes.WithReferrerArtifactTypes(ArtifactTypeInTotoJSON), + remotes.WithReferrerQueryFilter("predicateType", SLSAProvenancePredicateType02), + remotes.WithReferrerQueryFilter("predicateType", SLSAProvenancePredicateType1), + ) + if err != nil { + return nil, errors.Wrapf(err, "fetching referrers for manifest %s", manifestDesc.Digest) + } + refs := make([]ocispecs.Descriptor, 0, len(allRefs)) + for _, r := range allRefs { + if r.ArtifactType == ArtifactTypeInTotoJSON { + switch r.Annotations["in-toto.io/predicate-type"] { + case SLSAProvenancePredicateType02, SLSAProvenancePredicateType1: + refs = append(refs, r) + } + } + } + if len(refs) == 0 { + return nil, errors.Errorf("no attestation referrers found for DHI manifest %s", manifestDesc.Digest) + } + attestationDesc = &refs[0] + } else { + for _, d := range index.Manifests { + if d.Annotations[AnnotationDockerReferenceType] == AttestationManifestType && d.Annotations[AnnotationDockerReferenceDigest] == manifestDesc.Digest.String() { + attestationDesc = &d + break + } + } + } + sh := &SignatureChain{ + ImageManifest: &Manifest{ + Descriptor: manifestDesc, + }, + Provider: provider, + DHI: isDHI, + } + + if attestationDesc == nil { + return sh, nil + } + + sh.AttestationManifest = &Manifest{ + Descriptor: *attestationDesc, + } + + // currently not setting WithReferrerArtifactTypes in here as some registries(e.g. aws) don't know how to filter two types at once. 
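+	// All referrers are fetched here and then filtered down to the supported
+	// signature artifact types client-side below.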
+ allRefs, err := provider.FetchReferrers(ctx, attestationDesc.Digest) + if err != nil { + return nil, errors.Wrapf(err, "fetching referrers for attestation manifest %s", attestationDesc.Digest) + } + + refs := make([]ocispecs.Descriptor, 0, len(allRefs)) + for _, r := range allRefs { + if r.ArtifactType == ArtifactTypeSigstoreBundle || r.ArtifactType == ArtifactTypeCosignSignature { + refs = append(refs, r) + } + } + + if len(refs) == 0 { + return sh, nil + } + + // only allowing one signature manifest for now + // if multiple are found, prefer bundle format + slices.SortStableFunc(refs, func(a, b ocispecs.Descriptor) int { + aIsBundle := a.ArtifactType == ArtifactTypeSigstoreBundle + bIsBundle := b.ArtifactType == ArtifactTypeSigstoreBundle + if aIsBundle && !bIsBundle { + return -1 + } else if !aIsBundle && bIsBundle { + return 1 + } + return 0 + }) + + sh.SignatureManifest = &Manifest{ + Descriptor: refs[0], + } + return sh, nil +} + +func ReadBlob(ctx context.Context, provider content.Provider, desc ocispecs.Descriptor) ([]byte, error) { + dt, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, errors.Wrapf(err, "reading blob %s", desc.Digest) + } + if desc.Digest != digest.FromBytes(dt) { + return nil, errors.Wrapf(err, "digest mismatch for blob %s", desc.Digest) + } + return dt, nil +} diff --git a/vendor/github.com/moby/policy-helpers/roots/dhi/dhi.pub b/vendor/github.com/moby/policy-helpers/roots/dhi/dhi.pub new file mode 100644 index 000000000000..c36915ed3363 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/dhi/dhi.pub @@ -0,0 +1,4 @@ +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKdROmntRJFBrOJOQF5ww6gDBJqGm +Fxa4333s1KsL9ISjtmRzGNih9lNRsqfRVjgFgJIdL6EQ9dohdanvn7r2cg== +-----END PUBLIC KEY----- diff --git a/vendor/github.com/moby/policy-helpers/roots/dhi/dhiroot.go b/vendor/github.com/moby/policy-helpers/roots/dhi/dhiroot.go new file mode 100644 index 000000000000..a0987648ca60 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/dhi/dhiroot.go @@ -0,0 +1,67 @@ +package dhi + +import ( + _ "embed" + "time" + + "github.com/pkg/errors" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" +) + +//go:embed dhi.pub +var pubkeyPEM string + +// This may need to be updated if key rotation occurs. 
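+// dhiEpoch marks the start of the validity window for the embedded DHI
+// public key; see ValidAtTime below.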
+const dhiEpoch = 1743595200 // 2025-04-02 + +func TrustedRoot(fulcioTrustedRoot root.TrustedMaterial) (root.TrustedMaterial, error) { + pubKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(pubkeyPEM)) + if err != nil { + return nil, err + } + v, err := signature.LoadVerifierWithOpts(pubKey) + if err != nil { + return nil, errors.Wrap(err, "loading DHI public key verifier") + } + return &dhiTrustedMaterial{ + dhiVerifier: &dhiVerifier{v}, + fulcio: fulcioTrustedRoot, + }, nil +} + +type dhiTrustedMaterial struct { + *dhiVerifier + fulcio root.TrustedMaterial +} + +var _ root.TrustedMaterial = &dhiTrustedMaterial{} + +func (d *dhiTrustedMaterial) PublicKeyVerifier(_ string) (root.TimeConstrainedVerifier, error) { + return d.dhiVerifier, nil +} + +func (d *dhiTrustedMaterial) TimestampingAuthorities() []root.TimestampingAuthority { + return d.fulcio.TimestampingAuthorities() +} + +func (d *dhiTrustedMaterial) FulcioCertificateAuthorities() []root.CertificateAuthority { + return nil +} + +func (d *dhiTrustedMaterial) RekorLogs() map[string]*root.TransparencyLog { + return d.fulcio.RekorLogs() +} + +func (d *dhiTrustedMaterial) CTLogs() map[string]*root.TransparencyLog { + return nil +} + +type dhiVerifier struct { + signature.Verifier +} + +func (d *dhiVerifier) ValidAtTime(t time.Time) bool { + return t.Unix() >= dhiEpoch +} diff --git a/vendor/github.com/moby/policy-helpers/roots/embedded.go b/vendor/github.com/moby/policy-helpers/roots/embedded.go new file mode 100644 index 000000000000..42c9ef12f3bc --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/embedded.go @@ -0,0 +1,6 @@ +package roots + +import "embed" + +//go:embed tuf-root +var EmbeddedTUF embed.FS diff --git a/vendor/github.com/moby/policy-helpers/roots/roots.go b/vendor/github.com/moby/policy-helpers/roots/roots.go new file mode 100644 index 000000000000..b404d0ae19d7 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/roots.go @@ -0,0 +1,343 @@ +package roots + +import ( + "context" + "embed" + "io" + "io/fs" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/gofrs/flock" + "github.com/pkg/errors" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tuf" + "github.com/theupdateframework/go-tuf/v2/metadata" + "github.com/theupdateframework/go-tuf/v2/metadata/fetcher" +) + +type SigstoreRootsConfig struct { + CachePath string + UpdateInterval time.Duration + RequireOnline bool +} + +type TrustProvider struct { + mu sync.RWMutex + config SigstoreRootsConfig + client *tuf.Client + fetcher *airgappedFetcher + + status Status +} + +type Status struct { + Error error `json:"error,omitempty"` + LastUpdated *time.Time `json:"lastUpdated,omitempty"` +} + +const ( + trustedRootFilename = "trusted_root.json" +) + +func NewTrustProvider(cfg SigstoreRootsConfig) (*TrustProvider, error) { + if cfg.CachePath == "" { + return nil, errors.Errorf("cache path must be provided for trust provider") + } + def := tuf.DefaultOptions() + cacheDir := filepath.Join(cfg.CachePath, tuf.URLToPath(def.RepositoryBaseURL)) + if err := os.MkdirAll(cacheDir, 0o755); err != nil { + return nil, errors.Wrap(err, "creating cache directory for trust provider") + } + + tp := &TrustProvider{ + config: cfg, + } + + unlock, err := tp.lock() + if err != nil { + return nil, err + } + defer unlock() + + root, err := os.OpenRoot(cacheDir) + if err != nil { + return nil, errors.Wrap(err, "opening cache directory for trust provider") + } + defer root.Close() + if _, err := 
root.Lstat("root.json"); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrap(err, "statting root.json in cache directory for trust provider") + } + if err := copyEmbeddedRoot(EmbeddedTUF, root); err != nil { + return nil, errors.Wrap(err, "initializing cache directory for trust provider with embedded root") + } + } + + agf := &airgappedFetcher{ + baseURL: def.RepositoryBaseURL, + cacheDir: cacheDir, + onlineFetcher: fetcher.NewDefaultFetcher(), + isOnline: true, + } + tp.fetcher = agf + + tufOpts, err := tp.tufClientOpts() + if err != nil { + return nil, errors.Wrap(err, "creating TUF client options for trust provider") + } + + c, err := tuf.New(tufOpts) + if err != nil { + // try again with airgapped fetcher + // this can still fail if the last root or timestamps file has expired + + agf.isOnline = false + tufOpts, err := tp.tufClientOpts() + if err != nil { + return nil, errors.Wrap(err, "creating TUF client options for trust provider with airgapped fetcher") + } + c, err = tuf.New(tufOpts) + if err != nil { + return nil, errors.WithStack(err) + } + } + tp.client = c + agf.isOnline = true + + go tp.update() + + if cfg.UpdateInterval > 0 { + go func() { + ticker := time.NewTicker(cfg.UpdateInterval) + defer ticker.Stop() + // TODO: stop condition + for range ticker.C { + tp.update() + } + }() + } + + return tp, nil +} + +func (tp *TrustProvider) tufClientOpts() (*tuf.Options, error) { + def := tuf.DefaultOptions() + cacheDir := filepath.Join(tp.config.CachePath, tuf.URLToPath(def.RepositoryBaseURL)) + root, err := os.OpenRoot(cacheDir) + if err != nil { + return nil, errors.Wrap(err, "opening cache directory for trust provider") + } + defer root.Close() + + dt, err := EmbeddedTUF.ReadFile("tuf-root/root.json") + if err != nil { + return nil, err + } + def.Root = dt + def.CachePath = tp.config.CachePath + def.ForceCache = !tp.config.RequireOnline + def.Fetcher = tp.fetcher + return def, nil +} + +func (tp *TrustProvider) update() (err error) { + defer func() { + if err != nil { + tp.mu.Lock() + tp.status.Error = err + tp.mu.Unlock() + } + }() + + unlock, err := tp.lock() + if err != nil { + return err + } + defer unlock() + tufOpts, err := tp.tufClientOpts() + if err != nil { + return errors.Wrap(err, "creating TUF client options for trust provider") + } + c, err := tuf.New(tufOpts) + if err != nil { + return errors.WithStack(err) + } + err = c.Refresh() + if err != nil { + return err + } + tp.mu.Lock() + defer tp.mu.Unlock() + now := time.Now().UTC() + tp.status = Status{LastUpdated: &now} + tp.client = c + return nil +} + +func (tp *TrustProvider) wait(ctx context.Context) (*tuf.Client, error) { + first := true + errCh := make(chan error, 1) + for { + tp.mu.RLock() + status := tp.status + client := tp.client + tp.mu.RUnlock() + if status.LastUpdated != nil && status.Error == nil { + return client, nil + } + // try update if we are in error from some old reason that might be resolved now + if status.Error != nil && first { + go func() { + errCh <- tp.update() + }() + first = false + } + select { + case err := <-errCh: + if err == nil { + continue + } + return nil, err + case <-ctx.Done(): + return nil, context.Cause(ctx) + case <-time.After(100 * time.Millisecond): + } + } +} + +func (tp *TrustProvider) lock() (func() error, error) { + lockPath := path.Join(tp.config.CachePath, ".lock") + fileLock := flock.New(lockPath) + if err := fileLock.Lock(); err != nil { + return nil, errors.Wrap(err, "acquiring lock on trust provider cache") + } + return fileLock.Unlock, nil +} + 
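+// TrustedRoot returns the Sigstore trusted root parsed from the TUF target,
+// together with the status of the most recent refresh. If refreshing keeps
+// failing, the previously initialized client is used and the error is
+// reported in the returned Status.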
+func (tp *TrustProvider) TrustedRoot(ctx context.Context) (*root.TrustedRoot, Status, error) { + ctx, cnclFn := context.WithCancelCause(ctx) + ctx, _ = context.WithTimeoutCause(ctx, time.Second*5, errors.WithStack(context.DeadlineExceeded)) //nolint:govet + defer cnclFn(errors.WithStack(context.Canceled)) + + var st Status + client, err := tp.wait(ctx) + if err != nil { // return indication of last refresh error? TODO(@tonistiigi) does this make GetTarget fail as well and separate instance of client is needed for optional refresh? + st.Error = err + tp.mu.RLock() + client = tp.client + tp.mu.RUnlock() + } + + jsonBytes, err := client.GetTarget(trustedRootFilename) + if err != nil { + return nil, st, err + } + tr, err := root.NewTrustedRootFromJSON(jsonBytes) + return tr, st, err +} + +type airgappedFetcher struct { + baseURL string + cacheDir string + onlineFetcher fetcher.Fetcher + isOnline bool +} + +func (f *airgappedFetcher) DownloadFile(urlPath string, maxLength int64, dur time.Duration) ([]byte, error) { + if f.isOnline { + dt, err := f.onlineFetcher.DownloadFile(urlPath, maxLength, dur) + if err != nil { + return nil, err + } + // save root chain to cache so that it can be reverified while offline + u, err := url.Parse(urlPath) + if err != nil { + return nil, errors.Wrap(err, "parsing URL in trust provider fetcher") + } + cache, err := os.OpenRoot(f.cacheDir) + if err != nil { + return nil, errors.Wrap(err, "opening cache directory for trust provider") + } + defer cache.Close() + if strings.HasSuffix(u.Path, ".root.json") { + base := path.Base(u.Path) + if err := cache.MkdirAll("roots", 0o755); err != nil { + return nil, errors.Wrap(err, "creating roots directory in trust provider cache") + } + if err := cache.WriteFile(path.Join("roots", base), dt, 0o644); err != nil { + return nil, errors.Wrap(err, "caching root file in trust provider cache") + } + } + return dt, nil + } + const timestampFilename = "timestamp.json" + if urlPath == f.baseURL+"/"+timestampFilename { + cache, err := os.OpenRoot(f.cacheDir) + if err != nil { + return nil, errors.Wrap(err, "opening cache directory for trust provider") + } + defer cache.Close() + if dt, err := cache.ReadFile(timestampFilename); err == nil { + return dt, nil + } + } + if strings.HasSuffix(urlPath, ".root.json") { + u, err := url.Parse(urlPath) + if err == nil { + base := path.Base(u.Path) + if urlPath == f.baseURL+"/"+base && strings.HasSuffix(base, ".root.json") { + cache, err := os.OpenRoot(f.cacheDir) + if err != nil { + return nil, errors.Wrap(err, "opening cache directory for trust provider") + } + defer cache.Close() + dt, err := cache.ReadFile("roots/" + base) + if err == nil { + return dt, nil + } + } + } + } + return nil, &metadata.ErrDownloadHTTP{ + StatusCode: 404, + } +} + +func copyEmbeddedRoot(src embed.FS, dest *os.Root) error { + subFS, err := fs.Sub(src, "tuf-root") + if err != nil { + return errors.WithStack(err) + } + return fs.WalkDir(subFS, ".", func(p string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return dest.MkdirAll(p, 0o755) + } + in, err := subFS.Open(p) + if err != nil { + return err + } + defer in.Close() + + if err := dest.MkdirAll(path.Dir(p), 0o755); err != nil { + return err + } + out, err := dest.OpenFile(p, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) + if err != nil { + return err + } + if _, err := io.Copy(out, in); err != nil { + out.Close() + return err + } + return out.Close() + }) +} diff --git 
a/vendor/github.com/moby/policy-helpers/roots/tuf-root/root.json b/vendor/github.com/moby/policy-helpers/roots/tuf-root/root.json new file mode 100644 index 000000000000..a50bcb2335c8 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/tuf-root/root.json @@ -0,0 +1,145 @@ +{ + "signatures": [ + { + "keyid": "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", + "sig": "" + }, + { + "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "sig": "3045022100bbddd464f8066ceb88ba787375c12cd6330680e08c2910703e6538c71cc79ad202205190b06e4537fe961b3ef81fe68edcd0089c19f919afed423b9aafd700641153" + }, + { + "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "sig": "3044022069306cd5257f732a740c1afe60a8e433c5de58eafeadbe99c336c9c71d198cf802200d773953ae7dbc48d3e5bad9a6f64bafff196b7e2ad4a52a19519367d47dc042" + }, + { + "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "sig": "304402204d21a2ec80df66e61f6fe2912951dc47df836036f8c0ab10816d375e71dbf79e0220547adce1afdf04e6794efa203dd5264c6f7e0ef78e57fe934b0d26cb994eec76" + }, + { + "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "sig": "3045022060826496557144eb1649893ed5f6f4ea54536feb0ca82f8b89ae641be39743e5022100ad7118b5e9d4837326206e412fc6da2999925d110328a7c166b06c624336c93f" + }, + { + "keyid": "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632", + "sig": "3046022100d8179439c2e73eb0c1733abee7faf832dcaea7263edcb4919891c3a247f05923022100e1a437e0797e803f9b72dc9d2d92155b0a2270c24efdd5f4b3a5d8f0b0f431a7" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2026-01-22T13:05:59Z", + "keys": { + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "gcpkms:projects/sigstore-root-signing/locations/global/keyRings/root/cryptoKeys/timestamp/cryptoKeyVersions/1" + }, + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMxpPOJCIZ5otG4106fGJseEQi3V9\npkMYQ4uyV9Tj1M7WHXIyLG+jkfvuG0glQ1JZbRZZBV3gAR4sojdGHISeow==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@lance" + }, + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@santiagotorres" + }, + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": 
"@bobcallaway" + }, + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@joshuagl" + }, + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@mnm678" + } + }, + "roles": { + "root": { + "keyids": [ + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632" + ], + "threshold": 3 + }, + "snapshot": { + "keyids": [ + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 365 + }, + "targets": { + "keyids": [ + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632" + ], + "threshold": 3 + }, + "timestamp": { + "keyids": [ + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 7, + "x-tuf-on-ci-signing-period": 6 + } + }, + "spec_version": "1.0", + "version": 13, + "x-tuf-on-ci-expiry-period": 197, + "x-tuf-on-ci-signing-period": 46 + } +} \ No newline at end of file diff --git a/vendor/github.com/moby/policy-helpers/roots/tuf-root/snapshot.json b/vendor/github.com/moby/policy-helpers/roots/tuf-root/snapshot.json new file mode 100644 index 000000000000..8aaad5e8f53b --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/tuf-root/snapshot.json @@ -0,0 +1,54 @@ +{ + "signatures": [ + { + "keyid": "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5", + "sig": "3044022043cdc6f2ee47ee7b4486ab92ce58424ef0b7b351a47853ea68316b67133a4a69022037c5cd433cc2cde76558c579c59a14dd9fc0bc85c496feaa17d90896cd4145fb" + } + ], + "signed": { + "_type": "snapshot", + "expires": "2035-10-08T16:46:31Z", + "meta": { + "registry.npmjs.org.json": { + "version": 6 + }, + "rekor.json": { + "hashes": { + "sha256": "9d2e1a5842937d8e0d3e3759170b0ad15c56c5df36afc5cf73583ddd283a463b", + "sha512": "176e9e710ddddd1b357a7d7970831bae59763395a0c18976110cbd35b25e5412dc50f356ec421a7a30265670cf7aec9ed84ee944ba700ec2394b9c876645b960" + }, + "length": 797, + "version": 3 + }, + "revocation.json": { + "hashes": { + "sha256": "6f60848ba8fb0955a02abfd1232fb3845dc9ee9f418bf03521a7ddb48217e040", + "sha512": "a965dddd0d0edef6c59e84cf02ecf5a53299f633fd339b2b61814a4219ab4df672a6390f265b8b29e1c8cea9368ea3440df013790759d50231a30df1c1f02551" + }, + 
"length": 800, + "version": 2 + }, + "root.json": { + "hashes": { + "sha256": "f5ad897c9414cca99629f400ac3585e41bd8ebb44c5af07fb08dd636a9eced9c", + "sha512": "7445ddfdd338ef786c324fc3d68f75be28cb95b7fb581d2a383e3e5dde18aa17029a5636ec0a22e9631931bbcb34057788311718ea41e21e7cdd3c0de13ede42" + }, + "length": 5297, + "version": 2 + }, + "staging.json": { + "hashes": { + "sha256": "cda57759abac5375397eea3531d7ca51e3a67da9a2dc93f2cdab749e2ae73149", + "sha512": "e9e59587bde453144c7079884a880c706f1d43f26e8bb23fac2b96a99569a2a30ae6cf51ec51c2454f760ce83d4c20915e062aede7f319b3da6a6ed1d26ca281" + }, + "length": 401, + "version": 2 + }, + "targets.json": { + "version": 13 + } + }, + "spec_version": "1.0", + "version": 162 + } +} \ No newline at end of file diff --git a/vendor/github.com/moby/policy-helpers/roots/tuf-root/targets.json b/vendor/github.com/moby/policy-helpers/roots/tuf-root/targets.json new file mode 100644 index 000000000000..6193cf9434ed --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/tuf-root/targets.json @@ -0,0 +1,167 @@ +{ + "signatures": [ + { + "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "sig": "3044022054d6a8fe5f1eb2884cf4d6bb39e37515c70fc14f5730d07d8240b5da287aa2df022029054f010dd87e5bf64c75e061c81337a7946b3c870376587812156519572c2b" + }, + { + "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "sig": "30450220531a618d5c05608521ae05807299db298fc3319492fab15c4de6ac30d8011844022100b8fc501ccc1fb0aabc536a19abb805c4951c7fe0a957eb935eeca7b8072e9c58" + }, + { + "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "sig": "30440220180f029a56100e3f02ef26f7355595cad0c9eb8235fd6fcf71ba419c243f59cd022023d5619c686320409d71971137ca8eb2ce01b29da5864b36053e6b5207d6d39c" + }, + { + "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "sig": "" + }, + { + "keyid": "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632", + "sig": "3046022100f748b26ba6c6d02bddea73df9d84d39dc365513bc60b6495b1391db785fffe1a022100aca703faf2839c8ab45cc22a8311b4b86b10422392b14cc94c1f0ed7ee634d37" + } + ], + "signed": { + "_type": "targets", + "delegations": { + "keys": { + "5e3a4021b11a425fd0a444f1670457ce5b15bbe036144f2417426f7f4b9721da": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEVfei1dXQRVeArCMcTDgxJtYg+Fs7\nV87DjhQbGlRJPyC7SW5TbNNkmvpmi4LeTv6moLVZ7T2nVqiRZbSkD+cf8w==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "azurekms://npm-tuf-delegate.vault.azure.net/keys/npm-tuf-delegate-2024-08/e2772c1d01ca400da571096889f1660e" + } + }, + "roles": [ + { + "keyids": [ + "5e3a4021b11a425fd0a444f1670457ce5b15bbe036144f2417426f7f4b9721da" + ], + "name": "registry.npmjs.org", + "paths": [ + "registry.npmjs.org/*" + ], + "terminating": true, + "threshold": 1 + } + ] + }, + "expires": "2035-09-29T08:18:33Z", + "spec_version": "1.0", + "targets": { + "artifact.pub": { + "custom": { + "sigstore": { + "status": "Active", + "usage": "Unknown" + } + }, + "hashes": { + "sha256": "59ebf97a9850aecec4bc39c1f5c1dc46e6490a6b5fd2a6cacdcac0c3a6fc4cbf" + }, + "length": 177 + }, + "ctfe.pub": { + "custom": { + "sigstore": { + "status": "Active", + "uri": "https://ctfe.sigstore.dev/test", + "usage": "CTFE" + } + }, + "hashes": { + "sha256": "7fcb94a5d0ed541260473b990b99a6c39864c1fb16f3f3e594a5a3cebbfe138a" + }, + "length": 177 + }, + "ctfe_2022.pub": { + "custom": { + "sigstore": { + 
"status": "Active", + "uri": "https://ctfe.sigstore.dev/2022", + "usage": "CTFE" + } + }, + "hashes": { + "sha256": "270488a309d22e804eeb245493e87c667658d749006b9fee9cc614572d4fbbdc" + }, + "length": 178 + }, + "fulcio.crt.pem": { + "custom": { + "sigstore": { + "status": "Expired", + "uri": "https://fulcio.sigstore.dev", + "usage": "Fulcio" + } + }, + "hashes": { + "sha256": "f360c53b2e13495a628b9b8096455badcb6d375b185c4816d95a5d746ff29908" + }, + "length": 744 + }, + "fulcio_intermediate_v1.crt.pem": { + "custom": { + "sigstore": { + "status": "Active", + "uri": "https://fulcio.sigstore.dev", + "usage": "Fulcio" + } + }, + "hashes": { + "sha256": "f8cbecf186db7714624a5f4e99da31a917cbef70a94dd6921f5c3ca969dfe30a" + }, + "length": 789 + }, + "fulcio_v1.crt.pem": { + "custom": { + "sigstore": { + "status": "Active", + "uri": "https://fulcio.sigstore.dev", + "usage": "Fulcio" + } + }, + "hashes": { + "sha256": "f989aa23def87c549404eadba767768d2a3c8d6d30a8b793f9f518a8eafd2cf5" + }, + "length": 740 + }, + "rekor.pub": { + "custom": { + "sigstore": { + "status": "Active", + "uri": "https://rekor.sigstore.dev", + "usage": "Rekor" + } + }, + "hashes": { + "sha256": "dce5ef715502ec9f3cdfd11f8cc384b31a6141023d3e7595e9908a81cb6241bd" + }, + "length": 178 + }, + "signing_config.json": { + "hashes": { + "sha256": "d358c75d032833f4193500f5b01b5760409410558fac962c599439adbb268b0f" + }, + "length": 219 + }, + "signing_config.v0.2.json": { + "hashes": { + "sha256": "9711a6d5375706957a4859af31c5866a4474f81f0544f9f4b76c9c4f4c8a539c" + }, + "length": 1034 + }, + "trusted_root.json": { + "hashes": { + "sha256": "6494e21ea73fa7ee769f85f57d5a3e6a08725eae1e38c755fc3517c9e6bc0b66" + }, + "length": 6787 + } + }, + "version": 13, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 31 + } +} \ No newline at end of file diff --git a/vendor/github.com/moby/policy-helpers/roots/tuf-root/targets/trusted_root.json b/vendor/github.com/moby/policy-helpers/roots/tuf-root/targets/trusted_root.json new file mode 100644 index 000000000000..effb0a19e6a0 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/tuf-root/targets/trusted_root.json @@ -0,0 +1,126 @@ +{ + "mediaType": "application/vnd.dev.sigstore.trustedroot+json;version=0.1", + "tlogs": [ + { + "baseUrl": "https://rekor.sigstore.dev", + "hashAlgorithm": "SHA2_256", + "publicKey": { + "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==", + "keyDetails": "PKIX_ECDSA_P256_SHA_256", + "validFor": { + "start": "2021-01-12T11:53:27Z" + } + }, + "logId": { + "keyId": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0=" + } + }, + { + "baseUrl": "https://log2025-1.rekor.sigstore.dev", + "hashAlgorithm": "SHA2_256", + "publicKey": { + "rawBytes": "MCowBQYDK2VwAyEAt8rlp1knGwjfbcXAYPYAkn0XiLz1x8O4t0YkEhie244=", + "keyDetails": "PKIX_ED25519", + "validFor": { + "start": "2025-09-23T00:00:00Z" + } + }, + "logId": { + "keyId": "zxGZFVvd0FEmjR8WrFwMdcAJ9vtaY/QXf44Y1wUeP6A=" + } + } + ], + "certificateAuthorities": [ + { + "subject": { + "organization": "sigstore.dev", + "commonName": "sigstore" + }, + "uri": "https://fulcio.sigstore.dev", + "certChain": { + "certificates": [ + { + "rawBytes": 
"MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAqMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIxMDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSyA7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0JcastaRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6NmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2uSu1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJxVe/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uupHr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ==" + } + ] + }, + "validFor": { + "start": "2021-03-07T03:20:29Z", + "end": "2022-12-31T23:59:59.999Z" + } + }, + { + "subject": { + "organization": "sigstore.dev", + "commonName": "sigstore" + }, + "uri": "https://fulcio.sigstore.dev", + "certChain": { + "certificates": [ + { + "rawBytes": "MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV77LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjpKFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZIzj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJRnZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsPmygUY7Ii2zbdCdliiow=" + }, + { + "rawBytes": "MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxexX69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92jYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRYwB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQKsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCMWP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ" + } + ] + }, + "validFor": { + "start": "2022-04-13T20:06:15Z" + } + } + ], + "ctlogs": [ + { + "baseUrl": "https://ctfe.sigstore.dev/test", + "hashAlgorithm": "SHA2_256", + "publicKey": { + "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3PyudDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w==", + "keyDetails": "PKIX_ECDSA_P256_SHA_256", + "validFor": { + "start": "2021-03-14T00:00:00Z", + "end": "2022-10-31T23:59:59.999Z" + } + }, + "logId": { + "keyId": "CGCS8ChS/2hF0dFrJ4ScRWcYrBY9wzjSbea8IgY2b3I=" + } + }, + { + "baseUrl": "https://ctfe.sigstore.dev/2022", + "hashAlgorithm": "SHA2_256", + "publicKey": { + "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNKAaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw==", + "keyDetails": "PKIX_ECDSA_P256_SHA_256", + "validFor": { + "start": "2022-10-20T00:00:00Z" + } + }, + "logId": { + "keyId": "3T0wasbHETJjGR4cmWc3AqJKXrjePK3/h4pygC8p7o4=" + } + } + ], + "timestampAuthorities": [ + { + "subject": { + "organization": "sigstore.dev", + "commonName": "sigstore-tsa-selfsigned" + }, + "uri": 
"https://timestamp.sigstore.dev/api/v1/timestamp", + "certChain": { + "certificates": [ + { + "rawBytes": "MIICEDCCAZagAwIBAgIUOhNULwyQYe68wUMvy4qOiyojiwwwCgYIKoZIzj0EAwMwOTEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MSAwHgYDVQQDExdzaWdzdG9yZS10c2Etc2VsZnNpZ25lZDAeFw0yNTA0MDgwNjU5NDNaFw0zNTA0MDYwNjU5NDNaMC4xFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEVMBMGA1UEAxMMc2lnc3RvcmUtdHNhMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE4ra2Z8hKNig2T9kFjCAToGG30jky+WQv3BzL+mKvh1SKNR/UwuwsfNCg4sryoYAd8E6isovVA3M4aoNdm9QDi50Z8nTEyvqgfDPtTIwXItfiW/AFf1V7uwkbkAoj0xxco2owaDAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0OBBYEFIn9eUOHz9BlRsMCRscsc1t9tOsDMB8GA1UdIwQYMBaAFJjsAe9/u1H/1JUeb4qImFMHic6/MBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMIMAoGCCqGSM49BAMDA2gAMGUCMDtpsV/6KaO0qyF/UMsX2aSUXKQFdoGTptQGc0ftq1csulHPGG6dsmyMNd3JB+G3EQIxAOajvBcjpJmKb4Nv+2Taoj8Uc5+b6ih6FXCCKraSqupe07zqswMcXJTe1cExvHvvlw==" + }, + { + "rawBytes": "MIIB9zCCAXygAwIBAgIUV7f0GLDOoEzIh8LXSW80OJiUp14wCgYIKoZIzj0EAwMwOTEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MSAwHgYDVQQDExdzaWdzdG9yZS10c2Etc2VsZnNpZ25lZDAeFw0yNTA0MDgwNjU5NDNaFw0zNTA0MDYwNjU5NDNaMDkxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEgMB4GA1UEAxMXc2lnc3RvcmUtdHNhLXNlbGZzaWduZWQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQUQNtfRT/ou3YATa6wB/kKTe70cfJwyRIBovMnt8RcJph/COE82uyS6FmppLLL1VBPGcPfpQPYJNXzWwi8icwhKQ6W/Qe2h3oebBb2FHpwNJDqo+TMaC/tdfkv/ElJB72jRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSY7AHvf7tR/9SVHm+KiJhTB4nOvzAKBggqhkjOPQQDAwNpADBmAjEAwGEGrfGZR1cen1R8/DTVMI943LssZmJRtDp/i7SfGHmGRP6gRbuj9vOK3b67Z0QQAjEAuT2H673LQEaHTcyQSZrkp4mX7WwkmF+sVbkYY5mXN+RMH13KUEHHOqASaemYWK/E" + } + ] + }, + "validFor": { + "start": "2025-07-04T00:00:00Z" + } + } + ] +} diff --git a/vendor/github.com/moby/policy-helpers/roots/tuf-root/timestamp.json b/vendor/github.com/moby/policy-helpers/roots/tuf-root/timestamp.json new file mode 100644 index 000000000000..1b2384092e4f --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/roots/tuf-root/timestamp.json @@ -0,0 +1,19 @@ +{ + "signatures": [ + { + "keyid": "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5", + "sig": "3046022100baf8a66a62531e9db32df4a1475099798cc6126add00193c8b641ed3dfb83b2c022100eedbc40511f1f72459b95188995917ad564992a2258436a96d27885c063b7de1" + } + ], + "signed": { + "_type": "timestamp", + "expires": "2025-11-02T01:55:53Z", + "meta": { + "snapshot.json": { + "version": 162 + } + }, + "spec_version": "1.0", + "version": 498 + } +} \ No newline at end of file diff --git a/vendor/github.com/moby/policy-helpers/types/siginfo.go b/vendor/github.com/moby/policy-helpers/types/siginfo.go new file mode 100644 index 000000000000..7defff9f9316 --- /dev/null +++ b/vendor/github.com/moby/policy-helpers/types/siginfo.go @@ -0,0 +1,26 @@ +package types + +import ( + "time" + + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" +) + +type TimestampVerificationResult struct { + Type string `json:"type"` + URI string `json:"uri"` + Timestamp time.Time `json:"timestamp"` +} + +type TrustRootStatus struct { + Error string `json:"error,omitempty"` + LastUpdated *time.Time `json:"lastUpdated,omitempty"` +} + +type SignatureInfo struct { + Signer *certificate.Summary `json:"signer,omitempty"` + Timestamps []TimestampVerificationResult `json:"timestamps,omitempty"` + DockerReference string `json:"dockerReference,omitempty"` + TrustRootStatus TrustRootStatus `json:"trustRootStatus,omitzero"` + IsDHI bool `json:"isDHI,omitempty"` +} diff --git a/vendor/github.com/moby/policy-helpers/verifier.go b/vendor/github.com/moby/policy-helpers/verifier.go new file mode 100644 index 000000000000..8c6b69db2274 --- /dev/null +++ 
b/vendor/github.com/moby/policy-helpers/verifier.go @@ -0,0 +1,407 @@ +package verifier + +import ( + "context" + "encoding/hex" + "encoding/json" + "path/filepath" + "time" + + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1" + "github.com/moby/policy-helpers/image" + "github.com/moby/policy-helpers/roots" + "github.com/moby/policy-helpers/roots/dhi" + "github.com/moby/policy-helpers/types" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1" + "github.com/sigstore/sigstore-go/pkg/bundle" + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/verify" + "golang.org/x/sync/singleflight" +) + +type Config struct { + UpdateInterval time.Duration + RequireOnline bool + StateDir string +} + +type Verifier struct { + cfg Config + sf singleflight.Group + tp *roots.TrustProvider // tp may be nil if initialization failed +} + +func NewVerifier(cfg Config) (*Verifier, error) { + if cfg.StateDir == "" { + return nil, errors.Errorf("state directory must be provided") + } + v := &Verifier{cfg: cfg} + + v.loadTrustProvider() // initialization fails on expired root/timestamp + + return v, nil +} + +func (v *Verifier) VerifyArtifact(ctx context.Context, dgst digest.Digest, bundleBytes []byte) (*types.SignatureInfo, error) { + anyCert, err := anyCerificateIdentity() + if err != nil { + return nil, errors.WithStack(err) + } + alg, rawDgst, err := rawDigest(dgst) + if err != nil { + return nil, errors.WithStack(err) + } + policy := verify.NewPolicy(verify.WithArtifactDigest(alg, rawDgst), anyCert) + + b, err := loadBundle(bundleBytes) + if err != nil { + return nil, errors.WithStack(err) + } + + tp, err := v.loadTrustProvider() + if err != nil { + return nil, errors.Wrap(err, "loading trust provider") + } + + trustedRoot, st, err := tp.TrustedRoot(ctx) + if err != nil { + return nil, errors.Wrap(err, "getting trusted root") + } + + gv, err := verify.NewVerifier(trustedRoot, verify.WithSignedCertificateTimestamps(1), verify.WithTransparencyLog(1), verify.WithObserverTimestamps(1)) + if err != nil { + return nil, errors.Wrap(err, "creating verifier") + } + + result, err := gv.Verify(b, policy) + if err != nil { + return nil, errors.Wrap(err, "verifying bundle") + } + + if result.Signature == nil || result.Signature.Certificate == nil { + return nil, errors.Errorf("no valid signatures found") + } + + if !isSLSAPredicateType(result.Statement.PredicateType) { + return nil, errors.Errorf("unexpected predicate type %q, expecting SLSA provenance", result.Statement.PredicateType) + } + + return &types.SignatureInfo{ + TrustRootStatus: toRootStatus(st), + Signer: result.Signature.Certificate, + Timestamps: toTimestamps(result.VerifiedTimestamps), + }, nil +} + +func (v *Verifier) VerifyImage(ctx context.Context, provider image.ReferrersProvider, desc ocispecs.Descriptor, platform *ocispecs.Platform) (*types.SignatureInfo, error) { + sc, err := image.ResolveSignatureChain(ctx, provider, desc, platform) + if err != nil { + return nil, errors.Wrapf(err, "resolving signature chain for image %s", desc.Digest) + } + + if sc.AttestationManifest == nil || sc.SignatureManifest == nil { + return nil, errors.WithStack(&NoSigChainError{ + Target: desc.Digest, + HasAttestation: sc.AttestationManifest 
!= nil, + }) + } + + attestationBytes, err := sc.ManifestBytes(ctx, sc.AttestationManifest) + if err != nil { + return nil, errors.Wrapf(err, "reading attestation manifest %s", sc.AttestationManifest.Digest) + } + + var attestation ocispecs.Manifest + if err := json.Unmarshal(attestationBytes, &attestation); err != nil { + return nil, errors.Wrapf(err, "unmarshaling attestation manifest %s", sc.AttestationManifest.Digest) + } + + if attestation.Subject == nil { + return nil, errors.Errorf("attestation manifest %s has no subject", sc.AttestationManifest.Digest) + } + if attestation.Subject.Digest != sc.ImageManifest.Digest { + return nil, errors.Errorf("attestation manifest %s subject digest %s does not match image manifest digest %s", sc.AttestationManifest.Digest, attestation.Subject.Digest, sc.ImageManifest.Digest) + } + if attestation.Subject.MediaType != ocispecs.MediaTypeImageManifest && attestation.Subject.MediaType != ocispecs.MediaTypeImageIndex { + return nil, errors.Errorf("attestation manifest %s subject media type %s is not an image manifest or index", sc.AttestationManifest.Digest, attestation.Subject.MediaType) + } + if attestation.Subject.Size != sc.ImageManifest.Size { + return nil, errors.Errorf("attestation manifest %s subject size %d does not match image manifest size %d", sc.AttestationManifest.Digest, attestation.Subject.Size, sc.ImageManifest.Size) + } + hasSLSA := false + for _, l := range attestation.Layers { + if isSLSAPredicateType(l.Annotations["in-toto.io/predicate-type"]) { + hasSLSA = true + break + } + } + if !hasSLSA { + return nil, errors.Errorf("attestation manifest %s has no SLSA provenance layer", sc.AttestationManifest.Digest) + } + + anyCert, err := anyCerificateIdentity() + if err != nil { + return nil, errors.WithStack(err) + } + var artifactPolicy verify.ArtifactPolicyOption + + tp, err := v.loadTrustProvider() + if err != nil { + return nil, errors.Wrap(err, "loading trust provider") + } + var trustedRoot root.TrustedMaterial + fulcioRoot, st, err := tp.TrustedRoot(ctx) + if err != nil { + return nil, errors.Wrap(err, "getting trusted root") + } + + sigBytes, err := sc.ManifestBytes(ctx, sc.SignatureManifest) + if err != nil { + return nil, errors.Wrapf(err, "reading signature manifest %s", sc.SignatureManifest.Digest) + } + + var mfst ocispecs.Manifest + if err := json.Unmarshal(sigBytes, &mfst); err != nil { + return nil, errors.Wrapf(err, "unmarshaling signature manifest %s", sc.SignatureManifest.Digest) + } + + // basic validations + if mfst.Subject == nil { + return nil, errors.Errorf("signature manifest %s has no subject", sc.SignatureManifest.Digest) + } + if mfst.Subject.Digest != sc.AttestationManifest.Digest { + return nil, errors.Errorf("signature manifest %s subject digest %s does not match attestation manifest digest %s", sc.SignatureManifest.Digest, mfst.Subject.Digest, sc.AttestationManifest.Digest) + } + if mfst.Subject.MediaType != ocispecs.MediaTypeImageManifest && mfst.Subject.MediaType != ocispecs.MediaTypeImageIndex { + return nil, errors.Errorf("signature manifest %s subject media type %s is not an image manifest or index", sc.SignatureManifest.Digest, mfst.Subject.MediaType) + } + if mfst.Subject.Size != sc.AttestationManifest.Size { + return nil, errors.Errorf("signature manifest %s subject size %d does not match attestation manifest size %d", sc.SignatureManifest.Digest, mfst.Subject.Size, sc.AttestationManifest.Size) + } + if len(mfst.Layers) == 0 { + return nil, errors.Errorf("signature manifest %s has %d layers, expected 
1", sc.SignatureManifest.Digest, len(mfst.Layers)) + } + layer := mfst.Layers[0] + + var dockerReference string + + var se verify.SignedEntity + switch layer.MediaType { + case image.ArtifactTypeSigstoreBundle: + if mfst.ArtifactType != image.ArtifactTypeSigstoreBundle { + return nil, errors.Errorf("signature manifest %s is not a bundle (artifact type %q)", sc.SignatureManifest.Digest, mfst.ArtifactType) + } + bundleBytes, err := image.ReadBlob(ctx, sc.Provider, layer) + if err != nil { + return nil, errors.Wrapf(err, "reading bundle layer %s from signature manifest %s", layer.Digest, sc.SignatureManifest.Digest) + } + b, err := loadBundle(bundleBytes) + if err != nil { + return nil, errors.Wrapf(err, "loading signature bundle from manifest %s", sc.SignatureManifest.Digest) + } + se = b + + alg, rawDgst, err := rawDigest(sc.AttestationManifest.Digest) + if err != nil { + return nil, errors.WithStack(err) + } + artifactPolicy = verify.WithArtifactDigest(alg, rawDgst) + case image.MediaTypeCosignSimpleSigning: + payloadBytes, err := image.ReadBlob(ctx, sc.Provider, layer) + if err != nil { + return nil, errors.Wrapf(err, "reading bundle layer %s from signature manifest %s", layer.Digest, sc.SignatureManifest.Digest) + } + var payload struct { + Critical struct { + Identity struct { + DockerReference string `json:"docker-reference"` + } `json:"identity"` + Image struct { + DockerManifestDigest string `json:"docker-manifest-digest"` + } `json:"image"` + Type string `json:"type"` + } `json:"critical"` + Optional map[string]any `json:"optional"` + } + if err := json.Unmarshal(payloadBytes, &payload); err != nil { + return nil, errors.Wrapf(err, "unmarshaling simple signing payload from manifest %s", sc.SignatureManifest.Digest) + } + if payload.Critical.Image.DockerManifestDigest != sc.AttestationManifest.Digest.String() { + return nil, errors.Errorf("simple signing payload in manifest %s has docker-manifest-digest %s which does not match attestation manifest digest %s", sc.SignatureManifest.Digest, payload.Critical.Image.DockerManifestDigest, sc.AttestationManifest.Digest) + } + if payload.Critical.Type != "cosign container image signature" { + return nil, errors.Errorf("simple signing payload in manifest %s has invalid type %q", sc.SignatureManifest.Digest, payload.Critical.Type) + } + dockerReference = payload.Critical.Identity.DockerReference + // TODO: are more consistency checks needed for hashedrekord payload vs annotations? 
+ + hrse, err := newHashedRecordSignedEntity(&mfst, sc.DHI) + if err != nil { + return nil, errors.Wrapf(err, "loading hashed record signed entity from manifest %s", sc.SignatureManifest.Digest) + } + se = hrse + alg, rawDgst, err := rawDigest(layer.Digest) + if err != nil { + return nil, errors.WithStack(err) + } + artifactPolicy = verify.WithArtifactDigest(alg, rawDgst) + default: + return nil, errors.Errorf("signature manifest %s layer has invalid media type %s", sc.SignatureManifest.Digest, layer.MediaType) + } + + verifierOpts := []verify.VerifierOption{} + + if sc.DHI { + trustedRoot, err = dhi.TrustedRoot(fulcioRoot) + if err != nil { + return nil, errors.Wrap(err, "getting DHI trust root") + } + // DHI signature may or may not have transparency data + // validation needs to be done in a later additional policy step + if _, hasBundleAnnotation := layer.Annotations["dev.sigstore.cosign/bundle"]; !hasBundleAnnotation { + verifierOpts = append(verifierOpts, verify.WithNoObserverTimestamps()) + } else { + verifierOpts = append(verifierOpts, + verify.WithObserverTimestamps(1), + verify.WithTransparencyLog(1), + ) + } + // signed with pubkey without cert identity + anyCert = verify.WithoutIdentitiesUnsafe() + } else { + trustedRoot = fulcioRoot + verifierOpts = append(verifierOpts, + verify.WithObserverTimestamps(1), + verify.WithTransparencyLog(1), + verify.WithSignedCertificateTimestamps(1), + ) + } + gv, err := verify.NewVerifier(trustedRoot, verifierOpts...) + if err != nil { + return nil, errors.Wrap(err, "creating verifier") + } + + policy := verify.NewPolicy(artifactPolicy, anyCert) + + result, err := gv.Verify(se, policy) + if err != nil { + return nil, errors.Wrap(err, "verifying bundle") + } + + if result.Signature == nil || (result.Signature.Certificate == nil && !sc.DHI) { + return nil, errors.Errorf("no valid signatures found") + } + + return &types.SignatureInfo{ + TrustRootStatus: toRootStatus(st), + Signer: result.Signature.Certificate, + Timestamps: toTimestamps(result.VerifiedTimestamps), + DockerReference: dockerReference, + IsDHI: sc.DHI, + }, nil +} + +func (v *Verifier) loadTrustProvider() (*roots.TrustProvider, error) { + var tpCache *roots.TrustProvider + _, err, _ := v.sf.Do("", func() (any, error) { + if v.tp != nil { + tpCache = v.tp + return nil, nil + } + tp, err := roots.NewTrustProvider(roots.SigstoreRootsConfig{ + CachePath: filepath.Join(v.cfg.StateDir, "tuf"), + UpdateInterval: v.cfg.UpdateInterval, + RequireOnline: v.cfg.RequireOnline, + }) + if err != nil { + return nil, err + } + v.tp = tp + tpCache = tp + return nil, nil + }) + if err != nil { + return nil, err + } + return tpCache, nil +} + +func anyCerificateIdentity() (verify.PolicyOption, error) { + sanMatcher, err := verify.NewSANMatcher("", ".*") + if err != nil { + return nil, err + } + + issuerMatcher, err := verify.NewIssuerMatcher("", ".*") + if err != nil { + return nil, err + } + + extensions := certificate.Extensions{ + RunnerEnvironment: "github-hosted", + } + + certID, err := verify.NewCertificateIdentity(sanMatcher, issuerMatcher, extensions) + if err != nil { + return nil, err + } + + return verify.WithCertificateIdentity(certID), nil +} + +func loadBundle(dt []byte) (*bundle.Bundle, error) { + var bundle bundle.Bundle + bundle.Bundle = new(protobundle.Bundle) + + err := bundle.UnmarshalJSON(dt) + if err != nil { + return nil, err + } + + return &bundle, nil +} + +func rawDigest(d digest.Digest) (string, []byte, error) { + alg := d.Algorithm().String() + b, err := 
hex.DecodeString(d.Encoded()) + if err != nil { + return "", nil, errors.Wrapf(err, "decoding digest %s", d) + } + return alg, b, nil +} + +func isSLSAPredicateType(v string) bool { + switch v { + case slsa1.PredicateSLSAProvenance, slsa02.PredicateSLSAProvenance: + return true + default: + return false + } +} + +func toTimestamps(ts []verify.TimestampVerificationResult) []types.TimestampVerificationResult { + tsout := make([]types.TimestampVerificationResult, len(ts)) + for i, t := range ts { + tsout[i] = types.TimestampVerificationResult{ + Type: t.Type, + URI: t.URI, + Timestamp: t.Timestamp, + } + } + return tsout +} + +func toRootStatus(st roots.Status) types.TrustRootStatus { + trs := types.TrustRootStatus{ + LastUpdated: st.LastUpdated, + } + if st.Error != nil { + trs.Error = st.Error.Error() + } + return trs +} diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore new file mode 100644 index 000000000000..c92c4d56084b --- /dev/null +++ b/vendor/github.com/oklog/ulid/.gitignore @@ -0,0 +1,29 @@ +#### joe made this: http://goel.io/joe + +#####=== Go ===##### + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml new file mode 100644 index 000000000000..43eb762fa349 --- /dev/null +++ b/vendor/github.com/oklog/ulid/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.10.x +install: + - go get -v github.com/golang/lint/golint + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go get -d -t -v ./... + - go build -v ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... + - go test -v -covermode=count -coverprofile=cov.out + - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md new file mode 100644 index 000000000000..95581c78b062 --- /dev/null +++ b/vendor/github.com/oklog/ulid/AUTHORS.md @@ -0,0 +1,2 @@ +- Peter Bourgon (@peterbourgon) +- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md new file mode 100644 index 000000000000..8da38c6b00d3 --- /dev/null +++ b/vendor/github.com/oklog/ulid/CHANGELOG.md @@ -0,0 +1,33 @@ +## 1.3.1 / 2018-10-02 + +* Use underlying entropy source for random increments in Monotonic (#32) + +## 1.3.0 / 2018-09-29 + +* Monotonic entropy support (#31) + +## 1.2.0 / 2018-09-09 + +* Add a function to convert Unix time in milliseconds back to time.Time (#30) + +## 1.1.0 / 2018-08-15 + +* Ensure random part is always read from the entropy reader in full (#28) + +## 1.0.0 / 2018-07-29 + +* Add ParseStrict and MustParseStrict functions (#26) +* Enforce overflow checking when parsing (#20) + +## 0.3.0 / 2017-01-03 + +* Implement ULID.Compare method + +## 0.2.0 / 2016-12-13 + +* Remove year 2262 Timestamp bug. (#1) +* Gracefully handle invalid encodings when parsing. 
+ +## 0.1.0 / 2016-12-06 + +* First ULID release diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md new file mode 100644 index 000000000000..68f03f26eba0 --- /dev/null +++ b/vendor/github.com/oklog/ulid/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +We use GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first propose your ideas + in a Github issue. This will avoid unnecessary work and surely give + you and us a good deal of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock new file mode 100644 index 000000000000..349b449a6ea6 --- /dev/null +++ b/vendor/github.com/oklog/ulid/Gopkg.lock @@ -0,0 +1,15 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/pborman/getopt" + packages = ["v2"] + revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml new file mode 100644 index 000000000000..624a7a019c70 --- /dev/null +++ b/vendor/github.com/oklog/ulid/Gopkg.toml @@ -0,0 +1,26 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/LICENSE b/vendor/github.com/oklog/ulid/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/oklog/ulid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md
new file mode 100644
index 000000000000..0a3d2f82b255
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/README.md
@@ -0,0 +1,150 @@
+# Universally Unique Lexicographically Sortable Identifier
+
+![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg)
+[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid)
+[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid)
+[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master)
+[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid)
+[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE)
+
+A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented.
+
+## Background
+
+A GUID/UUID can be suboptimal for many use-cases because:
+
+- It isn't the most character-efficient way of encoding 128 bits
+- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
+- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
+- UUID v4 provides no other information than randomness, which can cause fragmentation in many data structures
+
+A ULID however:
+
+- Is compatible with UUIDs/GUIDs
+- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
+- Lexicographically sortable
+- Canonically encoded as a 26-character string, as opposed to the 36-character UUID
+- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
+- Case insensitive
+- No special characters (URL safe)
+- Monotonic sort order (correctly detects and handles the same millisecond)
+
+## Install
+
+```shell
+go get github.com/oklog/ulid
+```
+
+## Usage
+
+An ULID is constructed with a `time.Time` and an `io.Reader` entropy source.
+This design allows for greater flexibility in choosing your trade-offs.
+
+Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use.
+Instantiate one per long-lived goroutine, or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source`, as has been frequently observed with the package-level functions.
+
+
+```go
+func ExampleULID() {
+	t := time.Unix(1000000, 0)
+	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
+	fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy))
+	// Output: 0000XSNJG0MQJHBF4QX1EFD6Y3
+}
+
+```
+
+## Specification
+
+Below is the current specification of ULID as implemented in this repository.
+
+### Components
+
+**Timestamp**
+- 48 bits
+- UNIX-time in milliseconds
+- Won't run out of space till the year 10895 AD
+
+**Entropy**
+- 80 bits
+- User-defined entropy source.
+- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic)
+
+### Encoding
+
+[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown.
+This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.
+
+```
+0123456789ABCDEFGHJKMNPQRSTVWXYZ
+```
+
+### Binary Layout and Byte Order
+
+The components are encoded as 16 octets.
Each component is encoded with the Most Significant Byte first (network byte order). + +``` +0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_time_high | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 16_bit_uint_time_low | 16_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +``` + +### String Representation + +``` + 01AN4Z07BY 79KA1307SR9X4MV3 +|----------| |----------------| + Timestamp Entropy + 10 chars 16 chars + 48bits 80bits + base32 base32 +``` + +## Test + +```shell +go test ./... +``` + +## Benchmarks + +On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 + +``` +BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op +BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op +BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op +BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op +BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op +BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op +BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op +BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op +BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op +BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op +BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op +``` + +## Prior Art + +- [alizain/ulid](https://github.com/alizain/ulid) +- [RobThree/NUlid](https://github.com/RobThree/NUlid) +- [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go new file mode 100644 index 000000000000..c5d0d66fd2a4 --- /dev/null +++ b/vendor/github.com/oklog/ulid/ulid.go @@ -0,0 +1,614 @@ +// Copyright 2016 The Oklog Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ulid + +import ( + "bufio" + "bytes" + "database/sql/driver" + "encoding/binary" + "errors" + "io" + "math" + "math/bits" + "math/rand" + "time" +) + +/* +An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier + + The components are encoded as 16 octets. + Each component is encoded with the MSB first (network byte order). + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 16_bit_uint_time_low | 16_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ +type ULID [16]byte + +var ( + // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong + // data size. + ErrDataSize = errors.New("ulid: bad data size when unmarshaling") + + // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with + // invalid Base32 encodings. + ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") + + // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient + // size. + ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") + + // ErrBigTime is returned when constructing an ULID with a time that is larger + // than MaxTime. + ErrBigTime = errors.New("ulid: time too big") + + // ErrOverflow is returned when unmarshaling a ULID whose first character is + // larger than 7, thereby exceeding the valid bit depth of 128. + ErrOverflow = errors.New("ulid: overflow when unmarshaling") + + // ErrMonotonicOverflow is returned by a Monotonic entropy source when + // incrementing the previous ULID's entropy bytes would result in overflow. + ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") + + // ErrScanValue is returned when the value passed to scan cannot be unmarshaled + // into the ULID. + ErrScanValue = errors.New("ulid: source value must be a string or byte slice") +) + +// New returns an ULID with the given Unix milliseconds timestamp and an +// optional entropy source. Use the Timestamp function to convert +// a time.Time to Unix milliseconds. +// +// ErrBigTime is returned when passing a timestamp bigger than MaxTime. +// Reading from the entropy source may also return an error. +func New(ms uint64, entropy io.Reader) (id ULID, err error) { + if err = id.SetTime(ms); err != nil { + return id, err + } + + switch e := entropy.(type) { + case nil: + return id, err + case *monotonic: + err = e.MonotonicRead(ms, id[6:]) + default: + _, err = io.ReadFull(e, id[6:]) + } + + return id, err +} + +// MustNew is a convenience function equivalent to New that panics on failure +// instead of returning an error. 
+func MustNew(ms uint64, entropy io.Reader) ULID { + id, err := New(ms, entropy) + if err != nil { + panic(err) + } + return id +} + +// Parse parses an encoded ULID, returning an error in case of failure. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. For a version that +// returns an error instead, see ParseStrict. +func Parse(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), false, &id) +} + +// ParseStrict parses an encoded ULID, returning an error in case of failure. +// +// It is like Parse, but additionally validates that the parsed ULID consists +// only of valid base32 characters. It is slightly slower than Parse. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings return ErrInvalidCharacters. +func ParseStrict(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), true, &id) +} + +func parse(v []byte, strict bool, id *ULID) error { + // Check if a base32 encoded ULID is the right length. + if len(v) != EncodedSize { + return ErrDataSize + } + + // Check if all the characters in a base32 encoded ULID are part of the + // expected base32 character set. + if strict && + (dec[v[0]] == 0xFF || + dec[v[1]] == 0xFF || + dec[v[2]] == 0xFF || + dec[v[3]] == 0xFF || + dec[v[4]] == 0xFF || + dec[v[5]] == 0xFF || + dec[v[6]] == 0xFF || + dec[v[7]] == 0xFF || + dec[v[8]] == 0xFF || + dec[v[9]] == 0xFF || + dec[v[10]] == 0xFF || + dec[v[11]] == 0xFF || + dec[v[12]] == 0xFF || + dec[v[13]] == 0xFF || + dec[v[14]] == 0xFF || + dec[v[15]] == 0xFF || + dec[v[16]] == 0xFF || + dec[v[17]] == 0xFF || + dec[v[18]] == 0xFF || + dec[v[19]] == 0xFF || + dec[v[20]] == 0xFF || + dec[v[21]] == 0xFF || + dec[v[22]] == 0xFF || + dec[v[23]] == 0xFF || + dec[v[24]] == 0xFF || + dec[v[25]] == 0xFF) { + return ErrInvalidCharacters + } + + // Check if the first character in a base32 encoded ULID will overflow. This + // happens because the base32 representation encodes 130 bits, while the + // ULID is only 128 bits. + // + // See https://github.com/oklog/ulid/issues/9 for details. + if v[0] > '7' { + return ErrOverflow + } + + // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) + // to decode a base32 ULID. + + // 6 bytes timestamp (48 bits) + (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) + (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) + (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) + (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) + (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) + (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) + + // 10 bytes of entropy (80 bits) + (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) + (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) + (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) + (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) + (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) + (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) + (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) + (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) + (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) + (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) + + return nil +} + +// MustParse is a convenience function equivalent to Parse that panics on failure +// instead of returning an error. 
+func MustParse(ulid string) ULID { + id, err := Parse(ulid) + if err != nil { + panic(err) + } + return id +} + +// MustParseStrict is a convenience function equivalent to ParseStrict that +// panics on failure instead of returning an error. +func MustParseStrict(ulid string) ULID { + id, err := ParseStrict(ulid) + if err != nil { + panic(err) + } + return id +} + +// String returns a lexicographically sortable string encoded ULID +// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3 +// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy +func (id ULID) String() string { + ulid := make([]byte, EncodedSize) + _ = id.MarshalTextTo(ulid) + return string(ulid) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface by +// returning the ULID as a byte slice. +func (id ULID) MarshalBinary() ([]byte, error) { + ulid := make([]byte, len(id)) + return ulid, id.MarshalBinaryTo(ulid) +} + +// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. +// ErrBufferSize is returned when the len(dst) != 16. +func (id ULID) MarshalBinaryTo(dst []byte) error { + if len(dst) != len(id) { + return ErrBufferSize + } + + copy(dst, id[:]) + return nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by +// copying the passed data and converting it to an ULID. ErrDataSize is +// returned if the data length is different from ULID length. +func (id *ULID) UnmarshalBinary(data []byte) error { + if len(data) != len(*id) { + return ErrDataSize + } + + copy((*id)[:], data) + return nil +} + +// Encoding is the base 32 encoding alphabet used in ULID strings. +const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" + +// MarshalText implements the encoding.TextMarshaler interface by +// returning the string encoded ULID. +func (id ULID) MarshalText() ([]byte, error) { + ulid := make([]byte, EncodedSize) + return ulid, id.MarshalTextTo(ulid) +} + +// MarshalTextTo writes the ULID as a string to the given buffer. +// ErrBufferSize is returned when the len(dst) != 26. +func (id ULID) MarshalTextTo(dst []byte) error { + // Optimized unrolled loop ahead. + // From https://github.com/RobThree/NUlid + + if len(dst) != EncodedSize { + return ErrBufferSize + } + + // 10 byte timestamp + dst[0] = Encoding[(id[0]&224)>>5] + dst[1] = Encoding[id[0]&31] + dst[2] = Encoding[(id[1]&248)>>3] + dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] + dst[4] = Encoding[(id[2]&62)>>1] + dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] + dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] + dst[7] = Encoding[(id[4]&124)>>2] + dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] + dst[9] = Encoding[id[5]&31] + + // 16 bytes of entropy + dst[10] = Encoding[(id[6]&248)>>3] + dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] + dst[12] = Encoding[(id[7]&62)>>1] + dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] + dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] + dst[15] = Encoding[(id[9]&124)>>2] + dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] + dst[17] = Encoding[id[10]&31] + dst[18] = Encoding[(id[11]&248)>>3] + dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] + dst[20] = Encoding[(id[12]&62)>>1] + dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] + dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] + dst[23] = Encoding[(id[14]&124)>>2] + dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] + dst[25] = Encoding[id[15]&31] + + return nil +} + +// Byte to index table for O(1) lookups when unmarshaling. 
+// We use 0xFF as sentinel value for invalid indexes. +var dec = [...]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, + 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +} + +// EncodedSize is the length of a text encoded ULID. +const EncodedSize = 26 + +// UnmarshalText implements the encoding.TextUnmarshaler interface by +// parsing the data as string encoded ULID. +// +// ErrDataSize is returned if the len(v) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. +func (id *ULID) UnmarshalText(v []byte) error { + return parse(v, false, id) +} + +// Time returns the Unix time in milliseconds encoded in the ULID. +// Use the top level Time function to convert the returned value to +// a time.Time. +func (id ULID) Time() uint64 { + return uint64(id[5]) | uint64(id[4])<<8 | + uint64(id[3])<<16 | uint64(id[2])<<24 | + uint64(id[1])<<32 | uint64(id[0])<<40 +} + +// maxTime is the maximum Unix time in milliseconds that can be +// represented in an ULID. +var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() + +// MaxTime returns the maximum Unix time in milliseconds that +// can be encoded in an ULID. +func MaxTime() uint64 { return maxTime } + +// Now is a convenience function that returns the current +// UTC time in Unix milliseconds. Equivalent to: +// Timestamp(time.Now().UTC()) +func Now() uint64 { return Timestamp(time.Now().UTC()) } + +// Timestamp converts a time.Time to Unix milliseconds. +// +// Because of the way ULID stores time, times from the year +// 10889 produces undefined results. +func Timestamp(t time.Time) uint64 { + return uint64(t.Unix())*1000 + + uint64(t.Nanosecond()/int(time.Millisecond)) +} + +// Time converts Unix milliseconds in the format +// returned by the Timestamp function to a time.Time. +func Time(ms uint64) time.Time { + s := int64(ms / 1e3) + ns := int64((ms % 1e3) * 1e6) + return time.Unix(s, ns) +} + +// SetTime sets the time component of the ULID to the given Unix time +// in milliseconds. 
+func (id *ULID) SetTime(ms uint64) error { + if ms > maxTime { + return ErrBigTime + } + + (*id)[0] = byte(ms >> 40) + (*id)[1] = byte(ms >> 32) + (*id)[2] = byte(ms >> 24) + (*id)[3] = byte(ms >> 16) + (*id)[4] = byte(ms >> 8) + (*id)[5] = byte(ms) + + return nil +} + +// Entropy returns the entropy from the ULID. +func (id ULID) Entropy() []byte { + e := make([]byte, 10) + copy(e, id[6:]) + return e +} + +// SetEntropy sets the ULID entropy to the passed byte slice. +// ErrDataSize is returned if len(e) != 10. +func (id *ULID) SetEntropy(e []byte) error { + if len(e) != 10 { + return ErrDataSize + } + + copy((*id)[6:], e) + return nil +} + +// Compare returns an integer comparing id and other lexicographically. +// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. +func (id ULID) Compare(other ULID) int { + return bytes.Compare(id[:], other[:]) +} + +// Scan implements the sql.Scanner interface. It supports scanning +// a string or byte slice. +func (id *ULID) Scan(src interface{}) error { + switch x := src.(type) { + case nil: + return nil + case string: + return id.UnmarshalText([]byte(x)) + case []byte: + return id.UnmarshalBinary(x) + } + + return ErrScanValue +} + +// Value implements the sql/driver.Valuer interface. This returns the value +// represented as a byte slice. If instead a string is desirable, a wrapper +// type can be created that calls String(). +// +// // stringValuer wraps a ULID as a string-based driver.Valuer. +// type stringValuer ULID +// +// func (id stringValuer) Value() (driver.Value, error) { +// return ULID(id).String(), nil +// } +// +// // Example usage. +// db.Exec("...", stringValuer(id)) +func (id ULID) Value() (driver.Value, error) { + return id.MarshalBinary() +} + +// Monotonic returns an entropy source that is guaranteed to yield +// strictly increasing entropy bytes for the same ULID timestamp. +// On conflicts, the previous ULID entropy is incremented with a +// random number between 1 and `inc` (inclusive). +// +// The provided entropy source must actually yield random bytes or else +// monotonic reads are not guaranteed to terminate, since there isn't +// enough randomness to compute an increment number. +// +// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. +// The lower the value of `inc`, the easier the next ULID within the +// same millisecond is to guess. If your code depends on ULIDs having +// secure entropy bytes, then don't go under this default unless you know +// what you're doing. +// +// The returned io.Reader isn't safe for concurrent use. +func Monotonic(entropy io.Reader, inc uint64) io.Reader { + m := monotonic{ + Reader: bufio.NewReader(entropy), + inc: inc, + } + + if m.inc == 0 { + m.inc = math.MaxUint32 + } + + if rng, ok := entropy.(*rand.Rand); ok { + m.rng = rng + } + + return &m +} + +type monotonic struct { + io.Reader + ms uint64 + inc uint64 + entropy uint80 + rand [8]byte + rng *rand.Rand +} + +func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { + if !m.entropy.IsZero() && m.ms == ms { + err = m.increment() + m.entropy.AppendTo(entropy) + } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { + m.ms = ms + m.entropy.SetBytes(entropy) + } + return err +} + +// increment the previous entropy number with a random number +// of up to m.inc (inclusive). 
+func (m *monotonic) increment() error { + if inc, err := m.random(); err != nil { + return err + } else if m.entropy.Add(inc) { + return ErrMonotonicOverflow + } + return nil +} + +// random returns a uniform random value in [1, m.inc), reading entropy +// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1. +// Adapted from: https://golang.org/pkg/crypto/rand/#Int +func (m *monotonic) random() (inc uint64, err error) { + if m.inc <= 1 { + return 1, nil + } + + // Fast path for using a underlying rand.Rand directly. + if m.rng != nil { + // Range: [1, m.inc) + return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil + } + + // bitLen is the maximum bit length needed to encode a value < m.inc. + bitLen := bits.Len64(m.inc) + + // byteLen is the maximum byte length needed to encode a value < m.inc. + byteLen := uint(bitLen+7) / 8 + + // msbitLen is the number of bits in the most significant byte of m.inc-1. + msbitLen := uint(bitLen % 8) + if msbitLen == 0 { + msbitLen = 8 + } + + for inc == 0 || inc >= m.inc { + if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil { + return 0, err + } + + // Clear bits in the first byte to increase the probability + // that the candidate is < m.inc. + m.rand[0] &= uint8(int(1< + // If the verification material contains a public key identifier + // (key hint) and the `content` is a DSSE envelope, the key hints + // MUST be exactly the same in the verification material and in the + // DSSE envelope. + VerificationMaterial *VerificationMaterial `protobuf:"bytes,2,opt,name=verification_material,json=verificationMaterial,proto3" json:"verification_material,omitempty"` + // Types that are valid to be assigned to Content: + // + // *Bundle_MessageSignature + // *Bundle_DsseEnvelope + Content isBundle_Content `protobuf_oneof:"content"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Bundle) Reset() { + *x = Bundle{} + mi := &file_sigstore_bundle_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Bundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bundle) ProtoMessage() {} + +func (x *Bundle) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bundle.ProtoReflect.Descriptor instead. 
+func (*Bundle) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{2} +} + +func (x *Bundle) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *Bundle) GetVerificationMaterial() *VerificationMaterial { + if x != nil { + return x.VerificationMaterial + } + return nil +} + +func (x *Bundle) GetContent() isBundle_Content { + if x != nil { + return x.Content + } + return nil +} + +func (x *Bundle) GetMessageSignature() *v1.MessageSignature { + if x != nil { + if x, ok := x.Content.(*Bundle_MessageSignature); ok { + return x.MessageSignature + } + } + return nil +} + +func (x *Bundle) GetDsseEnvelope() *dsse.Envelope { + if x != nil { + if x, ok := x.Content.(*Bundle_DsseEnvelope); ok { + return x.DsseEnvelope + } + } + return nil +} + +type isBundle_Content interface { + isBundle_Content() +} + +type Bundle_MessageSignature struct { + MessageSignature *v1.MessageSignature `protobuf:"bytes,3,opt,name=message_signature,json=messageSignature,proto3,oneof"` +} + +type Bundle_DsseEnvelope struct { + // A DSSE envelope can contain arbitrary payloads. + // Verifiers must verify that the payload type is a + // supported and expected type. This is part of the DSSE + // protocol which is defined here: + // + // DSSE envelopes in a bundle MUST have exactly one signature. + // This is a limitation from the DSSE spec, as it can contain + // multiple signatures. There are two primary reasons: + // 1. It simplifies the verification logic and policy + // 2. The bundle (currently) can only contain a single + // instance of the required verification materials + // + // During verification a client MUST reject an envelope if + // the number of signatures is not equal to one. + DsseEnvelope *dsse.Envelope `protobuf:"bytes,4,opt,name=dsse_envelope,json=dsseEnvelope,proto3,oneof"` +} + +func (*Bundle_MessageSignature) isBundle_Content() {} + +func (*Bundle_DsseEnvelope) isBundle_Content() {} + +var File_sigstore_bundle_proto protoreflect.FileDescriptor + +var file_sigstore_bundle_proto_rawDesc = string([]byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7a, 0x0a, + 0x19, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5d, 0x0a, 0x12, 0x72, 0x66, + 0x63, 0x33, 0x31, 0x36, 0x31, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 
0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x72, 0x66, 0x63, 0x33, 0x31, 0x36, 0x31, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x14, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x69, 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x14, 0x78, 0x35, 0x30, 0x39, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x12, 0x50, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x74, 0x6c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x71, 0x0a, 0x1b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x22, 0xbf, 0x02, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x76, 0x65, + 0x72, 
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x14, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x5c, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x73, 0x73, 0x65, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x33, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x42, 0x0b, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, + 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, + 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_bundle_proto_rawDescOnce sync.Once + file_sigstore_bundle_proto_rawDescData []byte +) + +func file_sigstore_bundle_proto_rawDescGZIP() []byte { + file_sigstore_bundle_proto_rawDescOnce.Do(func() { + file_sigstore_bundle_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_bundle_proto_rawDesc), len(file_sigstore_bundle_proto_rawDesc))) + }) + return file_sigstore_bundle_proto_rawDescData +} + +var file_sigstore_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_sigstore_bundle_proto_goTypes = []any{ + (*TimestampVerificationData)(nil), // 0: dev.sigstore.bundle.v1.TimestampVerificationData + (*VerificationMaterial)(nil), // 1: dev.sigstore.bundle.v1.VerificationMaterial + (*Bundle)(nil), // 2: dev.sigstore.bundle.v1.Bundle + (*v1.RFC3161SignedTimestamp)(nil), // 3: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*v1.PublicKeyIdentifier)(nil), // 4: dev.sigstore.common.v1.PublicKeyIdentifier + (*v1.X509CertificateChain)(nil), // 5: 
dev.sigstore.common.v1.X509CertificateChain + (*v1.X509Certificate)(nil), // 6: dev.sigstore.common.v1.X509Certificate + (*v11.TransparencyLogEntry)(nil), // 7: dev.sigstore.rekor.v1.TransparencyLogEntry + (*v1.MessageSignature)(nil), // 8: dev.sigstore.common.v1.MessageSignature + (*dsse.Envelope)(nil), // 9: io.intoto.Envelope +} +var file_sigstore_bundle_proto_depIdxs = []int32{ + 3, // 0: dev.sigstore.bundle.v1.TimestampVerificationData.rfc3161_timestamps:type_name -> dev.sigstore.common.v1.RFC3161SignedTimestamp + 4, // 1: dev.sigstore.bundle.v1.VerificationMaterial.public_key:type_name -> dev.sigstore.common.v1.PublicKeyIdentifier + 5, // 2: dev.sigstore.bundle.v1.VerificationMaterial.x509_certificate_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain + 6, // 3: dev.sigstore.bundle.v1.VerificationMaterial.certificate:type_name -> dev.sigstore.common.v1.X509Certificate + 7, // 4: dev.sigstore.bundle.v1.VerificationMaterial.tlog_entries:type_name -> dev.sigstore.rekor.v1.TransparencyLogEntry + 0, // 5: dev.sigstore.bundle.v1.VerificationMaterial.timestamp_verification_data:type_name -> dev.sigstore.bundle.v1.TimestampVerificationData + 1, // 6: dev.sigstore.bundle.v1.Bundle.verification_material:type_name -> dev.sigstore.bundle.v1.VerificationMaterial + 8, // 7: dev.sigstore.bundle.v1.Bundle.message_signature:type_name -> dev.sigstore.common.v1.MessageSignature + 9, // 8: dev.sigstore.bundle.v1.Bundle.dsse_envelope:type_name -> io.intoto.Envelope + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sigstore_bundle_proto_init() } +func file_sigstore_bundle_proto_init() { + if File_sigstore_bundle_proto != nil { + return + } + file_sigstore_bundle_proto_msgTypes[1].OneofWrappers = []any{ + (*VerificationMaterial_PublicKey)(nil), + (*VerificationMaterial_X509CertificateChain)(nil), + (*VerificationMaterial_Certificate)(nil), + } + file_sigstore_bundle_proto_msgTypes[2].OneofWrappers = []any{ + (*Bundle_MessageSignature)(nil), + (*Bundle_DsseEnvelope)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_bundle_proto_rawDesc), len(file_sigstore_bundle_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_bundle_proto_goTypes, + DependencyIndexes: file_sigstore_bundle_proto_depIdxs, + MessageInfos: file_sigstore_bundle_proto_msgTypes, + }.Build() + File_sigstore_bundle_proto = out.File + file_sigstore_bundle_proto_goTypes = nil + file_sigstore_bundle_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go new file mode 100644 index 000000000000..5f339b2d78de --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -0,0 +1,1299 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: sigstore_common.proto + +package v1 + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Only a subset of the secure hash standard algorithms are supported. +// See for more +// details. +// UNSPECIFIED SHOULD not be used, primary reason for inclusion is to force +// any proto JSON serialization to emit the used hash algorithm, as default +// option is to *omit* the default value of an enum (which is the first +// value, represented by '0'. +type HashAlgorithm int32 + +const ( + HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED HashAlgorithm = 0 + HashAlgorithm_SHA2_256 HashAlgorithm = 1 + HashAlgorithm_SHA2_384 HashAlgorithm = 2 + HashAlgorithm_SHA2_512 HashAlgorithm = 3 + HashAlgorithm_SHA3_256 HashAlgorithm = 4 + HashAlgorithm_SHA3_384 HashAlgorithm = 5 +) + +// Enum value maps for HashAlgorithm. +var ( + HashAlgorithm_name = map[int32]string{ + 0: "HASH_ALGORITHM_UNSPECIFIED", + 1: "SHA2_256", + 2: "SHA2_384", + 3: "SHA2_512", + 4: "SHA3_256", + 5: "SHA3_384", + } + HashAlgorithm_value = map[string]int32{ + "HASH_ALGORITHM_UNSPECIFIED": 0, + "SHA2_256": 1, + "SHA2_384": 2, + "SHA2_512": 3, + "SHA3_256": 4, + "SHA3_384": 5, + } +) + +func (x HashAlgorithm) Enum() *HashAlgorithm { + p := new(HashAlgorithm) + *p = x + return p +} + +func (x HashAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HashAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[0].Descriptor() +} + +func (HashAlgorithm) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[0] +} + +func (x HashAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HashAlgorithm.Descriptor instead. +func (HashAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +// Details of a specific public key, capturing the the key encoding method, +// and signature algorithm. +// +// PublicKeyDetails captures the public key/hash algorithm combinations +// recommended in the Sigstore ecosystem. +// +// This is modelled as a linear set as we want to provide a small number of +// opinionated options instead of allowing every possible permutation. +// +// Any changes to this enum MUST be reflected in the algorithm registry. 
+// +// See: +// +// To avoid the possibility of contradicting formats such as PKCS1 with +// ED25519 the valid permutations are listed as a linear set instead of a +// cartesian set (i.e one combined variable instead of two, one for encoding +// and one for the signature algorithm). +type PublicKeyDetails int32 + +const ( + PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED PublicKeyDetails = 0 + // RSA + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKCS1_RSA_PKCS1V5 PublicKeyDetails = 1 // See RFC8017 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKCS1_RSA_PSS PublicKeyDetails = 2 // See RFC8017 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_RSA_PKCS1V5 PublicKeyDetails = 3 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_RSA_PSS PublicKeyDetails = 4 + // RSA public key in PKIX format, PKCS#1v1.5 signature + PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 PublicKeyDetails = 9 + PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 PublicKeyDetails = 10 + PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 PublicKeyDetails = 11 + // RSA public key in PKIX format, RSASSA-PSS signature + PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256 PublicKeyDetails = 16 // See RFC4055 + PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256 PublicKeyDetails = 17 + PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256 PublicKeyDetails = 18 + // ECDSA + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P256_HMAC_SHA_256 PublicKeyDetails = 6 // See RFC6979 + PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 PublicKeyDetails = 5 // See NIST FIPS 186-4 + PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 PublicKeyDetails = 12 + PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 PublicKeyDetails = 13 + // Ed 25519 + PublicKeyDetails_PKIX_ED25519 PublicKeyDetails = 7 // See RFC8032 + PublicKeyDetails_PKIX_ED25519_PH PublicKeyDetails = 8 + // These algorithms are deprecated and should not be used, but they + // were/are being used by most Sigstore clients implementations. + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P384_SHA_256 PublicKeyDetails = 19 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P521_SHA_256 PublicKeyDetails = 20 + // LMS and LM-OTS + // + // These algorithms are deprecated and should not be used. + // Keys and signatures MAY be used by private Sigstore + // deployments, but will not be supported by the public + // good instance. + // + // USER WARNING: LMS and LM-OTS are both stateful signature schemes. + // Using them correctly requires discretion and careful consideration + // to ensure that individual secret keys are not used more than once. + // In addition, LM-OTS is a single-use scheme, meaning that it + // MUST NOT be used for more than one signature per LM-OTS key. + // If you cannot maintain these invariants, you MUST NOT use these + // schemes. + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_LMOTS_SHA256 PublicKeyDetails = 15 + // ML-DSA + // + // These ML_DSA_65 and ML-DSA_87 algorithms are the pure variants that + // take data to sign rather than the prehash variants (HashML-DSA), which + // take digests. 
While considered quantum-resistant, their usage + // involves tradeoffs in that signatures and keys are much larger, and + // this makes deployments more costly. + // + // USER WARNING: ML_DSA_65 and ML_DSA_87 are experimental algorithms. + // In the future they MAY be used by private Sigstore deployments, but + // they are not yet fully functional. This warning will be removed when + // these algorithms are widely supported by Sigstore clients and servers, + // but care should still be taken for production environments. + PublicKeyDetails_ML_DSA_65 PublicKeyDetails = 21 // See NIST FIPS 204 + PublicKeyDetails_ML_DSA_87 PublicKeyDetails = 22 +) + +// Enum value maps for PublicKeyDetails. +var ( + PublicKeyDetails_name = map[int32]string{ + 0: "PUBLIC_KEY_DETAILS_UNSPECIFIED", + 1: "PKCS1_RSA_PKCS1V5", + 2: "PKCS1_RSA_PSS", + 3: "PKIX_RSA_PKCS1V5", + 4: "PKIX_RSA_PSS", + 9: "PKIX_RSA_PKCS1V15_2048_SHA256", + 10: "PKIX_RSA_PKCS1V15_3072_SHA256", + 11: "PKIX_RSA_PKCS1V15_4096_SHA256", + 16: "PKIX_RSA_PSS_2048_SHA256", + 17: "PKIX_RSA_PSS_3072_SHA256", + 18: "PKIX_RSA_PSS_4096_SHA256", + 6: "PKIX_ECDSA_P256_HMAC_SHA_256", + 5: "PKIX_ECDSA_P256_SHA_256", + 12: "PKIX_ECDSA_P384_SHA_384", + 13: "PKIX_ECDSA_P521_SHA_512", + 7: "PKIX_ED25519", + 8: "PKIX_ED25519_PH", + 19: "PKIX_ECDSA_P384_SHA_256", + 20: "PKIX_ECDSA_P521_SHA_256", + 14: "LMS_SHA256", + 15: "LMOTS_SHA256", + 21: "ML_DSA_65", + 22: "ML_DSA_87", + } + PublicKeyDetails_value = map[string]int32{ + "PUBLIC_KEY_DETAILS_UNSPECIFIED": 0, + "PKCS1_RSA_PKCS1V5": 1, + "PKCS1_RSA_PSS": 2, + "PKIX_RSA_PKCS1V5": 3, + "PKIX_RSA_PSS": 4, + "PKIX_RSA_PKCS1V15_2048_SHA256": 9, + "PKIX_RSA_PKCS1V15_3072_SHA256": 10, + "PKIX_RSA_PKCS1V15_4096_SHA256": 11, + "PKIX_RSA_PSS_2048_SHA256": 16, + "PKIX_RSA_PSS_3072_SHA256": 17, + "PKIX_RSA_PSS_4096_SHA256": 18, + "PKIX_ECDSA_P256_HMAC_SHA_256": 6, + "PKIX_ECDSA_P256_SHA_256": 5, + "PKIX_ECDSA_P384_SHA_384": 12, + "PKIX_ECDSA_P521_SHA_512": 13, + "PKIX_ED25519": 7, + "PKIX_ED25519_PH": 8, + "PKIX_ECDSA_P384_SHA_256": 19, + "PKIX_ECDSA_P521_SHA_256": 20, + "LMS_SHA256": 14, + "LMOTS_SHA256": 15, + "ML_DSA_65": 21, + "ML_DSA_87": 22, + } +) + +func (x PublicKeyDetails) Enum() *PublicKeyDetails { + p := new(PublicKeyDetails) + *p = x + return p +} + +func (x PublicKeyDetails) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PublicKeyDetails) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[1].Descriptor() +} + +func (PublicKeyDetails) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[1] +} + +func (x PublicKeyDetails) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PublicKeyDetails.Descriptor instead. +func (PublicKeyDetails) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +type SubjectAlternativeNameType int32 + +const ( + SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED SubjectAlternativeNameType = 0 + SubjectAlternativeNameType_EMAIL SubjectAlternativeNameType = 1 + SubjectAlternativeNameType_URI SubjectAlternativeNameType = 2 + // OID 1.3.6.1.4.1.57264.1.7 + // See https://github.com/sigstore/fulcio/blob/main/docs/oid-info.md#1361415726417--othername-san + // for more details. + SubjectAlternativeNameType_OTHER_NAME SubjectAlternativeNameType = 3 +) + +// Enum value maps for SubjectAlternativeNameType. 
+var ( + SubjectAlternativeNameType_name = map[int32]string{ + 0: "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED", + 1: "EMAIL", + 2: "URI", + 3: "OTHER_NAME", + } + SubjectAlternativeNameType_value = map[string]int32{ + "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED": 0, + "EMAIL": 1, + "URI": 2, + "OTHER_NAME": 3, + } +) + +func (x SubjectAlternativeNameType) Enum() *SubjectAlternativeNameType { + p := new(SubjectAlternativeNameType) + *p = x + return p +} + +func (x SubjectAlternativeNameType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubjectAlternativeNameType) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[2].Descriptor() +} + +func (SubjectAlternativeNameType) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[2] +} + +func (x SubjectAlternativeNameType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubjectAlternativeNameType.Descriptor instead. +func (SubjectAlternativeNameType) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +// HashOutput captures a digest of a 'message' (generic octet sequence) +// and the corresponding hash algorithm used. +type HashOutput struct { + state protoimpl.MessageState `protogen:"open.v1"` + Algorithm HashAlgorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"algorithm,omitempty"` + // This is the raw octets of the message digest as computed by + // the hash algorithm. + Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HashOutput) Reset() { + *x = HashOutput{} + mi := &file_sigstore_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HashOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashOutput) ProtoMessage() {} + +func (x *HashOutput) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashOutput.ProtoReflect.Descriptor instead. +func (*HashOutput) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +func (x *HashOutput) GetAlgorithm() HashAlgorithm { + if x != nil { + return x.Algorithm + } + return HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED +} + +func (x *HashOutput) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +// MessageSignature stores the computed signature over a message. +type MessageSignature struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Message digest can be used to identify the artifact. + // Clients MUST NOT attempt to use this digest to verify the associated + // signature; it is intended solely for identification. + MessageDigest *HashOutput `protobuf:"bytes,1,opt,name=message_digest,json=messageDigest,proto3" json:"message_digest,omitempty"` + // The raw bytes as returned from the signature algorithm. + // The signature algorithm (and so the format of the signature bytes) + // are determined by the contents of the 'verification_material', + // either a key-pair or a certificate. 
If using a certificate, the + // certificate contains the required information on the signature + // algorithm. + // When using a key pair, the algorithm MUST be part of the public + // key, which MUST be communicated out-of-band. + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageSignature) Reset() { + *x = MessageSignature{} + mi := &file_sigstore_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageSignature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageSignature) ProtoMessage() {} + +func (x *MessageSignature) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageSignature.ProtoReflect.Descriptor instead. +func (*MessageSignature) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +func (x *MessageSignature) GetMessageDigest() *HashOutput { + if x != nil { + return x.MessageDigest + } + return nil +} + +func (x *MessageSignature) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +// LogId captures the identity of a transparency log. +type LogId struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The unique identity of the log, represented by its public key. + KeyId []byte `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LogId) Reset() { + *x = LogId{} + mi := &file_sigstore_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LogId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogId) ProtoMessage() {} + +func (x *LogId) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogId.ProtoReflect.Descriptor instead. +func (*LogId) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +func (x *LogId) GetKeyId() []byte { + if x != nil { + return x.KeyId + } + return nil +} + +// This message holds a RFC 3161 timestamp. +type RFC3161SignedTimestamp struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Signed timestamp is the DER encoded TimeStampResponse. 
+ // See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2 + SignedTimestamp []byte `protobuf:"bytes,1,opt,name=signed_timestamp,json=signedTimestamp,proto3" json:"signed_timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RFC3161SignedTimestamp) Reset() { + *x = RFC3161SignedTimestamp{} + mi := &file_sigstore_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RFC3161SignedTimestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RFC3161SignedTimestamp) ProtoMessage() {} + +func (x *RFC3161SignedTimestamp) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RFC3161SignedTimestamp.ProtoReflect.Descriptor instead. +func (*RFC3161SignedTimestamp) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{3} +} + +func (x *RFC3161SignedTimestamp) GetSignedTimestamp() []byte { + if x != nil { + return x.SignedTimestamp + } + return nil +} + +type PublicKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded public key, encoding method is specified by the + // key_details attribute. + RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3,oneof" json:"raw_bytes,omitempty"` + // Key encoding and signature algorithm to use for this key. + KeyDetails PublicKeyDetails `protobuf:"varint,2,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"` + // Optional validity period for this key, *inclusive* of the endpoints. + ValidFor *TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3,oneof" json:"valid_for,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + mi := &file_sigstore_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. +func (*PublicKey) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{4} +} + +func (x *PublicKey) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +func (x *PublicKey) GetKeyDetails() PublicKeyDetails { + if x != nil { + return x.KeyDetails + } + return PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED +} + +func (x *PublicKey) GetValidFor() *TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +// PublicKeyIdentifier can be used to identify an (out of band) delivered +// key, to verify a signature. +type PublicKeyIdentifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Optional unauthenticated hint on which key to use. 
+ // The format of the hint must be agreed upon out of band by the + // signer and the verifiers, and so is not subject to this + // specification. + // Example use-case is to specify the public key to use, from a + // trusted key-ring. + // Implementors are RECOMMENDED to derive the value from the public + // key as described in RFC 6962. + // See: + Hint string `protobuf:"bytes,1,opt,name=hint,proto3" json:"hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKeyIdentifier) Reset() { + *x = PublicKeyIdentifier{} + mi := &file_sigstore_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKeyIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKeyIdentifier) ProtoMessage() {} + +func (x *PublicKeyIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKeyIdentifier.ProtoReflect.Descriptor instead. +func (*PublicKeyIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{5} +} + +func (x *PublicKeyIdentifier) GetHint() string { + if x != nil { + return x.Hint + } + return "" +} + +// An ASN.1 OBJECT IDENTIFIER +type ObjectIdentifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id []int32 `protobuf:"varint,1,rep,packed,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObjectIdentifier) Reset() { + *x = ObjectIdentifier{} + mi := &file_sigstore_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObjectIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifier) ProtoMessage() {} + +func (x *ObjectIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifier.ProtoReflect.Descriptor instead. +func (*ObjectIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{6} +} + +func (x *ObjectIdentifier) GetId() []int32 { + if x != nil { + return x.Id + } + return nil +} + +// An OID and the corresponding (byte) value. 
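+
+// Illustrative sketch (not part of the generated API): how a verifier might
+// consume a PublicKey whose key_details is PKIX_ECDSA_P256_SHA_256, together
+// with the raw signature bytes from a MessageSignature. The helper name is an
+// assumption for the example; only x509.ParsePKIXPublicKey, ecdsa.VerifyASN1
+// and sha256.Sum256 from the standard library are relied on.
+//
+//    import (
+//        "crypto/ecdsa"
+//        "crypto/sha256"
+//        "crypto/x509"
+//        "fmt"
+//    )
+//
+//    func verifyWithPublicKey(pk *PublicKey, msg, sig []byte) error {
+//        if pk.GetKeyDetails() != PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 {
+//            return fmt.Errorf("unsupported key_details %v", pk.GetKeyDetails())
+//        }
+//        // For PKIX_* values raw_bytes is a DER-encoded SubjectPublicKeyInfo.
+//        parsed, err := x509.ParsePKIXPublicKey(pk.GetRawBytes())
+//        if err != nil {
+//            return err
+//        }
+//        ec, ok := parsed.(*ecdsa.PublicKey)
+//        if !ok {
+//            return fmt.Errorf("key_details does not match the parsed key type")
+//        }
+//        digest := sha256.Sum256(msg)
+//        if !ecdsa.VerifyASN1(ec, digest[:], sig) {
+//            return fmt.Errorf("signature verification failed")
+//        }
+//        return nil
+//    }
+//
+// A matching PublicKeyIdentifier.hint is commonly derived as the SHA-256 of
+// the same DER bytes (the RFC 6962 key id construction referenced above);
+// that derivation is a convention between signer and verifier, not something
+// this message enforces.
+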
+type ObjectIdentifierValuePair struct { + state protoimpl.MessageState `protogen:"open.v1"` + Oid *ObjectIdentifier `protobuf:"bytes,1,opt,name=oid,proto3" json:"oid,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObjectIdentifierValuePair) Reset() { + *x = ObjectIdentifierValuePair{} + mi := &file_sigstore_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObjectIdentifierValuePair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifierValuePair) ProtoMessage() {} + +func (x *ObjectIdentifierValuePair) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifierValuePair.ProtoReflect.Descriptor instead. +func (*ObjectIdentifierValuePair) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{7} +} + +func (x *ObjectIdentifierValuePair) GetOid() *ObjectIdentifier { + if x != nil { + return x.Oid + } + return nil +} + +func (x *ObjectIdentifierValuePair) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type DistinguishedName struct { + state protoimpl.MessageState `protogen:"open.v1"` + Organization string `protobuf:"bytes,1,opt,name=organization,proto3" json:"organization,omitempty"` + CommonName string `protobuf:"bytes,2,opt,name=common_name,json=commonName,proto3" json:"common_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DistinguishedName) Reset() { + *x = DistinguishedName{} + mi := &file_sigstore_common_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DistinguishedName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistinguishedName) ProtoMessage() {} + +func (x *DistinguishedName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistinguishedName.ProtoReflect.Descriptor instead. +func (*DistinguishedName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{8} +} + +func (x *DistinguishedName) GetOrganization() string { + if x != nil { + return x.Organization + } + return "" +} + +func (x *DistinguishedName) GetCommonName() string { + if x != nil { + return x.CommonName + } + return "" +} + +type X509Certificate struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded X.509 certificate. 
+ RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *X509Certificate) Reset() { + *x = X509Certificate{} + mi := &file_sigstore_common_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *X509Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509Certificate) ProtoMessage() {} + +func (x *X509Certificate) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509Certificate.ProtoReflect.Descriptor instead. +func (*X509Certificate) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{9} +} + +func (x *X509Certificate) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +type SubjectAlternativeName struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type SubjectAlternativeNameType `protobuf:"varint,1,opt,name=type,proto3,enum=dev.sigstore.common.v1.SubjectAlternativeNameType" json:"type,omitempty"` + // Types that are valid to be assigned to Identity: + // + // *SubjectAlternativeName_Regexp + // *SubjectAlternativeName_Value + Identity isSubjectAlternativeName_Identity `protobuf_oneof:"identity"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SubjectAlternativeName) Reset() { + *x = SubjectAlternativeName{} + mi := &file_sigstore_common_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SubjectAlternativeName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAlternativeName) ProtoMessage() {} + +func (x *SubjectAlternativeName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAlternativeName.ProtoReflect.Descriptor instead. +func (*SubjectAlternativeName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{10} +} + +func (x *SubjectAlternativeName) GetType() SubjectAlternativeNameType { + if x != nil { + return x.Type + } + return SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED +} + +func (x *SubjectAlternativeName) GetIdentity() isSubjectAlternativeName_Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (x *SubjectAlternativeName) GetRegexp() string { + if x != nil { + if x, ok := x.Identity.(*SubjectAlternativeName_Regexp); ok { + return x.Regexp + } + } + return "" +} + +func (x *SubjectAlternativeName) GetValue() string { + if x != nil { + if x, ok := x.Identity.(*SubjectAlternativeName_Value); ok { + return x.Value + } + } + return "" +} + +type isSubjectAlternativeName_Identity interface { + isSubjectAlternativeName_Identity() +} + +type SubjectAlternativeName_Regexp struct { + // A regular expression describing the expected value for + // the SAN. 
+ Regexp string `protobuf:"bytes,2,opt,name=regexp,proto3,oneof"` +} + +type SubjectAlternativeName_Value struct { + // The exact value to match against. + Value string `protobuf:"bytes,3,opt,name=value,proto3,oneof"` +} + +func (*SubjectAlternativeName_Regexp) isSubjectAlternativeName_Identity() {} + +func (*SubjectAlternativeName_Value) isSubjectAlternativeName_Identity() {} + +// A collection of X.509 certificates. +// +// This "chain" can be used in multiple contexts, such as providing a root CA +// certificate within a TUF root of trust or multiple untrusted certificates for +// the purpose of chain building. +type X509CertificateChain struct { + state protoimpl.MessageState `protogen:"open.v1"` + // One or more DER-encoded certificates. + // + // In some contexts (such as `VerificationMaterial.x509_certificate_chain`), this sequence + // has an imposed order. Unless explicitly specified, there is otherwise no + // guaranteed order. + Certificates []*X509Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *X509CertificateChain) Reset() { + *x = X509CertificateChain{} + mi := &file_sigstore_common_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *X509CertificateChain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509CertificateChain) ProtoMessage() {} + +func (x *X509CertificateChain) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509CertificateChain.ProtoReflect.Descriptor instead. +func (*X509CertificateChain) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{11} +} + +func (x *X509CertificateChain) GetCertificates() []*X509Certificate { + if x != nil { + return x.Certificates + } + return nil +} + +// The time range is closed and includes both the start and end times, +// (i.e., [start, end]). +// End is optional to be able to capture a period that has started but +// has no known end. +type TimeRange struct { + state protoimpl.MessageState `protogen:"open.v1"` + Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3,oneof" json:"end,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimeRange) Reset() { + *x = TimeRange{} + mi := &file_sigstore_common_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimeRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeRange) ProtoMessage() {} + +func (x *TimeRange) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeRange.ProtoReflect.Descriptor instead. 
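+
+// Illustrative sketch (not part of the generated API): checking whether a
+// point in time falls inside a TimeRange. Because the range is inclusive of
+// both endpoints, equality with start or end counts as inside; a nil end
+// means the period has started but has no known end. Treating a nil range or
+// missing start as "not contained" is an assumption of this example, as is
+// the helper name.
+//
+//    import "time"
+//
+//    func timeRangeContains(tr *TimeRange, t time.Time) bool {
+//        if tr == nil || tr.GetStart() == nil {
+//            return false
+//        }
+//        if t.Before(tr.GetStart().AsTime()) {
+//            return false
+//        }
+//        if end := tr.GetEnd(); end != nil && t.After(end.AsTime()) {
+//            return false
+//        }
+//        return true
+//    }
+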
+func (*TimeRange) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{12} +} + +func (x *TimeRange) GetStart() *timestamppb.Timestamp { + if x != nil { + return x.Start + } + return nil +} + +func (x *TimeRange) GetEnd() *timestamppb.Timestamp { + if x != nil { + return x.End + } + return nil +} + +var File_sigstore_common_proto protoreflect.FileDescriptor + +var file_sigstore_common_proto_rawDesc = string([]byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x69, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x43, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x80, 0x01, 0x0a, + 0x10, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0d, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, + 0x23, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6b, + 0x65, 0x79, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x16, 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, + 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xd9, + 0x01, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, + 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 
0x73, 0x88, 0x01, 0x01, 0x12, 0x49, + 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0a, 0x6b, + 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x09, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, + 0x01, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x42, 0x0c, 0x0a, 0x0a, + 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x22, 0x29, 0x0a, 0x13, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x68, 0x69, 0x6e, 0x74, 0x22, 0x27, 0x0a, 0x10, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, + 0x0a, 0x19, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x3a, 0x0a, 0x03, 0x6f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x58, 0x0a, + 0x11, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x33, 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, + 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, + 0x16, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 
0x2e, 0x53, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x18, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x12, 0x16, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x63, 0x0a, + 0x14, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x73, 0x22, 0x78, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x30, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x31, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x65, 0x6e, 0x64, 0x2a, 0x75, 0x0a, 0x0d, + 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1e, 0x0a, + 0x1a, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x53, 0x48, 0x41, 0x32, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 0x48, 0x41, 0x32, 0x5f, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, + 0x32, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, + 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x33, 0x38, + 0x34, 0x10, 0x05, 0x2a, 0x8f, 0x05, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, + 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x11, + 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, + 0x35, 0x10, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x15, 0x0a, 0x0d, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x02, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x18, + 0x0a, 0x10, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x35, 0x10, 0x03, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, + 0x5f, 0x52, 
0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x04, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x21, + 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x31, 0x35, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x09, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0b, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x10, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x11, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, + 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x12, 0x12, 0x24, 0x0a, 0x1c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x32, 0x35, 0x36, 0x5f, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x06, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, + 0x35, 0x36, 0x10, 0x05, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x33, 0x38, 0x34, 0x10, + 0x0c, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x0d, 0x12, 0x10, + 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x07, + 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x5f, 0x50, 0x48, 0x10, 0x08, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, + 0x44, 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, 0x36, + 0x10, 0x13, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, + 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x14, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x12, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0e, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x4c, + 0x4d, 0x4f, 0x54, 0x53, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x1a, 0x02, 0x08, + 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x36, 0x35, 0x10, 0x15, + 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x38, 0x37, 0x10, 0x16, 0x22, + 0x04, 0x08, 0x17, 0x10, 0x32, 0x2a, 0x6f, 0x0a, 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x29, 0x53, 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, + 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 
0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, + 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, + 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, + 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, + 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_common_proto_rawDescOnce sync.Once + file_sigstore_common_proto_rawDescData []byte +) + +func file_sigstore_common_proto_rawDescGZIP() []byte { + file_sigstore_common_proto_rawDescOnce.Do(func() { + file_sigstore_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_common_proto_rawDesc), len(file_sigstore_common_proto_rawDesc))) + }) + return file_sigstore_common_proto_rawDescData +} + +var file_sigstore_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_sigstore_common_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_sigstore_common_proto_goTypes = []any{ + (HashAlgorithm)(0), // 0: dev.sigstore.common.v1.HashAlgorithm + (PublicKeyDetails)(0), // 1: dev.sigstore.common.v1.PublicKeyDetails + (SubjectAlternativeNameType)(0), // 2: dev.sigstore.common.v1.SubjectAlternativeNameType + (*HashOutput)(nil), // 3: dev.sigstore.common.v1.HashOutput + (*MessageSignature)(nil), // 4: dev.sigstore.common.v1.MessageSignature + (*LogId)(nil), // 5: dev.sigstore.common.v1.LogId + (*RFC3161SignedTimestamp)(nil), // 6: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*PublicKey)(nil), // 7: dev.sigstore.common.v1.PublicKey + (*PublicKeyIdentifier)(nil), // 8: dev.sigstore.common.v1.PublicKeyIdentifier + (*ObjectIdentifier)(nil), // 9: dev.sigstore.common.v1.ObjectIdentifier + (*ObjectIdentifierValuePair)(nil), // 10: dev.sigstore.common.v1.ObjectIdentifierValuePair + (*DistinguishedName)(nil), // 11: dev.sigstore.common.v1.DistinguishedName + (*X509Certificate)(nil), // 12: dev.sigstore.common.v1.X509Certificate + (*SubjectAlternativeName)(nil), // 13: dev.sigstore.common.v1.SubjectAlternativeName + (*X509CertificateChain)(nil), // 14: dev.sigstore.common.v1.X509CertificateChain + (*TimeRange)(nil), // 15: dev.sigstore.common.v1.TimeRange + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp +} +var file_sigstore_common_proto_depIdxs = []int32{ + 0, // 0: dev.sigstore.common.v1.HashOutput.algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 3, // 1: dev.sigstore.common.v1.MessageSignature.message_digest:type_name -> dev.sigstore.common.v1.HashOutput + 1, // 2: dev.sigstore.common.v1.PublicKey.key_details:type_name -> dev.sigstore.common.v1.PublicKeyDetails + 15, // 3: dev.sigstore.common.v1.PublicKey.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 9, // 4: 
dev.sigstore.common.v1.ObjectIdentifierValuePair.oid:type_name -> dev.sigstore.common.v1.ObjectIdentifier + 2, // 5: dev.sigstore.common.v1.SubjectAlternativeName.type:type_name -> dev.sigstore.common.v1.SubjectAlternativeNameType + 12, // 6: dev.sigstore.common.v1.X509CertificateChain.certificates:type_name -> dev.sigstore.common.v1.X509Certificate + 16, // 7: dev.sigstore.common.v1.TimeRange.start:type_name -> google.protobuf.Timestamp + 16, // 8: dev.sigstore.common.v1.TimeRange.end:type_name -> google.protobuf.Timestamp + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sigstore_common_proto_init() } +func file_sigstore_common_proto_init() { + if File_sigstore_common_proto != nil { + return + } + file_sigstore_common_proto_msgTypes[4].OneofWrappers = []any{} + file_sigstore_common_proto_msgTypes[10].OneofWrappers = []any{ + (*SubjectAlternativeName_Regexp)(nil), + (*SubjectAlternativeName_Value)(nil), + } + file_sigstore_common_proto_msgTypes[12].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_common_proto_rawDesc), len(file_sigstore_common_proto_rawDesc)), + NumEnums: 3, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_common_proto_goTypes, + DependencyIndexes: file_sigstore_common_proto_depIdxs, + EnumInfos: file_sigstore_common_proto_enumTypes, + MessageInfos: file_sigstore_common_proto_msgTypes, + }.Build() + File_sigstore_common_proto = out.File + file_sigstore_common_proto_goTypes = nil + file_sigstore_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go new file mode 100644 index 000000000000..298e2439ef1e --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go @@ -0,0 +1,241 @@ +// https://raw.githubusercontent.com/secure-systems-lab/dsse/9c813476bd36de70a5738c72e784f123ecea16af/envelope.proto + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: envelope.proto + +package dsse + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An authenticated message of arbitrary type. +type Envelope struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Message to be signed. (In JSON, this is encoded as base64.) + // REQUIRED. + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + // String unambiguously identifying how to interpret payload. + // REQUIRED. + PayloadType string `protobuf:"bytes,2,opt,name=payloadType,proto3" json:"payloadType,omitempty"` + // Signature over: + // + // PAE(type, payload) + // + // Where PAE is defined as: + // PAE(type, payload) = "DSSEv1" + SP + LEN(type) + SP + type + SP + LEN(payload) + SP + payload + // + = concatenation + // SP = ASCII space [0x20] + // "DSSEv1" = ASCII [0x44, 0x53, 0x53, 0x45, 0x76, 0x31] + // LEN(s) = ASCII decimal encoding of the byte length of s, with no leading zeros + // REQUIRED (length >= 1). + Signatures []*Signature `protobuf:"bytes,3,rep,name=signatures,proto3" json:"signatures,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Envelope) Reset() { + *x = Envelope{} + mi := &file_envelope_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Envelope) ProtoMessage() {} + +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_envelope_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. +func (*Envelope) Descriptor() ([]byte, []int) { + return file_envelope_proto_rawDescGZIP(), []int{0} +} + +func (x *Envelope) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Envelope) GetPayloadType() string { + if x != nil { + return x.PayloadType + } + return "" +} + +func (x *Envelope) GetSignatures() []*Signature { + if x != nil { + return x.Signatures + } + return nil +} + +type Signature struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Signature itself. (In JSON, this is encoded as base64.) + // REQUIRED. + Sig []byte `protobuf:"bytes,1,opt,name=sig,proto3" json:"sig,omitempty"` + // *Unauthenticated* hint identifying which public key was used. + // OPTIONAL. + Keyid string `protobuf:"bytes,2,opt,name=keyid,proto3" json:"keyid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Signature) Reset() { + *x = Signature{} + mi := &file_envelope_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_envelope_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. 
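+
+// Illustrative sketch (not part of the generated API): computing the PAE
+// (Pre-Authentication Encoding) bytes that Envelope signatures are made over,
+// following the definition quoted in the Envelope.Signatures comment above.
+// The helper name is an assumption; fmt's %d already produces the decimal
+// byte length with no leading zeros.
+//
+//    import "fmt"
+//
+//    func pae(payloadType string, payload []byte) []byte {
+//        return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
+//            len(payloadType), payloadType, len(payload), payload))
+//    }
+//
+// A signer signs pae(env.PayloadType, env.Payload) and stores the result in
+// Signature.Sig; a verifier recomputes the same bytes before checking each
+// signature against its candidate keys.
+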
+func (*Signature) Descriptor() ([]byte, []int) { + return file_envelope_proto_rawDescGZIP(), []int{1} +} + +func (x *Signature) GetSig() []byte { + if x != nil { + return x.Sig + } + return nil +} + +func (x *Signature) GetKeyid() string { + if x != nil { + return x.Keyid + } + return "" +} + +var File_envelope_proto protoreflect.FileDescriptor + +var file_envelope_proto_rawDesc = string([]byte{ + 0x0a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x09, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x08, 0x45, + 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x33, 0x0a, 0x09, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x65, 0x79, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x69, 0x64, 0x42, 0x44, + 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, + 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x64, + 0x73, 0x73, 0x65, 0xea, 0x02, 0x0e, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, + 0x44, 0x53, 0x53, 0x45, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_envelope_proto_rawDescOnce sync.Once + file_envelope_proto_rawDescData []byte +) + +func file_envelope_proto_rawDescGZIP() []byte { + file_envelope_proto_rawDescOnce.Do(func() { + file_envelope_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_envelope_proto_rawDesc), len(file_envelope_proto_rawDesc))) + }) + return file_envelope_proto_rawDescData +} + +var file_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envelope_proto_goTypes = []any{ + (*Envelope)(nil), // 0: io.intoto.Envelope + (*Signature)(nil), // 1: io.intoto.Signature +} +var file_envelope_proto_depIdxs = []int32{ + 1, // 0: io.intoto.Envelope.signatures:type_name -> io.intoto.Signature + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envelope_proto_init() } +func file_envelope_proto_init() { + if File_envelope_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_envelope_proto_rawDesc), len(file_envelope_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 
0, + NumServices: 0, + }, + GoTypes: file_envelope_proto_goTypes, + DependencyIndexes: file_envelope_proto_depIdxs, + MessageInfos: file_envelope_proto_msgTypes, + }.Build() + File_envelope_proto = out.File + file_envelope_proto_goTypes = nil + file_envelope_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go new file mode 100644 index 000000000000..43b3111ebf88 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go @@ -0,0 +1,560 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: sigstore_rekor.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// KindVersion contains the entry's kind and api version. +type KindVersion struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Kind is the type of entry being stored in the log. + // See here for a list: https://github.com/sigstore/rekor/tree/main/pkg/types + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The specific api version of the type. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *KindVersion) Reset() { + *x = KindVersion{} + mi := &file_sigstore_rekor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *KindVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KindVersion) ProtoMessage() {} + +func (x *KindVersion) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KindVersion.ProtoReflect.Descriptor instead. 
+func (*KindVersion) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{0} +} + +func (x *KindVersion) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *KindVersion) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +// The checkpoint MUST contain an origin string as a unique log identifier, +// the tree size, and the root hash. It MAY also be followed by optional data, +// and clients MUST NOT assume optional data. The checkpoint MUST also contain +// a signature over the root hash (tree head). The checkpoint MAY contain additional +// signatures, but the first SHOULD be the signature from the log. Checkpoint contents +// are concatenated with newlines into a single string. +// The checkpoint format is described in +// https://github.com/transparency-dev/formats/blob/main/log/README.md +// and https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md. +// An example implementation can be found in https://github.com/sigstore/rekor/blob/main/pkg/util/signed_note.go +type Checkpoint struct { + state protoimpl.MessageState `protogen:"open.v1"` + Envelope string `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Checkpoint) Reset() { + *x = Checkpoint{} + mi := &file_sigstore_rekor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Checkpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Checkpoint) ProtoMessage() {} + +func (x *Checkpoint) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Checkpoint.ProtoReflect.Descriptor instead. +func (*Checkpoint) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{1} +} + +func (x *Checkpoint) GetEnvelope() string { + if x != nil { + return x.Envelope + } + return "" +} + +// InclusionProof is the proof returned from the transparency log. Can +// be used for offline or online verification against the log. +type InclusionProof struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The index of the entry in the tree it was written to. + LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` + // The hash digest stored at the root of the merkle tree at the time + // the proof was generated. + RootHash []byte `protobuf:"bytes,2,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + // The size of the merkle tree at the time the proof was generated. + TreeSize int64 `protobuf:"varint,3,opt,name=tree_size,json=treeSize,proto3" json:"tree_size,omitempty"` + // A list of hashes required to compute the inclusion proof, sorted + // in order from leaf to root. + // Note that leaf and root hashes are not included. + // The root hash is available separately in this message, and the + // leaf hash should be calculated by the client. + Hashes [][]byte `protobuf:"bytes,4,rep,name=hashes,proto3" json:"hashes,omitempty"` + // Signature of the tree head, as of the time of this proof was + // generated. See above info on 'Checkpoint' for more details. 
+ Checkpoint *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InclusionProof) Reset() { + *x = InclusionProof{} + mi := &file_sigstore_rekor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InclusionProof) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InclusionProof) ProtoMessage() {} + +func (x *InclusionProof) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InclusionProof.ProtoReflect.Descriptor instead. +func (*InclusionProof) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{2} +} + +func (x *InclusionProof) GetLogIndex() int64 { + if x != nil { + return x.LogIndex + } + return 0 +} + +func (x *InclusionProof) GetRootHash() []byte { + if x != nil { + return x.RootHash + } + return nil +} + +func (x *InclusionProof) GetTreeSize() int64 { + if x != nil { + return x.TreeSize + } + return 0 +} + +func (x *InclusionProof) GetHashes() [][]byte { + if x != nil { + return x.Hashes + } + return nil +} + +func (x *InclusionProof) GetCheckpoint() *Checkpoint { + if x != nil { + return x.Checkpoint + } + return nil +} + +// The inclusion promise is calculated by Rekor. It's calculated as a +// signature over a canonical JSON serialization of the persisted entry, the +// log ID, log index and the integration timestamp. +// See https://github.com/sigstore/rekor/blob/a6e58f72b6b18cc06cefe61808efd562b9726330/pkg/api/entries.go#L54 +// The format of the signature depends on the transparency log's public key. +// If the signature algorithm requires a hash function and/or a signature +// scheme (e.g. RSA) those has to be retrieved out-of-band from the log's +// operators, together with the public key. +// This is used to verify the integration timestamp's value and that the log +// has promised to include the entry. +type InclusionPromise struct { + state protoimpl.MessageState `protogen:"open.v1"` + SignedEntryTimestamp []byte `protobuf:"bytes,1,opt,name=signed_entry_timestamp,json=signedEntryTimestamp,proto3" json:"signed_entry_timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InclusionPromise) Reset() { + *x = InclusionPromise{} + mi := &file_sigstore_rekor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InclusionPromise) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InclusionPromise) ProtoMessage() {} + +func (x *InclusionPromise) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InclusionPromise.ProtoReflect.Descriptor instead. 
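The Checkpoint comment above describes the envelope as newline-concatenated note lines followed by signature lines. A minimal parsing sketch (illustration only, not part of the vendored file; it assumes the origin, tree size and root hash sit on the first three note lines, in the order used by the referenced C2SP checkpoint format, and uses a placeholder signature line):

package main

import (
	"fmt"
	"strings"
)

// splitCheckpoint separates a checkpoint envelope into its signed note body
// and the trailing signature lines, then reads origin, tree size and root
// hash from the first three note lines.
func splitCheckpoint(envelope string) (origin, size, rootHash string, sigs []string, err error) {
	body, sigBlock, ok := strings.Cut(envelope, "\n\n")
	if !ok {
		return "", "", "", nil, fmt.Errorf("no signature block")
	}
	lines := strings.Split(body, "\n")
	if len(lines) < 3 {
		return "", "", "", nil, fmt.Errorf("checkpoint body too short")
	}
	sigs = strings.Split(strings.TrimRight(sigBlock, "\n"), "\n")
	return lines[0], lines[1], lines[2], sigs, nil
}

func main() {
	// Placeholder envelope; real signature lines follow the signed-note format.
	env := "example.log/origin\n42\nbase64roothash=\n\n- example.log/origin c2lnbmF0dXJl\n"
	origin, size, root, sigs, _ := splitCheckpoint(env)
	fmt.Println(origin, size, root, len(sigs))
}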
+func (*InclusionPromise) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{3} +} + +func (x *InclusionPromise) GetSignedEntryTimestamp() []byte { + if x != nil { + return x.SignedEntryTimestamp + } + return nil +} + +// TransparencyLogEntry captures all the details required from Rekor to +// reconstruct an entry, given that the payload is provided via other means. +// This type can easily be created from the existing response from Rekor. +// Future iterations could rely on Rekor returning the minimal set of +// attributes (excluding the payload) that are required for verifying the +// inclusion promise. The inclusion promise (called SignedEntryTimestamp in +// the response from Rekor) is similar to a Signed Certificate Timestamp +// as described here https://www.rfc-editor.org/rfc/rfc6962.html#section-3.2. +type TransparencyLogEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The global index of the entry, used when querying the log by index. + LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` + // The unique identifier of the log. + LogId *v1.LogId `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"` + // The kind (type) and version of the object associated with this + // entry. These values are required to construct the entry during + // verification. + KindVersion *KindVersion `protobuf:"bytes,3,opt,name=kind_version,json=kindVersion,proto3" json:"kind_version,omitempty"` + // The UNIX timestamp from the log when the entry was persisted. + // The integration time MUST NOT be trusted if inclusion_promise + // is omitted. + IntegratedTime int64 `protobuf:"varint,4,opt,name=integrated_time,json=integratedTime,proto3" json:"integrated_time,omitempty"` + // The inclusion promise/signed entry timestamp from the log. + // Required for v0.1 bundles, and MUST be verified. + // Optional for >= v0.2 bundles if another suitable source of + // time is present (such as another source of signed time, + // or the current system time for long-lived certificates). + // MUST be verified if no other suitable source of time is present, + // and SHOULD be verified otherwise. + InclusionPromise *InclusionPromise `protobuf:"bytes,5,opt,name=inclusion_promise,json=inclusionPromise,proto3" json:"inclusion_promise,omitempty"` + // The inclusion proof can be used for offline or online verification + // that the entry was appended to the log, and that the log has not been + // altered. + InclusionProof *InclusionProof `protobuf:"bytes,6,opt,name=inclusion_proof,json=inclusionProof,proto3" json:"inclusion_proof,omitempty"` + // Optional. The canonicalized transparency log entry, used to + // reconstruct the Signed Entry Timestamp (SET) during verification. + // The contents of this field are the same as the `body` field in + // a Rekor response, meaning that it does **not** include the "full" + // canonicalized form (of log index, ID, etc.) which are + // exposed as separate fields. The verifier is responsible for + // combining the `canonicalized_body`, `log_index`, `log_id`, + // and `integrated_time` into the payload that the SET's signature + // is generated over. + // This field is intended to be used in cases where the SET cannot be + // produced determinisitically (e.g. inconsistent JSON field ordering, + // differing whitespace, etc). 
+ // + // If set, clients MUST verify that the signature referenced in the + // `canonicalized_body` matches the signature provided in the + // `Bundle.content`. + // If not set, clients are responsible for constructing an equivalent + // payload from other sources to verify the signature. + CanonicalizedBody []byte `protobuf:"bytes,7,opt,name=canonicalized_body,json=canonicalizedBody,proto3" json:"canonicalized_body,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TransparencyLogEntry) Reset() { + *x = TransparencyLogEntry{} + mi := &file_sigstore_rekor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TransparencyLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransparencyLogEntry) ProtoMessage() {} + +func (x *TransparencyLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransparencyLogEntry.ProtoReflect.Descriptor instead. +func (*TransparencyLogEntry) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{4} +} + +func (x *TransparencyLogEntry) GetLogIndex() int64 { + if x != nil { + return x.LogIndex + } + return 0 +} + +func (x *TransparencyLogEntry) GetLogId() *v1.LogId { + if x != nil { + return x.LogId + } + return nil +} + +func (x *TransparencyLogEntry) GetKindVersion() *KindVersion { + if x != nil { + return x.KindVersion + } + return nil +} + +func (x *TransparencyLogEntry) GetIntegratedTime() int64 { + if x != nil { + return x.IntegratedTime + } + return 0 +} + +func (x *TransparencyLogEntry) GetInclusionPromise() *InclusionPromise { + if x != nil { + return x.InclusionPromise + } + return nil +} + +func (x *TransparencyLogEntry) GetInclusionProof() *InclusionProof { + if x != nil { + return x.InclusionProof + } + return nil +} + +func (x *TransparencyLogEntry) GetCanonicalizedBody() []byte { + if x != nil { + return x.CanonicalizedBody + } + return nil +} + +var File_sigstore_rekor_proto protoreflect.FileDescriptor + +var file_sigstore_rekor_proto_rawDesc = string([]byte{ + 0x0a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x45, 0x0a, 0x0b, 0x4b, 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1d, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2d, 0x0a, 0x0a, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x08, 0x65, 0x6e, + 0x76, 0x65, 0x6c, 0x6f, 
0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x0e, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x20, + 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x20, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x10, 0x49, 0x6e, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x16, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xc7, 0x03, 0x0a, 0x14, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x20, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x39, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, + 0x49, 0x64, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x4a, + 0x0a, 0x0c, 0x6b, 0x69, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6e, + 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6b, + 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0f, 0x69, 0x6e, + 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 
0x6f, 0x6d, 0x69, 0x73, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x52, 0x10, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x53, + 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6f, + 0x64, 0x79, 0x42, 0x78, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x42, 0x0a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, + 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x72, 0x65, + 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_rekor_proto_rawDescOnce sync.Once + file_sigstore_rekor_proto_rawDescData []byte +) + +func file_sigstore_rekor_proto_rawDescGZIP() []byte { + file_sigstore_rekor_proto_rawDescOnce.Do(func() { + file_sigstore_rekor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_rekor_proto_rawDesc), len(file_sigstore_rekor_proto_rawDesc))) + }) + return file_sigstore_rekor_proto_rawDescData +} + +var file_sigstore_rekor_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_sigstore_rekor_proto_goTypes = []any{ + (*KindVersion)(nil), // 0: dev.sigstore.rekor.v1.KindVersion + (*Checkpoint)(nil), // 1: dev.sigstore.rekor.v1.Checkpoint + (*InclusionProof)(nil), // 2: dev.sigstore.rekor.v1.InclusionProof + (*InclusionPromise)(nil), // 3: dev.sigstore.rekor.v1.InclusionPromise + (*TransparencyLogEntry)(nil), // 4: dev.sigstore.rekor.v1.TransparencyLogEntry + (*v1.LogId)(nil), // 5: dev.sigstore.common.v1.LogId +} +var file_sigstore_rekor_proto_depIdxs = []int32{ + 1, // 0: dev.sigstore.rekor.v1.InclusionProof.checkpoint:type_name -> dev.sigstore.rekor.v1.Checkpoint + 5, // 1: dev.sigstore.rekor.v1.TransparencyLogEntry.log_id:type_name -> dev.sigstore.common.v1.LogId + 0, // 2: dev.sigstore.rekor.v1.TransparencyLogEntry.kind_version:type_name -> dev.sigstore.rekor.v1.KindVersion + 3, // 3: dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_promise:type_name -> dev.sigstore.rekor.v1.InclusionPromise + 2, // 4: 
dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_proof:type_name -> dev.sigstore.rekor.v1.InclusionProof + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_sigstore_rekor_proto_init() } +func file_sigstore_rekor_proto_init() { + if File_sigstore_rekor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_rekor_proto_rawDesc), len(file_sigstore_rekor_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_rekor_proto_goTypes, + DependencyIndexes: file_sigstore_rekor_proto_depIdxs, + MessageInfos: file_sigstore_rekor_proto_msgTypes, + }.Build() + File_sigstore_rekor_proto = out.File + file_sigstore_rekor_proto_goTypes = nil + file_sigstore_rekor_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go new file mode 100644 index 000000000000..580d1c69f5a5 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go @@ -0,0 +1,1089 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: sigstore_trustroot.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// ServiceSelector specifies how a client SHOULD select a set of +// Services to connect to. A client SHOULD throw an error if +// the value is SERVICE_SELECTOR_UNDEFINED. +type ServiceSelector int32 + +const ( + ServiceSelector_SERVICE_SELECTOR_UNDEFINED ServiceSelector = 0 + // Clients SHOULD select all Services based on supported API version + // and validity window. + ServiceSelector_ALL ServiceSelector = 1 + // Clients SHOULD select one Service based on supported API version + // and validity window. It is up to the client implementation to + // decide how to select the Service, e.g. random or round-robin. 
+ ServiceSelector_ANY ServiceSelector = 2 + // Clients SHOULD select a specific number of Services based on + // supported API version and validity window, using the provided + // `count`. It is up to the client implementation to decide how to + // select the Service, e.g. random or round-robin. + ServiceSelector_EXACT ServiceSelector = 3 +) + +// Enum value maps for ServiceSelector. +var ( + ServiceSelector_name = map[int32]string{ + 0: "SERVICE_SELECTOR_UNDEFINED", + 1: "ALL", + 2: "ANY", + 3: "EXACT", + } + ServiceSelector_value = map[string]int32{ + "SERVICE_SELECTOR_UNDEFINED": 0, + "ALL": 1, + "ANY": 2, + "EXACT": 3, + } +) + +func (x ServiceSelector) Enum() *ServiceSelector { + p := new(ServiceSelector) + *p = x + return p +} + +func (x ServiceSelector) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceSelector) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_trustroot_proto_enumTypes[0].Descriptor() +} + +func (ServiceSelector) Type() protoreflect.EnumType { + return &file_sigstore_trustroot_proto_enumTypes[0] +} + +func (x ServiceSelector) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceSelector.Descriptor instead. +func (ServiceSelector) EnumDescriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{0} +} + +// TransparencyLogInstance describes the immutable parameters from a +// transparency log. +// See https://www.rfc-editor.org/rfc/rfc9162.html#name-log-parameters +// for more details. +// The included parameters are the minimal set required to identify a log, +// and verify an inclusion proof/promise. +type TransparencyLogInstance struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The base URL at which can be used to URLs for the client. + // SHOULD match the origin on the log checkpoint: + // https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md#note-text. + BaseUrl string `protobuf:"bytes,1,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty"` + // The hash algorithm used for the Merkle Tree. + HashAlgorithm v1.HashAlgorithm `protobuf:"varint,2,opt,name=hash_algorithm,json=hashAlgorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"hash_algorithm,omitempty"` + // The public key used to verify signatures generated by the log. + // This attribute contains the signature algorithm used by the log. + PublicKey *v1.PublicKey `protobuf:"bytes,3,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // The identifier for this transparency log. + // Represented as the SHA-256 hash of the log's public key, + // calculated over the DER encoding of the key represented as + // SubjectPublicKeyInfo. + // See https://www.rfc-editor.org/rfc/rfc6962#section-3.2 + // For Rekor v2 instances, log_id and checkpoint_key_id will be set + // to the same value. + // It is recommended to use checkpoint_key_id instead, since log_id is not + // guaranteed to be unique across multiple deployments. Clients + // must use the key name and key ID, as defined by the signed-note spec + // linked below, from a checkpoint to determine the correct + // TransparencyLogInstance to verify a proof. + // log_id will eventually be deprecated in favor of checkpoint_id. + LogId *v1.LogId `protobuf:"bytes,4,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"` + // The unique identifier for the log, used in the checkpoint. 
+ // Only supported for TrustedRoot media types matching or greater than + // application/vnd.dev.sigstore.trustedroot.v0.2+json + // Its calculation is described in + // https://github.com/C2SP/C2SP/blob/main/signed-note.md#signatures + // SHOULD be set for all logs. When not set, clients MUST use log_id. + // + // For Ed25519 signatures, the key ID is computed per the C2SP spec: + // key ID = SHA-256(key name || 0x0A || 0x01 || 32-byte Ed25519 public key)[:4] + // For ECDSA signatures, the key ID is computed per the C2SP spec: + // key ID = SHA-256(PKIX ASN.1 DER-encoded public key, in SubjectPublicKeyInfo format)[:4] + // For RSA signatures, the signature type will be 0xff with an appended identifier for the format, + // "PKIX-RSA-PKCS#1v1.5": + // key ID = SHA-256(key name || 0x0A || 0xff || PKIX-RSA-PKCS#1v1.5 || PKIX ASN.1 DER-encoded public key)[:4] + // + // This is provided for convenience. Clients can also calculate the + // checkpoint key ID given the log's public key. + // SHOULD be 4 bytes long, as a truncated hash. + // + // To find a matching TransparencyLogInstance in the TrustedRoot, + // clients will parse the checkpoint, and for each signature line, + // use the key name (i.e. log origin, base_url from TrustedRoot) + // and checkpoint key ID (i.e. checkpoint_key_id from TrustedRoot) + // which can then be compared against the TrustedRoot log instances. + CheckpointKeyId *v1.LogId `protobuf:"bytes,5,opt,name=checkpoint_key_id,json=checkpointKeyId,proto3" json:"checkpoint_key_id,omitempty"` + // The name of the operator of this log deployment. Operator MUST be + // formatted as a scheme-less URI, e.g. sigstore.dev + // Only supported for TrustedRoot media types matching or greater than + // application/vnd.dev.sigstore.trustedroot.v0.2+json + // This MUST be used when there are multiple transparency log instances + // to determine if log proof verification meets a specified threshold, + // e.g. two proofs from log deployments operated by the same operator + // should count as only one valid proof. + Operator string `protobuf:"bytes,6,opt,name=operator,proto3" json:"operator,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TransparencyLogInstance) Reset() { + *x = TransparencyLogInstance{} + mi := &file_sigstore_trustroot_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TransparencyLogInstance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransparencyLogInstance) ProtoMessage() {} + +func (x *TransparencyLogInstance) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransparencyLogInstance.ProtoReflect.Descriptor instead. 
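As an aside, the Ed25519 checkpoint key ID formula quoted in the comment above is simple to reproduce with the standard library. A minimal sketch (illustration only, not part of the vendored file; the key name and zeroed key below are placeholders):

package main

import (
	"crypto/sha256"
	"fmt"
)

// ed25519CheckpointKeyID derives the truncated checkpoint key ID:
// SHA-256(key name || 0x0A || 0x01 || 32-byte Ed25519 public key)[:4]
func ed25519CheckpointKeyID(keyName string, pub [32]byte) []byte {
	h := sha256.New()
	h.Write([]byte(keyName))
	h.Write([]byte{0x0A}) // newline separator
	h.Write([]byte{0x01}) // Ed25519 signature type per the signed-note spec
	h.Write(pub[:])
	return h.Sum(nil)[:4]
}

func main() {
	var pub [32]byte // placeholder key, illustration only
	fmt.Printf("%x\n", ed25519CheckpointKeyID("example.log/origin", pub))
}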
+func (*TransparencyLogInstance) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{0} +} + +func (x *TransparencyLogInstance) GetBaseUrl() string { + if x != nil { + return x.BaseUrl + } + return "" +} + +func (x *TransparencyLogInstance) GetHashAlgorithm() v1.HashAlgorithm { + if x != nil { + return x.HashAlgorithm + } + return v1.HashAlgorithm(0) +} + +func (x *TransparencyLogInstance) GetPublicKey() *v1.PublicKey { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *TransparencyLogInstance) GetLogId() *v1.LogId { + if x != nil { + return x.LogId + } + return nil +} + +func (x *TransparencyLogInstance) GetCheckpointKeyId() *v1.LogId { + if x != nil { + return x.CheckpointKeyId + } + return nil +} + +func (x *TransparencyLogInstance) GetOperator() string { + if x != nil { + return x.Operator + } + return "" +} + +// CertificateAuthority enlists the information required to identify which +// CA to use and perform signature verification. +type CertificateAuthority struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The root certificate MUST be self-signed, and so the subject and + // issuer are the same. + Subject *v1.DistinguishedName `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // The URI identifies the certificate authority. + // + // It is RECOMMENDED that the URI is the base URL for the certificate + // authority, that can be provided to any SDK/client provided + // by the certificate authority to interact with the certificate + // authority. + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + // The certificate chain for this CA. The last certificate in the chain + // MUST be the trust anchor. The trust anchor MAY be a self-signed root + // CA certificate or MAY be an intermediate CA certificate. + CertChain *v1.X509CertificateChain `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` + // The time the *entire* chain was valid. This is at max the + // longest interval when *all* certificates in the chain were valid, + // but it MAY be shorter. Clients MUST check timestamps against *both* + // the `valid_for` time range *and* the entire certificate chain. + // + // The TimeRange should be considered valid *inclusive* of the + // endpoints. + ValidFor *v1.TimeRange `protobuf:"bytes,4,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"` + // The name of the operator of this certificate or timestamp authority. + // Operator MUST be formatted as a scheme-less URI, e.g. sigstore.dev + // This MUST be used when there are multiple timestamp authorities to + // determine if the signed timestamp verification meets a specified + // threshold, e.g. two signed timestamps from timestamp authorities + // operated by the same operator should count as only one valid + // timestamp. 
+ Operator string `protobuf:"bytes,5,opt,name=operator,proto3" json:"operator,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CertificateAuthority) Reset() { + *x = CertificateAuthority{} + mi := &file_sigstore_trustroot_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CertificateAuthority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateAuthority) ProtoMessage() {} + +func (x *CertificateAuthority) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateAuthority.ProtoReflect.Descriptor instead. +func (*CertificateAuthority) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{1} +} + +func (x *CertificateAuthority) GetSubject() *v1.DistinguishedName { + if x != nil { + return x.Subject + } + return nil +} + +func (x *CertificateAuthority) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *CertificateAuthority) GetCertChain() *v1.X509CertificateChain { + if x != nil { + return x.CertChain + } + return nil +} + +func (x *CertificateAuthority) GetValidFor() *v1.TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +func (x *CertificateAuthority) GetOperator() string { + if x != nil { + return x.Operator + } + return "" +} + +// TrustedRoot describes the client's complete set of trusted entities. +// How the TrustedRoot is populated is not specified, but can be a +// combination of many sources such as TUF repositories, files on disk etc. +// +// The TrustedRoot is not meant to be used for any artifact verification, only +// to capture the complete/global set of trusted verification materials. +// When verifying an artifact, based on the artifact and policies, a selection +// of keys/authorities are expected to be extracted and provided to the +// verification function. This way the set of keys/authorities can be kept to +// a minimal set by the policy to gain better control over what signatures +// that are allowed. +// +// The embedded transparency logs, CT logs, CAs and TSAs MUST include any +// previously used instance -- otherwise signatures made in the past cannot +// be verified. +// +// All the listed instances SHOULD be sorted by the 'valid_for' in ascending +// order, that is, the oldest instance first. Only the last instance is +// allowed to have their 'end' timestamp unset. All previous instances MUST +// have a closed interval of validity. The last instance MAY have a closed +// interval. Clients MUST accept instances that overlaps in time, if not +// clients may experience problems during rotations of verification +// materials. +// +// To be able to manage planned rotations of either transparency logs or +// certificate authorities, clienst MUST accept lists of instances where +// the last instance have a 'valid_for' that belongs to the future. +// This should not be a problem as clients SHOULD first seek the trust root +// for a suitable instance before creating a per artifact trust root (that +// is, a sub-set of the complete trust root) that is used for verification. 
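The 'valid_for' handling described above reduces to an inclusive interval check in which only the most recent instance may leave its end unset. A minimal sketch of that check (illustration only, not part of the vendored file; it assumes the TimeRange endpoints have already been converted to time.Time values, for example via timestamppb's AsTime, with a nil end modelling the single open-ended instance):

package main

import (
	"fmt"
	"time"
)

// withinValidFor reports whether t falls inside [start, end], inclusive of
// both endpoints. A nil end models an instance whose validity is still open.
func withinValidFor(t, start time.Time, end *time.Time) bool {
	if t.Before(start) {
		return false
	}
	if end != nil && t.After(*end) {
		return false
	}
	return true
}

func main() {
	start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(withinValidFor(start.AddDate(0, 6, 0), start, &end)) // true
	fmt.Println(withinValidFor(end.AddDate(0, 0, 1), start, &end))   // false
}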
+type TrustedRoot struct { + state protoimpl.MessageState `protogen:"open.v1"` + // MUST be application/vnd.dev.sigstore.trustedroot.v0.2+json + // when encoded as JSON. + // Clients MAY choose to also support + // application/vnd.dev.sigstore.trustedroot.v0.1+json + // Clients MAY process and parse content with the media type defined + // in the old format: + // application/vnd.dev.sigstore.trustedroot+json;version=0.1 + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // A set of trusted Rekor servers. + Tlogs []*TransparencyLogInstance `protobuf:"bytes,2,rep,name=tlogs,proto3" json:"tlogs,omitempty"` + // A set of trusted certificate authorities (e.g Fulcio), and any + // intermediate certificates they provide. + // If a CA is issuing multiple intermediate certificate, each + // combination shall be represented as separate chain. I.e, a single + // root cert may appear in multiple chains but with different + // intermediate and/or leaf certificates. + // The certificates are intended to be used for verifying artifact + // signatures. + CertificateAuthorities []*CertificateAuthority `protobuf:"bytes,3,rep,name=certificate_authorities,json=certificateAuthorities,proto3" json:"certificate_authorities,omitempty"` + // A set of trusted certificate transparency logs. + Ctlogs []*TransparencyLogInstance `protobuf:"bytes,4,rep,name=ctlogs,proto3" json:"ctlogs,omitempty"` + // A set of trusted timestamping authorities. + TimestampAuthorities []*CertificateAuthority `protobuf:"bytes,5,rep,name=timestamp_authorities,json=timestampAuthorities,proto3" json:"timestamp_authorities,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TrustedRoot) Reset() { + *x = TrustedRoot{} + mi := &file_sigstore_trustroot_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TrustedRoot) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TrustedRoot) ProtoMessage() {} + +func (x *TrustedRoot) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TrustedRoot.ProtoReflect.Descriptor instead. +func (*TrustedRoot) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{2} +} + +func (x *TrustedRoot) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *TrustedRoot) GetTlogs() []*TransparencyLogInstance { + if x != nil { + return x.Tlogs + } + return nil +} + +func (x *TrustedRoot) GetCertificateAuthorities() []*CertificateAuthority { + if x != nil { + return x.CertificateAuthorities + } + return nil +} + +func (x *TrustedRoot) GetCtlogs() []*TransparencyLogInstance { + if x != nil { + return x.Ctlogs + } + return nil +} + +func (x *TrustedRoot) GetTimestampAuthorities() []*CertificateAuthority { + if x != nil { + return x.TimestampAuthorities + } + return nil +} + +// SigningConfig represents the trusted entities/state needed by Sigstore +// signing. In particular, it primarily contains service URLs that a Sigstore +// signer may need to connect to for the online aspects of signing. 
+type SigningConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + // MUST be application/vnd.dev.sigstore.signingconfig.v0.2+json + // Clients MAY choose to also support + // application/vnd.dev.sigstore.signingconfig.v0.1+json + MediaType string `protobuf:"bytes,5,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // URLs to Fulcio-compatible CAs, capable of receiving + // Certificate Signing Requests (CSRs) and responding with + // issued certificates. + // + // These URLs MUST be the "base" URL for the CAs, which clients + // should construct an appropriate CSR endpoint on top of. + // For example, if a CA URL is `https://example.com/ca`, then + // the client MAY construct the CSR endpoint as + // `https://example.com/ca/api/v2/signingCert`. + // + // Clients MUST select only one Service with the highest API version + // that the client is compatible with, that is within its + // validity period, and has the newest validity start date. + // Client SHOULD select the first Service that meets this requirement. + // All listed Services SHOULD be sorted by the `valid_for` window in + // descending order, with the newest instance first. + CaUrls []*Service `protobuf:"bytes,6,rep,name=ca_urls,json=caUrls,proto3" json:"ca_urls,omitempty"` + // URLs to OpenID Connect identity providers. + // + // These URLs MUST be the "base" URLs for the OIDC IdPs, which clients + // should perform well-known OpenID Connect discovery against. + // + // Clients MUST select only one Service with the highest API version + // that the client is compatible with, that is within its + // validity period, and has the newest validity start date. + // Client SHOULD select the first Service that meets this requirement. + // All listed Services SHOULD be sorted by the `valid_for` window in + // descending order, with the newest instance first. + OidcUrls []*Service `protobuf:"bytes,7,rep,name=oidc_urls,json=oidcUrls,proto3" json:"oidc_urls,omitempty"` + // URLs to Rekor transparency logs. + // + // These URL MUST be the "base" URLs for the transparency logs, + // which clients should construct appropriate API endpoints on top of. + // + // Clients MUST group Services by `operator` and select at most one + // Service from each operator. Clients MUST select Services with the + // highest API version that the client is compatible with, that are + // within its validity period, and have the newest validity start dates. + // All listed Services SHOULD be sorted by the `valid_for` window in + // descending order, with the newest instance first. + // + // Clients MUST select Services based on the selector value of + // `rekor_tlog_config`. + RekorTlogUrls []*Service `protobuf:"bytes,8,rep,name=rekor_tlog_urls,json=rekorTlogUrls,proto3" json:"rekor_tlog_urls,omitempty"` + // Specifies how a client should select the set of Rekor transparency + // logs to write to. + RekorTlogConfig *ServiceConfiguration `protobuf:"bytes,9,opt,name=rekor_tlog_config,json=rekorTlogConfig,proto3" json:"rekor_tlog_config,omitempty"` + // URLs to RFC 3161 Time Stamping Authorities (TSA). + // + // These URLs MUST be the *full* URL for the TSA, meaning that it + // should be suitable for submitting Time Stamp Requests (TSRs) to + // via HTTP, per RFC 3161. + // + // Clients MUST group Services by `operator` and select at most one + // Service from each operator. 
Clients MUST select Services with the + // highest API version that the client is compatible with, that are + // within its validity period, and have the newest validity start dates. + // All listed Services SHOULD be sorted by the `valid_for` window in + // descending order, with the newest instance first. + // + // Clients MUST select Services based on the selector value of + // `tsa_config`. + TsaUrls []*Service `protobuf:"bytes,10,rep,name=tsa_urls,json=tsaUrls,proto3" json:"tsa_urls,omitempty"` + // Specifies how a client should select the set of TSAs to request + // signed timestamps from. + TsaConfig *ServiceConfiguration `protobuf:"bytes,11,opt,name=tsa_config,json=tsaConfig,proto3" json:"tsa_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SigningConfig) Reset() { + *x = SigningConfig{} + mi := &file_sigstore_trustroot_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SigningConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SigningConfig) ProtoMessage() {} + +func (x *SigningConfig) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SigningConfig.ProtoReflect.Descriptor instead. +func (*SigningConfig) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{3} +} + +func (x *SigningConfig) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *SigningConfig) GetCaUrls() []*Service { + if x != nil { + return x.CaUrls + } + return nil +} + +func (x *SigningConfig) GetOidcUrls() []*Service { + if x != nil { + return x.OidcUrls + } + return nil +} + +func (x *SigningConfig) GetRekorTlogUrls() []*Service { + if x != nil { + return x.RekorTlogUrls + } + return nil +} + +func (x *SigningConfig) GetRekorTlogConfig() *ServiceConfiguration { + if x != nil { + return x.RekorTlogConfig + } + return nil +} + +func (x *SigningConfig) GetTsaUrls() []*Service { + if x != nil { + return x.TsaUrls + } + return nil +} + +func (x *SigningConfig) GetTsaConfig() *ServiceConfiguration { + if x != nil { + return x.TsaConfig + } + return nil +} + +// Service represents an instance of a service that is a part of Sigstore infrastructure. +// When selecting one or multiple services from a list of services, clients MUST: +// - Use the API version hint to determine the service with the highest API version +// that the client is compatible with. +// - Only select services within the specified validity period and that have the +// newest validity start date. +// +// When selecting multiple services, clients MUST: +// - Use the ServiceConfiguration to determine how many services MUST be selected. +// Clients MUST return an error if there are not enough services that meet the +// selection criteria. +// - Group services by `operator` and select at most one service from an operator. +// During verification, clients MUST treat valid verification metadata from the +// operator as valid only once towards a threshold. +// - Select services from only the highest supported API version. +type Service struct { + state protoimpl.MessageState `protogen:"open.v1"` + // URL of the service. MUST include scheme and authority. MAY include path. 
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + // Specifies the major API version. A value of 0 represents a service that + // has not yet been released. + MajorApiVersion uint32 `protobuf:"varint,2,opt,name=major_api_version,json=majorApiVersion,proto3" json:"major_api_version,omitempty"` + // Validity period of a service. A service that has only a start date + // SHOULD be considered the most recent instance of that service, but + // the client MUST NOT assume there is only one valid instance. + // The TimeRange MUST be considered valid *inclusive* of the + // endpoints. + ValidFor *v1.TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"` + // Specifies the name of the service operator. When selecting multiple + // services, clients MUST use the operator to select services from + // distinct operators. Operator MUST be formatted as a scheme-less + // URI, e.g. sigstore.dev + Operator string `protobuf:"bytes,4,opt,name=operator,proto3" json:"operator,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Service) Reset() { + *x = Service{} + mi := &file_sigstore_trustroot_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service) ProtoMessage() {} + +func (x *Service) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service.ProtoReflect.Descriptor instead. +func (*Service) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{4} +} + +func (x *Service) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Service) GetMajorApiVersion() uint32 { + if x != nil { + return x.MajorApiVersion + } + return 0 +} + +func (x *Service) GetValidFor() *v1.TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +func (x *Service) GetOperator() string { + if x != nil { + return x.Operator + } + return "" +} + +// ServiceConfiguration specifies how a client should select a set of +// Services to connect to, along with a count when a specific number +// of Services is requested. +type ServiceConfiguration struct { + state protoimpl.MessageState `protogen:"open.v1"` + // How a client should select a set of Services to connect to. + // Clients SHOULD NOT select services from multiple API versions. + Selector ServiceSelector `protobuf:"varint,1,opt,name=selector,proto3,enum=dev.sigstore.trustroot.v1.ServiceSelector" json:"selector,omitempty"` + // count specifies the number of Services the client should use. + // Only used when selector is set to EXACT, and count MUST be greater + // than 0. count MUST be less than or equal to the number of Services. + // Clients MUST return an error is there are not enough services + // that meet selection criteria. 
+ Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ServiceConfiguration) Reset() { + *x = ServiceConfiguration{} + mi := &file_sigstore_trustroot_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ServiceConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceConfiguration) ProtoMessage() {} + +func (x *ServiceConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceConfiguration.ProtoReflect.Descriptor instead. +func (*ServiceConfiguration) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{5} +} + +func (x *ServiceConfiguration) GetSelector() ServiceSelector { + if x != nil { + return x.Selector + } + return ServiceSelector_SERVICE_SELECTOR_UNDEFINED +} + +func (x *ServiceConfiguration) GetCount() uint32 { + if x != nil { + return x.Count + } + return 0 +} + +// ClientTrustConfig describes the complete state needed by a client +// to perform both signing and verification operations against a particular +// instance of Sigstore. +type ClientTrustConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + // MUST be application/vnd.dev.sigstore.clienttrustconfig.v0.1+json + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // The root of trust, which MUST be present. + TrustedRoot *TrustedRoot `protobuf:"bytes,2,opt,name=trusted_root,json=trustedRoot,proto3" json:"trusted_root,omitempty"` + // Configuration for signing clients, which MUST be present. + SigningConfig *SigningConfig `protobuf:"bytes,3,opt,name=signing_config,json=signingConfig,proto3" json:"signing_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClientTrustConfig) Reset() { + *x = ClientTrustConfig{} + mi := &file_sigstore_trustroot_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClientTrustConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientTrustConfig) ProtoMessage() {} + +func (x *ClientTrustConfig) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientTrustConfig.ProtoReflect.Descriptor instead. 
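Taken together, the Service and ServiceConfiguration comments above amount to a small selection procedure: filter to services whose API version the client supports and that are currently within their validity window, keep at most one service per operator, then apply the selector. A minimal sketch of that last step, shown as if it sat in the same package as the generated types and assuming the input slice has already been filtered and sorted as the comments require (illustration only, not part of the vendored file):

package v1

import "fmt"

// selectServices applies cfg to services that were already filtered to a
// supported API version and the current validity window, keeping at most one
// Service per operator before applying the selector.
func selectServices(svcs []*Service, cfg *ServiceConfiguration) ([]*Service, error) {
	seen := make(map[string]bool)
	var picked []*Service
	for _, s := range svcs {
		if seen[s.GetOperator()] {
			continue // at most one Service per operator
		}
		seen[s.GetOperator()] = true
		picked = append(picked, s)
	}
	switch cfg.GetSelector() {
	case ServiceSelector_ALL:
		return picked, nil
	case ServiceSelector_ANY:
		if len(picked) == 0 {
			return nil, fmt.Errorf("no eligible services")
		}
		return picked[:1], nil
	case ServiceSelector_EXACT:
		n := int(cfg.GetCount())
		if n <= 0 || len(picked) < n {
			return nil, fmt.Errorf("need %d services, have %d", n, len(picked))
		}
		return picked[:n], nil
	default:
		return nil, fmt.Errorf("undefined service selector")
	}
}

For EXACT the sketch returns an error when fewer eligible services remain than the requested count, mirroring the requirement stated in the comments above.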
+func (*ClientTrustConfig) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{6} +} + +func (x *ClientTrustConfig) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ClientTrustConfig) GetTrustedRoot() *TrustedRoot { + if x != nil { + return x.TrustedRoot + } + return nil +} + +func (x *ClientTrustConfig) GetSigningConfig() *SigningConfig { + if x != nil { + return x.SigningConfig + } + return nil +} + +var File_sigstore_trustroot_proto protoreflect.FileDescriptor + +var file_sigstore_trustroot_proto_rawDesc = string([]byte{ + 0x0a, 0x18, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, + 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe1, 0x02, + 0x0a, 0x17, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, + 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x73, + 0x65, 0x55, 0x72, 0x6c, 0x12, 0x4c, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, + 0x67, 0x49, 0x64, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x6f, 0x67, 0x49, 0x64, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x22, 0x96, 0x02, 0x0a, 0x14, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 
0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x07, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, + 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x69, 0x12, 0x4b, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x3e, + 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x92, 0x03, 0x0a, 0x0b, 0x54, + 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x74, 0x6c, 0x6f, + 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x74, 0x6c, + 0x6f, 0x67, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x4a, 0x0a, + 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x52, 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x64, 0x0a, 0x15, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 
0x6d, 0x70, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, + 0xea, 0x03, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x3b, 0x0a, 0x07, 0x63, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x63, 0x61, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x3f, 0x0a, + 0x09, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x6f, 0x69, 0x64, 0x63, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x4a, + 0x0a, 0x0f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x5f, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x75, 0x72, 0x6c, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x6b, + 0x6f, 0x72, 0x54, 0x6c, 0x6f, 0x67, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x5b, 0x0a, 0x11, 0x72, 0x65, + 0x6b, 0x6f, 0x72, 0x5f, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x54, 0x6c, 0x6f, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x73, 0x61, 0x5f, 0x75, + 0x72, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, + 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x74, + 0x73, 0x61, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x74, 0x73, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, + 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, + 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, 0x73, 0x61, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 
0x08, 0x01, 0x10, 0x05, 0x22, 0xa3, 0x01, 0x0a, + 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x61, + 0x6a, 0x6f, 0x72, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x41, 0x70, 0x69, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, + 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x22, 0x74, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x08, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd8, 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, + 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4e, 0x0a, + 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x54, 0x0a, + 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2a, 0x4e, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, + 0x45, 0x5f, 0x53, 0x45, 0x4c, 0x45, 0x43, 0x54, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, + 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, + 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 
0x45, 0x58, 0x41, 0x43, + 0x54, 0x10, 0x03, 0x42, 0x88, 0x01, 0x0a, 0x1f, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x6f, + 0x6f, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, + 0x74, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x17, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, + 0x3a, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_trustroot_proto_rawDescOnce sync.Once + file_sigstore_trustroot_proto_rawDescData []byte +) + +func file_sigstore_trustroot_proto_rawDescGZIP() []byte { + file_sigstore_trustroot_proto_rawDescOnce.Do(func() { + file_sigstore_trustroot_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_trustroot_proto_rawDesc), len(file_sigstore_trustroot_proto_rawDesc))) + }) + return file_sigstore_trustroot_proto_rawDescData +} + +var file_sigstore_trustroot_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_sigstore_trustroot_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_sigstore_trustroot_proto_goTypes = []any{ + (ServiceSelector)(0), // 0: dev.sigstore.trustroot.v1.ServiceSelector + (*TransparencyLogInstance)(nil), // 1: dev.sigstore.trustroot.v1.TransparencyLogInstance + (*CertificateAuthority)(nil), // 2: dev.sigstore.trustroot.v1.CertificateAuthority + (*TrustedRoot)(nil), // 3: dev.sigstore.trustroot.v1.TrustedRoot + (*SigningConfig)(nil), // 4: dev.sigstore.trustroot.v1.SigningConfig + (*Service)(nil), // 5: dev.sigstore.trustroot.v1.Service + (*ServiceConfiguration)(nil), // 6: dev.sigstore.trustroot.v1.ServiceConfiguration + (*ClientTrustConfig)(nil), // 7: dev.sigstore.trustroot.v1.ClientTrustConfig + (v1.HashAlgorithm)(0), // 8: dev.sigstore.common.v1.HashAlgorithm + (*v1.PublicKey)(nil), // 9: dev.sigstore.common.v1.PublicKey + (*v1.LogId)(nil), // 10: dev.sigstore.common.v1.LogId + (*v1.DistinguishedName)(nil), // 11: dev.sigstore.common.v1.DistinguishedName + (*v1.X509CertificateChain)(nil), // 12: dev.sigstore.common.v1.X509CertificateChain + (*v1.TimeRange)(nil), // 13: dev.sigstore.common.v1.TimeRange +} +var file_sigstore_trustroot_proto_depIdxs = []int32{ + 8, // 0: dev.sigstore.trustroot.v1.TransparencyLogInstance.hash_algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 9, // 1: dev.sigstore.trustroot.v1.TransparencyLogInstance.public_key:type_name -> dev.sigstore.common.v1.PublicKey + 10, // 2: dev.sigstore.trustroot.v1.TransparencyLogInstance.log_id:type_name -> dev.sigstore.common.v1.LogId + 10, // 3: dev.sigstore.trustroot.v1.TransparencyLogInstance.checkpoint_key_id:type_name -> dev.sigstore.common.v1.LogId + 11, // 4: dev.sigstore.trustroot.v1.CertificateAuthority.subject:type_name -> dev.sigstore.common.v1.DistinguishedName + 12, // 5: dev.sigstore.trustroot.v1.CertificateAuthority.cert_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain + 13, // 6: dev.sigstore.trustroot.v1.CertificateAuthority.valid_for:type_name -> 
dev.sigstore.common.v1.TimeRange + 1, // 7: dev.sigstore.trustroot.v1.TrustedRoot.tlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance + 2, // 8: dev.sigstore.trustroot.v1.TrustedRoot.certificate_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority + 1, // 9: dev.sigstore.trustroot.v1.TrustedRoot.ctlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance + 2, // 10: dev.sigstore.trustroot.v1.TrustedRoot.timestamp_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority + 5, // 11: dev.sigstore.trustroot.v1.SigningConfig.ca_urls:type_name -> dev.sigstore.trustroot.v1.Service + 5, // 12: dev.sigstore.trustroot.v1.SigningConfig.oidc_urls:type_name -> dev.sigstore.trustroot.v1.Service + 5, // 13: dev.sigstore.trustroot.v1.SigningConfig.rekor_tlog_urls:type_name -> dev.sigstore.trustroot.v1.Service + 6, // 14: dev.sigstore.trustroot.v1.SigningConfig.rekor_tlog_config:type_name -> dev.sigstore.trustroot.v1.ServiceConfiguration + 5, // 15: dev.sigstore.trustroot.v1.SigningConfig.tsa_urls:type_name -> dev.sigstore.trustroot.v1.Service + 6, // 16: dev.sigstore.trustroot.v1.SigningConfig.tsa_config:type_name -> dev.sigstore.trustroot.v1.ServiceConfiguration + 13, // 17: dev.sigstore.trustroot.v1.Service.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 0, // 18: dev.sigstore.trustroot.v1.ServiceConfiguration.selector:type_name -> dev.sigstore.trustroot.v1.ServiceSelector + 3, // 19: dev.sigstore.trustroot.v1.ClientTrustConfig.trusted_root:type_name -> dev.sigstore.trustroot.v1.TrustedRoot + 4, // 20: dev.sigstore.trustroot.v1.ClientTrustConfig.signing_config:type_name -> dev.sigstore.trustroot.v1.SigningConfig + 21, // [21:21] is the sub-list for method output_type + 21, // [21:21] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name +} + +func init() { file_sigstore_trustroot_proto_init() } +func file_sigstore_trustroot_proto_init() { + if File_sigstore_trustroot_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_trustroot_proto_rawDesc), len(file_sigstore_trustroot_proto_rawDesc)), + NumEnums: 1, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_trustroot_proto_goTypes, + DependencyIndexes: file_sigstore_trustroot_proto_depIdxs, + EnumInfos: file_sigstore_trustroot_proto_enumTypes, + MessageInfos: file_sigstore_trustroot_proto_msgTypes, + }.Build() + File_sigstore_trustroot_proto = out.File + file_sigstore_trustroot_proto_goTypes = nil + file_sigstore_trustroot_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt b/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt new file mode 100644 index 000000000000..8bb896e7f904 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt @@ -0,0 +1,13 @@ +Copyright 2025 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE b/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go b/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go new file mode 100644 index 000000000000..bb28b196acba --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go @@ -0,0 +1,68 @@ +// +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package safeint + +import ( + "fmt" + "math" +) + +// SafeInt64 holds equivalent int64 and uint64 integers. +type SafeInt64 struct { + u uint64 + i int64 +} + +// NewSafeInt64 returns a safeInt64 struct as long as the number is either an +// int64 or uint64 and the value can safely be converted in either direction +// without overflowing, i.e. is not greater than MaxInt64 and not negative. +// +// This has implications for its usage, e.g. when used for the tree size, a new +// tree must be created to replace the old tree before its size reaches +// math.MaxInt64. +// +// This is needed for compatibility with TransparencyLogEntry +// (https://github.com/sigstore/protobuf-specs/blob/e871d3e6fd06fa73a1524ef0efaf1452d3304cf6/protos/sigstore_rekor.proto#L86-L138). +func NewSafeInt64(number any) (*SafeInt64, error) { + var result SafeInt64 + switch n := number.(type) { + case uint64: + if n > math.MaxInt64 { + return nil, fmt.Errorf("exceeded max int64: %d", n) + } + result.u = n + result.i = int64(n) //nolint:gosec + case int64: + if n < 0 { + return nil, fmt.Errorf("negative integer: %d", n) + } + result.u = uint64(n) //nolint:gosec + result.i = n + default: + return nil, fmt.Errorf("only uint64 and int64 are supported") + } + return &result, nil +} + +// U returns the uint64 value of the integer. +func (s *SafeInt64) U() uint64 { + return s.u +} + +// I returns the int64 value of the integer. 
+func (s *SafeInt64) I() int64 { + return s.i +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go new file mode 100644 index 000000000000..e243eabb9327 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go @@ -0,0 +1,248 @@ +// Copyright 2025 The Sigstore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: rekor/v2/dsse.proto + +package protobuf + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + dsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A request to add a DSSE v0.0.2 entry to the log +type DSSERequestV002 struct { + state protoimpl.MessageState `protogen:"open.v1"` + // A DSSE envelope + Envelope *dsse.Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` + // All necessary verification material to verify all signatures embedded in the envelope + Verifiers []*Verifier `protobuf:"bytes,2,rep,name=verifiers,proto3" json:"verifiers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DSSERequestV002) Reset() { + *x = DSSERequestV002{} + mi := &file_rekor_v2_dsse_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DSSERequestV002) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DSSERequestV002) ProtoMessage() {} + +func (x *DSSERequestV002) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_dsse_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DSSERequestV002.ProtoReflect.Descriptor instead. 
+func (*DSSERequestV002) Descriptor() ([]byte, []int) { + return file_rekor_v2_dsse_proto_rawDescGZIP(), []int{0} +} + +func (x *DSSERequestV002) GetEnvelope() *dsse.Envelope { + if x != nil { + return x.Envelope + } + return nil +} + +func (x *DSSERequestV002) GetVerifiers() []*Verifier { + if x != nil { + return x.Verifiers + } + return nil +} + +type DSSELogEntryV002 struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The hash of the DSSE payload + PayloadHash *v1.HashOutput `protobuf:"bytes,1,opt,name=payloadHash,proto3" json:"payloadHash,omitempty"` + // Signatures and their associated verification material used to verify the payload + Signatures []*Signature `protobuf:"bytes,2,rep,name=signatures,proto3" json:"signatures,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DSSELogEntryV002) Reset() { + *x = DSSELogEntryV002{} + mi := &file_rekor_v2_dsse_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DSSELogEntryV002) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DSSELogEntryV002) ProtoMessage() {} + +func (x *DSSELogEntryV002) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_dsse_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DSSELogEntryV002.ProtoReflect.Descriptor instead. +func (*DSSELogEntryV002) Descriptor() ([]byte, []int) { + return file_rekor_v2_dsse_proto_rawDescGZIP(), []int{1} +} + +func (x *DSSELogEntryV002) GetPayloadHash() *v1.HashOutput { + if x != nil { + return x.PayloadHash + } + return nil +} + +func (x *DSSELogEntryV002) GetSignatures() []*Signature { + if x != nil { + return x.Signatures + } + return nil +} + +var File_rekor_v2_dsse_proto protoreflect.FileDescriptor + +var file_rekor_v2_dsse_proto_rawDesc = string([]byte{ + 0x0a, 0x13, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x73, 0x73, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, + 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8b, 0x01, + 0x0a, 0x0f, 0x44, 0x53, 0x53, 0x45, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, + 0x32, 0x12, 0x34, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x65, + 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x09, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x65, 0x76, + 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 
0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, + 0x76, 0x32, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x09, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x10, + 0x44, 0x53, 0x53, 0x45, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32, + 0x12, 0x49, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x45, 0x0a, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, + 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x42, 0x7d, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x32, 0x42, 0x0b, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x44, 0x73, 0x73, 0x65, 0x50, 0x01, + 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, + 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, + 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_rekor_v2_dsse_proto_rawDescOnce sync.Once + file_rekor_v2_dsse_proto_rawDescData []byte +) + +func file_rekor_v2_dsse_proto_rawDescGZIP() []byte { + file_rekor_v2_dsse_proto_rawDescOnce.Do(func() { + file_rekor_v2_dsse_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_dsse_proto_rawDesc), len(file_rekor_v2_dsse_proto_rawDesc))) + }) + return file_rekor_v2_dsse_proto_rawDescData +} + +var file_rekor_v2_dsse_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_rekor_v2_dsse_proto_goTypes = []any{ + (*DSSERequestV002)(nil), // 0: dev.sigstore.rekor.v2.DSSERequestV002 + (*DSSELogEntryV002)(nil), // 1: dev.sigstore.rekor.v2.DSSELogEntryV002 + (*dsse.Envelope)(nil), // 2: io.intoto.Envelope + (*Verifier)(nil), // 3: dev.sigstore.rekor.v2.Verifier + (*v1.HashOutput)(nil), // 4: dev.sigstore.common.v1.HashOutput + (*Signature)(nil), // 5: dev.sigstore.rekor.v2.Signature +} +var file_rekor_v2_dsse_proto_depIdxs = []int32{ + 2, // 0: dev.sigstore.rekor.v2.DSSERequestV002.envelope:type_name -> io.intoto.Envelope + 3, // 1: dev.sigstore.rekor.v2.DSSERequestV002.verifiers:type_name -> dev.sigstore.rekor.v2.Verifier + 4, // 2: dev.sigstore.rekor.v2.DSSELogEntryV002.payloadHash:type_name -> dev.sigstore.common.v1.HashOutput + 5, // 3: dev.sigstore.rekor.v2.DSSELogEntryV002.signatures:type_name -> dev.sigstore.rekor.v2.Signature + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method 
input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_rekor_v2_dsse_proto_init() } +func file_rekor_v2_dsse_proto_init() { + if File_rekor_v2_dsse_proto != nil { + return + } + file_rekor_v2_verifier_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_dsse_proto_rawDesc), len(file_rekor_v2_dsse_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rekor_v2_dsse_proto_goTypes, + DependencyIndexes: file_rekor_v2_dsse_proto_depIdxs, + MessageInfos: file_rekor_v2_dsse_proto_msgTypes, + }.Build() + File_rekor_v2_dsse_proto = out.File + file_rekor_v2_dsse_proto_goTypes = nil + file_rekor_v2_dsse_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go new file mode 100644 index 000000000000..fded303ae7ad --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go @@ -0,0 +1,395 @@ +// Copyright 2025 The Sigstore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: rekor/v2/entry.proto + +package protobuf + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Entry is the message that is canonicalized and uploaded to the log. +// This format is meant to be compliant with Rekor v1 entries in that +// the `apiVersion` and `kind` can be parsed before parsing the spec. +// Clients are expected to understand and handle the differences in the +// contents of `spec` between Rekor v1 (a polymorphic OpenAPI defintion) +// and Rekor v2 (a typed proto defintion). 
+type Entry struct { + state protoimpl.MessageState `protogen:"open.v1"` + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + ApiVersion string `protobuf:"bytes,2,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` + Spec *Spec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Entry) Reset() { + *x = Entry{} + mi := &file_rekor_v2_entry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entry) ProtoMessage() {} + +func (x *Entry) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_entry_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Entry.ProtoReflect.Descriptor instead. +func (*Entry) Descriptor() ([]byte, []int) { + return file_rekor_v2_entry_proto_rawDescGZIP(), []int{0} +} + +func (x *Entry) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *Entry) GetApiVersion() string { + if x != nil { + return x.ApiVersion + } + return "" +} + +func (x *Entry) GetSpec() *Spec { + if x != nil { + return x.Spec + } + return nil +} + +// Spec contains one of the Rekor entry types. +type Spec struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Spec: + // + // *Spec_HashedRekordV002 + // *Spec_DsseV002 + Spec isSpec_Spec `protobuf_oneof:"spec"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Spec) Reset() { + *x = Spec{} + mi := &file_rekor_v2_entry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Spec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Spec) ProtoMessage() {} + +func (x *Spec) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_entry_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Spec.ProtoReflect.Descriptor instead. 
+func (*Spec) Descriptor() ([]byte, []int) { + return file_rekor_v2_entry_proto_rawDescGZIP(), []int{1} +} + +func (x *Spec) GetSpec() isSpec_Spec { + if x != nil { + return x.Spec + } + return nil +} + +func (x *Spec) GetHashedRekordV002() *HashedRekordLogEntryV002 { + if x != nil { + if x, ok := x.Spec.(*Spec_HashedRekordV002); ok { + return x.HashedRekordV002 + } + } + return nil +} + +func (x *Spec) GetDsseV002() *DSSELogEntryV002 { + if x != nil { + if x, ok := x.Spec.(*Spec_DsseV002); ok { + return x.DsseV002 + } + } + return nil +} + +type isSpec_Spec interface { + isSpec_Spec() +} + +type Spec_HashedRekordV002 struct { + HashedRekordV002 *HashedRekordLogEntryV002 `protobuf:"bytes,1,opt,name=hashed_rekord_v002,json=hashedRekordV002,proto3,oneof"` +} + +type Spec_DsseV002 struct { + DsseV002 *DSSELogEntryV002 `protobuf:"bytes,2,opt,name=dsse_v002,json=dsseV002,proto3,oneof"` +} + +func (*Spec_HashedRekordV002) isSpec_Spec() {} + +func (*Spec_DsseV002) isSpec_Spec() {} + +// Create a new HashedRekord or DSSE +type CreateEntryRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Spec: + // + // *CreateEntryRequest_HashedRekordRequestV002 + // *CreateEntryRequest_DsseRequestV002 + Spec isCreateEntryRequest_Spec `protobuf_oneof:"spec"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateEntryRequest) Reset() { + *x = CreateEntryRequest{} + mi := &file_rekor_v2_entry_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateEntryRequest) ProtoMessage() {} + +func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_entry_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateEntryRequest.ProtoReflect.Descriptor instead. 
+func (*CreateEntryRequest) Descriptor() ([]byte, []int) { + return file_rekor_v2_entry_proto_rawDescGZIP(), []int{2} +} + +func (x *CreateEntryRequest) GetSpec() isCreateEntryRequest_Spec { + if x != nil { + return x.Spec + } + return nil +} + +func (x *CreateEntryRequest) GetHashedRekordRequestV002() *HashedRekordRequestV002 { + if x != nil { + if x, ok := x.Spec.(*CreateEntryRequest_HashedRekordRequestV002); ok { + return x.HashedRekordRequestV002 + } + } + return nil +} + +func (x *CreateEntryRequest) GetDsseRequestV002() *DSSERequestV002 { + if x != nil { + if x, ok := x.Spec.(*CreateEntryRequest_DsseRequestV002); ok { + return x.DsseRequestV002 + } + } + return nil +} + +type isCreateEntryRequest_Spec interface { + isCreateEntryRequest_Spec() +} + +type CreateEntryRequest_HashedRekordRequestV002 struct { + HashedRekordRequestV002 *HashedRekordRequestV002 `protobuf:"bytes,1,opt,name=hashed_rekord_request_v002,json=hashedRekordRequestV002,proto3,oneof"` +} + +type CreateEntryRequest_DsseRequestV002 struct { + DsseRequestV002 *DSSERequestV002 `protobuf:"bytes,2,opt,name=dsse_request_v002,json=dsseRequestV002,proto3,oneof"` +} + +func (*CreateEntryRequest_HashedRekordRequestV002) isCreateEntryRequest_Spec() {} + +func (*CreateEntryRequest_DsseRequestV002) isCreateEntryRequest_Spec() {} + +var File_rekor_v2_entry_proto protoreflect.FileDescriptor + +var file_rekor_v2_entry_proto_rawDesc = string([]byte{ + 0x0a, 0x14, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x13, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x73, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x61, + 0x73, 0x68, 0x65, 0x64, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x7c, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x12, 0x24, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x70, + 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0xc1, + 0x01, 0x0a, 0x04, 0x53, 0x70, 0x65, 0x63, 0x12, 0x64, 0x0a, 0x12, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x64, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 
0x52, 0x10, 0x68, 0x61, 0x73, + 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x56, 0x30, 0x30, 0x32, 0x12, 0x4b, 0x0a, + 0x09, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x53, 0x53, 0x45, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, + 0x52, 0x08, 0x64, 0x73, 0x73, 0x65, 0x56, 0x30, 0x30, 0x32, 0x42, 0x06, 0x0a, 0x04, 0x73, 0x70, + 0x65, 0x63, 0x22, 0xeb, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x72, 0x0a, 0x1a, 0x68, 0x61, 0x73, + 0x68, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, + 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x48, 0x00, 0x52, 0x17, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x12, 0x59, 0x0a, + 0x11, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x30, + 0x30, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x53, 0x53, 0x45, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x73, 0x73, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x42, 0x06, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, + 0x42, 0x7e, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x42, + 0x0c, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x50, 0x01, 0x5a, + 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73, + 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x32, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_rekor_v2_entry_proto_rawDescOnce sync.Once + file_rekor_v2_entry_proto_rawDescData []byte +) + +func file_rekor_v2_entry_proto_rawDescGZIP() []byte { + file_rekor_v2_entry_proto_rawDescOnce.Do(func() { + file_rekor_v2_entry_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_entry_proto_rawDesc), len(file_rekor_v2_entry_proto_rawDesc))) + }) + return file_rekor_v2_entry_proto_rawDescData +} + +var file_rekor_v2_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_rekor_v2_entry_proto_goTypes = []any{ + (*Entry)(nil), // 0: dev.sigstore.rekor.v2.Entry + 
(*Spec)(nil), // 1: dev.sigstore.rekor.v2.Spec + (*CreateEntryRequest)(nil), // 2: dev.sigstore.rekor.v2.CreateEntryRequest + (*HashedRekordLogEntryV002)(nil), // 3: dev.sigstore.rekor.v2.HashedRekordLogEntryV002 + (*DSSELogEntryV002)(nil), // 4: dev.sigstore.rekor.v2.DSSELogEntryV002 + (*HashedRekordRequestV002)(nil), // 5: dev.sigstore.rekor.v2.HashedRekordRequestV002 + (*DSSERequestV002)(nil), // 6: dev.sigstore.rekor.v2.DSSERequestV002 +} +var file_rekor_v2_entry_proto_depIdxs = []int32{ + 1, // 0: dev.sigstore.rekor.v2.Entry.spec:type_name -> dev.sigstore.rekor.v2.Spec + 3, // 1: dev.sigstore.rekor.v2.Spec.hashed_rekord_v002:type_name -> dev.sigstore.rekor.v2.HashedRekordLogEntryV002 + 4, // 2: dev.sigstore.rekor.v2.Spec.dsse_v002:type_name -> dev.sigstore.rekor.v2.DSSELogEntryV002 + 5, // 3: dev.sigstore.rekor.v2.CreateEntryRequest.hashed_rekord_request_v002:type_name -> dev.sigstore.rekor.v2.HashedRekordRequestV002 + 6, // 4: dev.sigstore.rekor.v2.CreateEntryRequest.dsse_request_v002:type_name -> dev.sigstore.rekor.v2.DSSERequestV002 + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_rekor_v2_entry_proto_init() } +func file_rekor_v2_entry_proto_init() { + if File_rekor_v2_entry_proto != nil { + return + } + file_rekor_v2_dsse_proto_init() + file_rekor_v2_hashedrekord_proto_init() + file_rekor_v2_entry_proto_msgTypes[1].OneofWrappers = []any{ + (*Spec_HashedRekordV002)(nil), + (*Spec_DsseV002)(nil), + } + file_rekor_v2_entry_proto_msgTypes[2].OneofWrappers = []any{ + (*CreateEntryRequest_HashedRekordRequestV002)(nil), + (*CreateEntryRequest_DsseRequestV002)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_entry_proto_rawDesc), len(file_rekor_v2_entry_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rekor_v2_entry_proto_goTypes, + DependencyIndexes: file_rekor_v2_entry_proto_depIdxs, + MessageInfos: file_rekor_v2_entry_proto_msgTypes, + }.Build() + File_rekor_v2_entry_proto = out.File + file_rekor_v2_entry_proto_goTypes = nil + file_rekor_v2_entry_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go new file mode 100644 index 000000000000..21d0612dafde --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go @@ -0,0 +1,243 @@ +// Copyright 2025 The Sigstore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: rekor/v2/hashedrekord.proto + +package protobuf + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A request to add a hashedrekord v0.0.2 to the log +type HashedRekordRequestV002 struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The hashed data + Digest []byte `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + // A single signature over the hashed data with the verifier needed to validate it + Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HashedRekordRequestV002) Reset() { + *x = HashedRekordRequestV002{} + mi := &file_rekor_v2_hashedrekord_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HashedRekordRequestV002) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashedRekordRequestV002) ProtoMessage() {} + +func (x *HashedRekordRequestV002) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_hashedrekord_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashedRekordRequestV002.ProtoReflect.Descriptor instead. +func (*HashedRekordRequestV002) Descriptor() ([]byte, []int) { + return file_rekor_v2_hashedrekord_proto_rawDescGZIP(), []int{0} +} + +func (x *HashedRekordRequestV002) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +func (x *HashedRekordRequestV002) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} + +type HashedRekordLogEntryV002 struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The hashed data + Data *v1.HashOutput `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // A single signature over the hashed data with the verifier needed to validate it + Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HashedRekordLogEntryV002) Reset() { + *x = HashedRekordLogEntryV002{} + mi := &file_rekor_v2_hashedrekord_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HashedRekordLogEntryV002) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashedRekordLogEntryV002) ProtoMessage() {} + +func (x *HashedRekordLogEntryV002) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_hashedrekord_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashedRekordLogEntryV002.ProtoReflect.Descriptor instead. 
+func (*HashedRekordLogEntryV002) Descriptor() ([]byte, []int) { + return file_rekor_v2_hashedrekord_proto_rawDescGZIP(), []int{1} +} + +func (x *HashedRekordLogEntryV002) GetData() *v1.HashOutput { + if x != nil { + return x.Data + } + return nil +} + +func (x *HashedRekordLogEntryV002) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} + +var File_rekor_v2_hashedrekord_proto protoreflect.FileDescriptor + +var file_rekor_v2_hashedrekord_proto_rawDesc = string([]byte{ + 0x0a, 0x1b, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x64, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, + 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x72, 0x65, + 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7b, 0x0a, 0x17, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, + 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, + 0x12, 0x1b, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x22, 0x9c, 0x01, 0x0a, 0x18, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, + 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32, 0x12, + 0x3b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x43, 0x0a, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, + 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x42, 0x85, 0x01, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x32, 0x42, 0x13, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, + 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, + 0x6b, 
0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, + 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +}) + +var ( + file_rekor_v2_hashedrekord_proto_rawDescOnce sync.Once + file_rekor_v2_hashedrekord_proto_rawDescData []byte +) + +func file_rekor_v2_hashedrekord_proto_rawDescGZIP() []byte { + file_rekor_v2_hashedrekord_proto_rawDescOnce.Do(func() { + file_rekor_v2_hashedrekord_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_hashedrekord_proto_rawDesc), len(file_rekor_v2_hashedrekord_proto_rawDesc))) + }) + return file_rekor_v2_hashedrekord_proto_rawDescData +} + +var file_rekor_v2_hashedrekord_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_rekor_v2_hashedrekord_proto_goTypes = []any{ + (*HashedRekordRequestV002)(nil), // 0: dev.sigstore.rekor.v2.HashedRekordRequestV002 + (*HashedRekordLogEntryV002)(nil), // 1: dev.sigstore.rekor.v2.HashedRekordLogEntryV002 + (*Signature)(nil), // 2: dev.sigstore.rekor.v2.Signature + (*v1.HashOutput)(nil), // 3: dev.sigstore.common.v1.HashOutput +} +var file_rekor_v2_hashedrekord_proto_depIdxs = []int32{ + 2, // 0: dev.sigstore.rekor.v2.HashedRekordRequestV002.signature:type_name -> dev.sigstore.rekor.v2.Signature + 3, // 1: dev.sigstore.rekor.v2.HashedRekordLogEntryV002.data:type_name -> dev.sigstore.common.v1.HashOutput + 2, // 2: dev.sigstore.rekor.v2.HashedRekordLogEntryV002.signature:type_name -> dev.sigstore.rekor.v2.Signature + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_rekor_v2_hashedrekord_proto_init() } +func file_rekor_v2_hashedrekord_proto_init() { + if File_rekor_v2_hashedrekord_proto != nil { + return + } + file_rekor_v2_verifier_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_hashedrekord_proto_rawDesc), len(file_rekor_v2_hashedrekord_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rekor_v2_hashedrekord_proto_goTypes, + DependencyIndexes: file_rekor_v2_hashedrekord_proto_depIdxs, + MessageInfos: file_rekor_v2_hashedrekord_proto_msgTypes, + }.Build() + File_rekor_v2_hashedrekord_proto = out.File + file_rekor_v2_hashedrekord_proto_goTypes = nil + file_rekor_v2_hashedrekord_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go new file mode 100644 index 000000000000..f261cdc35965 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go @@ -0,0 +1,287 @@ +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: rekor/v2/rekor_service.proto + +package protobuf + +import ( + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + httpbody "google.golang.org/genproto/googleapis/api/httpbody" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request for a full or partial tile (see https://github.com/C2SP/C2SP/blob/main/tlog-tiles.md#merkle-tree) +type TileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + L uint32 `protobuf:"varint,1,opt,name=L,proto3" json:"L,omitempty"` + // N must be either an index encoded as zero-padded 3-digit path elements, e.g. "x123/x456/789", + // and may end with ".p/<W>", where "<W>" is a uint8 + N string `protobuf:"bytes,2,opt,name=N,proto3" json:"N,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TileRequest) Reset() { + *x = TileRequest{} + mi := &file_rekor_v2_rekor_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TileRequest) ProtoMessage() {} + +func (x *TileRequest) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_rekor_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TileRequest.ProtoReflect.Descriptor instead. +func (*TileRequest) Descriptor() ([]byte, []int) { + return file_rekor_v2_rekor_service_proto_rawDescGZIP(), []int{0} +} + +func (x *TileRequest) GetL() uint32 { + if x != nil { + return x.L + } + return 0 +} + +func (x *TileRequest) GetN() string { + if x != nil { + return x.N + } + return "" +} + +// Request for a full or partial entry bundle (see https://github.com/C2SP/C2SP/blob/main/tlog-tiles.md#log-entries) +type EntryBundleRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // N must be either an index encoded as zero-padded 3-digit path elements, e.g.
"x123/x456/789", + // and may end with ".p/", where "" is a uint8 + N string `protobuf:"bytes,1,opt,name=N,proto3" json:"N,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntryBundleRequest) Reset() { + *x = EntryBundleRequest{} + mi := &file_rekor_v2_rekor_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntryBundleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntryBundleRequest) ProtoMessage() {} + +func (x *EntryBundleRequest) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_rekor_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntryBundleRequest.ProtoReflect.Descriptor instead. +func (*EntryBundleRequest) Descriptor() ([]byte, []int) { + return file_rekor_v2_rekor_service_proto_rawDescGZIP(), []int{1} +} + +func (x *EntryBundleRequest) GetN() string { + if x != nil { + return x.N + } + return "" +} + +var File_rekor_v2_rekor_service_proto protoreflect.FileDescriptor + +var file_rekor_v2_rekor_service_proto_rawDesc = string([]byte{ + 0x0a, 0x1c, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, + 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x14, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x29, 0x0a, 0x0b, 0x54, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x01, 0x4c, 0x12, 0x0c, 0x0a, 0x01, 0x4e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x01, 0x4e, 0x22, 0x22, 0x0a, 0x12, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x4e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x01, 0x4e, 0x32, 0xc8, 0x03, 0x0a, 0x05, 0x52, 0x65, 0x6b, 0x6f, 0x72, + 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x29, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 
0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, + 0x3a, 0x01, 0x2a, 0x22, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x6c, 0x6f, 0x67, + 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x54, + 0x69, 0x6c, 0x65, 0x12, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x1f, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x74, + 0x69, 0x6c, 0x65, 0x2f, 0x7b, 0x4c, 0x7d, 0x2f, 0x7b, 0x4e, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x76, + 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x12, 0x29, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75, + 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, + 0x79, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x32, 0x2f, 0x74, 0x69, 0x6c, 0x65, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x2f, + 0x7b, 0x4e, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x59, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x42, 0xc0, 0x03, 0x92, 0x41, 0xbc, 0x02, 0x12, 0xbc, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x6b, + 0x6f, 0x72, 0x20, 0x76, 0x32, 0x22, 0x5a, 0x0a, 0x10, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x20, 0x76, + 0x32, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, + 0x65, 0x73, 0x1a, 0x1d, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2d, 0x64, 0x65, 0x76, + 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2a, 0x4f, 0x0a, 0x12, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x20, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x20, 0x32, 0x2e, 0x30, 0x12, 0x39, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73, + 0x2f, 0x62, 0x6c, 
0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x69, 0x6e, 0x2f, 0x4c, 0x49, 0x43, 0x45, 0x4e, + 0x53, 0x45, 0x32, 0x03, 0x32, 0x2e, 0x30, 0x1a, 0x14, 0x2a, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x2a, 0x01, 0x01, + 0x32, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6a, 0x73, + 0x6f, 0x6e, 0x3a, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x6a, 0x73, 0x6f, 0x6e, 0x72, 0x3e, 0x0a, 0x13, 0x4d, 0x6f, 0x72, 0x65, 0x20, 0x61, 0x62, 0x6f, + 0x75, 0x74, 0x20, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x20, 0x76, 0x32, 0x12, 0x27, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, + 0x69, 0x6c, 0x65, 0x73, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x32, 0x42, 0x0e, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, + 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02, + 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, + 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_rekor_v2_rekor_service_proto_rawDescOnce sync.Once + file_rekor_v2_rekor_service_proto_rawDescData []byte +) + +func file_rekor_v2_rekor_service_proto_rawDescGZIP() []byte { + file_rekor_v2_rekor_service_proto_rawDescOnce.Do(func() { + file_rekor_v2_rekor_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_rekor_service_proto_rawDesc), len(file_rekor_v2_rekor_service_proto_rawDesc))) + }) + return file_rekor_v2_rekor_service_proto_rawDescData +} + +var file_rekor_v2_rekor_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_rekor_v2_rekor_service_proto_goTypes = []any{ + (*TileRequest)(nil), // 0: dev.sigstore.rekor.v2.TileRequest + (*EntryBundleRequest)(nil), // 1: dev.sigstore.rekor.v2.EntryBundleRequest + (*CreateEntryRequest)(nil), // 2: dev.sigstore.rekor.v2.CreateEntryRequest + (*emptypb.Empty)(nil), // 3: google.protobuf.Empty + (*v1.TransparencyLogEntry)(nil), // 4: dev.sigstore.rekor.v1.TransparencyLogEntry + (*httpbody.HttpBody)(nil), // 5: google.api.HttpBody +} +var file_rekor_v2_rekor_service_proto_depIdxs = []int32{ + 2, // 0: dev.sigstore.rekor.v2.Rekor.CreateEntry:input_type -> dev.sigstore.rekor.v2.CreateEntryRequest + 0, // 1: dev.sigstore.rekor.v2.Rekor.GetTile:input_type -> dev.sigstore.rekor.v2.TileRequest + 1, // 2: dev.sigstore.rekor.v2.Rekor.GetEntryBundle:input_type -> dev.sigstore.rekor.v2.EntryBundleRequest + 3, // 3: dev.sigstore.rekor.v2.Rekor.GetCheckpoint:input_type -> google.protobuf.Empty + 4, // 4: dev.sigstore.rekor.v2.Rekor.CreateEntry:output_type -> dev.sigstore.rekor.v1.TransparencyLogEntry + 5, // 5: dev.sigstore.rekor.v2.Rekor.GetTile:output_type -> google.api.HttpBody + 5, // 6: dev.sigstore.rekor.v2.Rekor.GetEntryBundle:output_type -> google.api.HttpBody + 5, // 7: 
dev.sigstore.rekor.v2.Rekor.GetCheckpoint:output_type -> google.api.HttpBody + 4, // [4:8] is the sub-list for method output_type + 0, // [0:4] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_rekor_v2_rekor_service_proto_init() } +func file_rekor_v2_rekor_service_proto_init() { + if File_rekor_v2_rekor_service_proto != nil { + return + } + file_rekor_v2_entry_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_rekor_service_proto_rawDesc), len(file_rekor_v2_rekor_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_rekor_v2_rekor_service_proto_goTypes, + DependencyIndexes: file_rekor_v2_rekor_service_proto_depIdxs, + MessageInfos: file_rekor_v2_rekor_service_proto_msgTypes, + }.Build() + File_rekor_v2_rekor_service_proto = out.File + file_rekor_v2_rekor_service_proto_goTypes = nil + file_rekor_v2_rekor_service_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go new file mode 100644 index 000000000000..e0d4857bee40 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go @@ -0,0 +1,381 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: rekor/v2/rekor_service.proto + +/* +Package protobuf is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package protobuf + +import ( + "context" + "errors" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" +) + +// Suppress "imported and not used" errors +var ( + _ codes.Code + _ io.Reader + _ status.Status + _ = errors.New + _ = runtime.String + _ = utilities.NewDoubleArray + _ = metadata.Join +) + +func request_Rekor_CreateEntry_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq CreateEntryRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.CreateEntry(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_Rekor_CreateEntry_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq CreateEntryRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.CreateEntry(ctx, &protoReq) + return msg, metadata, err +} + +func request_Rekor_GetTile_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq TileRequest + metadata runtime.ServerMetadata + err error + ) + io.Copy(io.Discard, req.Body) + val, ok := pathParams["L"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "L") + } + protoReq.L, err = runtime.Uint32(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "L", err) + } + val, ok = pathParams["N"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N") + } + protoReq.N, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err) + } + msg, err := client.GetTile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_Rekor_GetTile_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq TileRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["L"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "L") + } + protoReq.L, err = runtime.Uint32(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "L", err) + } + val, ok = pathParams["N"] + if !ok { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "missing parameter %s", "N") + } + protoReq.N, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err) + } + msg, err := server.GetTile(ctx, &protoReq) + return msg, metadata, err +} + +func request_Rekor_GetEntryBundle_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq EntryBundleRequest + metadata runtime.ServerMetadata + err error + ) + io.Copy(io.Discard, req.Body) + val, ok := pathParams["N"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N") + } + protoReq.N, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err) + } + msg, err := client.GetEntryBundle(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_Rekor_GetEntryBundle_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq EntryBundleRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["N"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N") + } + protoReq.N, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err) + } + msg, err := server.GetEntryBundle(ctx, &protoReq) + return msg, metadata, err +} + +func request_Rekor_GetCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq emptypb.Empty + metadata runtime.ServerMetadata + ) + io.Copy(io.Discard, req.Body) + msg, err := client.GetCheckpoint(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_Rekor_GetCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq emptypb.Empty + metadata runtime.ServerMetadata + ) + msg, err := server.GetCheckpoint(ctx, &protoReq) + return msg, metadata, err +} + +// RegisterRekorHandlerServer registers the http handlers for service Rekor to "mux". +// UnaryRPC :call RekorServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterRekorHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
+func RegisterRekorHandlerServer(ctx context.Context, mux *runtime.ServeMux, server RekorServer) error { + mux.Handle(http.MethodPost, pattern_Rekor_CreateEntry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/CreateEntry", runtime.WithHTTPPathPattern("/api/v2/log/entries")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Rekor_CreateEntry_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_CreateEntry_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_Rekor_GetTile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetTile", runtime.WithHTTPPathPattern("/api/v2/tile/{L}/{N=**}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Rekor_GetTile_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetTile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_Rekor_GetEntryBundle_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle", runtime.WithHTTPPathPattern("/api/v2/tile/entries/{N=**}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Rekor_GetEntryBundle_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetEntryBundle_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_Rekor_GetCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint", runtime.WithHTTPPathPattern("/api/v2/checkpoint")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Rekor_GetCheckpoint_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetCheckpoint_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + + return nil +} + +// RegisterRekorHandlerFromEndpoint is same as RegisterRekorHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterRekorHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.NewClient(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + return RegisterRekorHandler(ctx, mux, conn) +} + +// RegisterRekorHandler registers the http handlers for service Rekor to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterRekorHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterRekorHandlerClient(ctx, mux, NewRekorClient(conn)) +} + +// RegisterRekorHandlerClient registers the http handlers for service Rekor +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "RekorClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "RekorClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "RekorClient" to call the correct interceptors. This client ignores the HTTP middlewares. +func RegisterRekorHandlerClient(ctx context.Context, mux *runtime.ServeMux, client RekorClient) error { + mux.Handle(http.MethodPost, pattern_Rekor_CreateEntry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/CreateEntry", runtime.WithHTTPPathPattern("/api/v2/log/entries")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Rekor_CreateEntry_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_CreateEntry_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_Rekor_GetTile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetTile", runtime.WithHTTPPathPattern("/api/v2/tile/{L}/{N=**}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Rekor_GetTile_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetTile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_Rekor_GetEntryBundle_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle", runtime.WithHTTPPathPattern("/api/v2/tile/entries/{N=**}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Rekor_GetEntryBundle_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetEntryBundle_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_Rekor_GetCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint", runtime.WithHTTPPathPattern("/api/v2/checkpoint")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Rekor_GetCheckpoint_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetCheckpoint_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + return nil +} + +var ( + pattern_Rekor_CreateEntry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v2", "log", "entries"}, "")) + pattern_Rekor_GetTile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "v2", "tile", "L", "N"}, "")) + pattern_Rekor_GetEntryBundle_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "v2", "tile", "entries", "N"}, "")) + pattern_Rekor_GetCheckpoint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v2", "checkpoint"}, "")) +) + +var ( + forward_Rekor_CreateEntry_0 = runtime.ForwardResponseMessage + forward_Rekor_GetTile_0 = runtime.ForwardResponseMessage + forward_Rekor_GetEntryBundle_0 = runtime.ForwardResponseMessage + forward_Rekor_GetCheckpoint_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go new file mode 100644 index 000000000000..807d630524a0 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go @@ -0,0 +1,266 @@ +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v6.30.2 +// source: rekor/v2/rekor_service.proto + +package protobuf + +import ( + context "context" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + httpbody "google.golang.org/genproto/googleapis/api/httpbody" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Rekor_CreateEntry_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/CreateEntry" + Rekor_GetTile_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetTile" + Rekor_GetEntryBundle_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle" + Rekor_GetCheckpoint_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint" +) + +// RekorClient is the client API for Rekor service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// A service for sigstore clients to connect to to create log entries +// and for log monitors and witnesses to audit/inspect the log +type RekorClient interface { + // Create an entry in the log + CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*v1.TransparencyLogEntry, error) + // Get a tile from the log + GetTile(ctx context.Context, in *TileRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) + // Get an entry bundle from the log + GetEntryBundle(ctx context.Context, in *EntryBundleRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) + // Get a checkpoint from the log + GetCheckpoint(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*httpbody.HttpBody, error) +} + +type rekorClient struct { + cc grpc.ClientConnInterface +} + +func NewRekorClient(cc grpc.ClientConnInterface) RekorClient { + return &rekorClient{cc} +} + +func (c *rekorClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*v1.TransparencyLogEntry, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(v1.TransparencyLogEntry) + err := c.cc.Invoke(ctx, Rekor_CreateEntry_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *rekorClient) GetTile(ctx context.Context, in *TileRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(httpbody.HttpBody) + err := c.cc.Invoke(ctx, Rekor_GetTile_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *rekorClient) GetEntryBundle(ctx context.Context, in *EntryBundleRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(httpbody.HttpBody) + err := c.cc.Invoke(ctx, Rekor_GetEntryBundle_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *rekorClient) GetCheckpoint(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*httpbody.HttpBody, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(httpbody.HttpBody) + err := c.cc.Invoke(ctx, Rekor_GetCheckpoint_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RekorServer is the server API for Rekor service. +// All implementations must embed UnimplementedRekorServer +// for forward compatibility. +// +// A service for sigstore clients to connect to to create log entries +// and for log monitors and witnesses to audit/inspect the log +type RekorServer interface { + // Create an entry in the log + CreateEntry(context.Context, *CreateEntryRequest) (*v1.TransparencyLogEntry, error) + // Get a tile from the log + GetTile(context.Context, *TileRequest) (*httpbody.HttpBody, error) + // Get an entry bundle from the log + GetEntryBundle(context.Context, *EntryBundleRequest) (*httpbody.HttpBody, error) + // Get a checkpoint from the log + GetCheckpoint(context.Context, *emptypb.Empty) (*httpbody.HttpBody, error) + mustEmbedUnimplementedRekorServer() +} + +// UnimplementedRekorServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRekorServer struct{} + +func (UnimplementedRekorServer) CreateEntry(context.Context, *CreateEntryRequest) (*v1.TransparencyLogEntry, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateEntry not implemented") +} +func (UnimplementedRekorServer) GetTile(context.Context, *TileRequest) (*httpbody.HttpBody, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTile not implemented") +} +func (UnimplementedRekorServer) GetEntryBundle(context.Context, *EntryBundleRequest) (*httpbody.HttpBody, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEntryBundle not implemented") +} +func (UnimplementedRekorServer) GetCheckpoint(context.Context, *emptypb.Empty) (*httpbody.HttpBody, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCheckpoint not implemented") +} +func (UnimplementedRekorServer) mustEmbedUnimplementedRekorServer() {} +func (UnimplementedRekorServer) testEmbeddedByValue() {} + +// UnsafeRekorServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RekorServer will +// result in compilation errors. +type UnsafeRekorServer interface { + mustEmbedUnimplementedRekorServer() +} + +func RegisterRekorServer(s grpc.ServiceRegistrar, srv RekorServer) { + // If the following call pancis, it indicates UnimplementedRekorServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Rekor_ServiceDesc, srv) +} + +func _Rekor_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RekorServer).CreateEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Rekor_CreateEntry_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RekorServer).CreateEntry(ctx, req.(*CreateEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Rekor_GetTile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RekorServer).GetTile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Rekor_GetTile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RekorServer).GetTile(ctx, req.(*TileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Rekor_GetEntryBundle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EntryBundleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RekorServer).GetEntryBundle(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Rekor_GetEntryBundle_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RekorServer).GetEntryBundle(ctx, req.(*EntryBundleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Rekor_GetCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RekorServer).GetCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Rekor_GetCheckpoint_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RekorServer).GetCheckpoint(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Rekor_ServiceDesc is the grpc.ServiceDesc for Rekor service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Rekor_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "dev.sigstore.rekor.v2.Rekor", + HandlerType: (*RekorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateEntry", + Handler: _Rekor_CreateEntry_Handler, + }, + { + MethodName: "GetTile", + Handler: _Rekor_GetTile_Handler, + }, + { + MethodName: "GetEntryBundle", + Handler: _Rekor_GetEntryBundle_Handler, + }, + { + MethodName: "GetCheckpoint", + Handler: _Rekor_GetCheckpoint_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rekor/v2/rekor_service.proto", +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go new file mode 100644 index 000000000000..8b9b4f1f81ed --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go @@ -0,0 +1,338 @@ +// Copyright 2025 The Sigstore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: rekor/v2/verifier.proto + +package protobuf + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// PublicKey contains an encoded public key +type PublicKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded public key + RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + mi := &file_rekor_v2_verifier_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_verifier_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. 
+func (*PublicKey) Descriptor() ([]byte, []int) { + return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{0} +} + +func (x *PublicKey) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +// Either a public key or a X.509 cerificiate with an embedded public key +type Verifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Verifier: + // + // *Verifier_PublicKey + // *Verifier_X509Certificate + Verifier isVerifier_Verifier `protobuf_oneof:"verifier"` + // Key encoding and signature algorithm to use for this key + KeyDetails v1.PublicKeyDetails `protobuf:"varint,3,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Verifier) Reset() { + *x = Verifier{} + mi := &file_rekor_v2_verifier_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Verifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Verifier) ProtoMessage() {} + +func (x *Verifier) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_verifier_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Verifier.ProtoReflect.Descriptor instead. +func (*Verifier) Descriptor() ([]byte, []int) { + return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{1} +} + +func (x *Verifier) GetVerifier() isVerifier_Verifier { + if x != nil { + return x.Verifier + } + return nil +} + +func (x *Verifier) GetPublicKey() *PublicKey { + if x != nil { + if x, ok := x.Verifier.(*Verifier_PublicKey); ok { + return x.PublicKey + } + } + return nil +} + +func (x *Verifier) GetX509Certificate() *v1.X509Certificate { + if x != nil { + if x, ok := x.Verifier.(*Verifier_X509Certificate); ok { + return x.X509Certificate + } + } + return nil +} + +func (x *Verifier) GetKeyDetails() v1.PublicKeyDetails { + if x != nil { + return x.KeyDetails + } + return v1.PublicKeyDetails(0) +} + +type isVerifier_Verifier interface { + isVerifier_Verifier() +} + +type Verifier_PublicKey struct { + // DER-encoded public key. 
Encoding method is specified by the key_details attribute + PublicKey *PublicKey `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3,oneof"` +} + +type Verifier_X509Certificate struct { + // DER-encoded certificate + X509Certificate *v1.X509Certificate `protobuf:"bytes,2,opt,name=x509_certificate,json=x509Certificate,proto3,oneof"` +} + +func (*Verifier_PublicKey) isVerifier_Verifier() {} + +func (*Verifier_X509Certificate) isVerifier_Verifier() {} + +// A signature and an associated verifier +type Signature struct { + state protoimpl.MessageState `protogen:"open.v1"` + Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + Verifier *Verifier `protobuf:"bytes,2,opt,name=verifier,proto3" json:"verifier,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Signature) Reset() { + *x = Signature{} + mi := &file_rekor_v2_verifier_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_verifier_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. +func (*Signature) Descriptor() ([]byte, []int) { + return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{2} +} + +func (x *Signature) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *Signature) GetVerifier() *Verifier { + if x != nil { + return x.Verifier + } + return nil +} + +var File_rekor_v2_verifier_proto protoreflect.FileDescriptor + +var file_rekor_v2_verifier_proto_rawDesc = string([]byte{ + 0x0a, 0x17, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, + 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2d, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, + 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x08, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, + 0x00, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x59, 0x0a, 0x10, + 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x6b, 0x65, 0x79, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x22, 0x6c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x1d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x40, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x42, 0x81, 0x01, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x32, 0x42, 0x0f, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, + 0x74, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, + 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, + 0x72, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_rekor_v2_verifier_proto_rawDescOnce sync.Once + file_rekor_v2_verifier_proto_rawDescData []byte +) + +func file_rekor_v2_verifier_proto_rawDescGZIP() []byte { + file_rekor_v2_verifier_proto_rawDescOnce.Do(func() { + file_rekor_v2_verifier_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_verifier_proto_rawDesc), len(file_rekor_v2_verifier_proto_rawDesc))) + }) + return file_rekor_v2_verifier_proto_rawDescData +} + +var file_rekor_v2_verifier_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_rekor_v2_verifier_proto_goTypes = []any{ + (*PublicKey)(nil), // 0: dev.sigstore.rekor.v2.PublicKey + (*Verifier)(nil), // 1: dev.sigstore.rekor.v2.Verifier + (*Signature)(nil), // 2: dev.sigstore.rekor.v2.Signature + (*v1.X509Certificate)(nil), // 3: dev.sigstore.common.v1.X509Certificate + (v1.PublicKeyDetails)(0), // 4: dev.sigstore.common.v1.PublicKeyDetails +} +var file_rekor_v2_verifier_proto_depIdxs = []int32{ + 0, // 0: dev.sigstore.rekor.v2.Verifier.public_key:type_name -> 
dev.sigstore.rekor.v2.PublicKey + 3, // 1: dev.sigstore.rekor.v2.Verifier.x509_certificate:type_name -> dev.sigstore.common.v1.X509Certificate + 4, // 2: dev.sigstore.rekor.v2.Verifier.key_details:type_name -> dev.sigstore.common.v1.PublicKeyDetails + 1, // 3: dev.sigstore.rekor.v2.Signature.verifier:type_name -> dev.sigstore.rekor.v2.Verifier + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_rekor_v2_verifier_proto_init() } +func file_rekor_v2_verifier_proto_init() { + if File_rekor_v2_verifier_proto != nil { + return + } + file_rekor_v2_verifier_proto_msgTypes[1].OneofWrappers = []any{ + (*Verifier_PublicKey)(nil), + (*Verifier_X509Certificate)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_verifier_proto_rawDesc), len(file_rekor_v2_verifier_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rekor_v2_verifier_proto_goTypes, + DependencyIndexes: file_rekor_v2_verifier_proto_depIdxs, + MessageInfos: file_rekor_v2_verifier_proto_msgTypes, + }.Build() + File_rekor_v2_verifier_proto = out.File + file_rekor_v2_verifier_proto_goTypes = nil + file_rekor_v2_verifier_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go new file mode 100644 index 000000000000..03bd6b839cf8 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go @@ -0,0 +1,219 @@ +/* +Copyright 2025 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Heavily borrowed from https://gist.githubusercontent.com/AlCutter/c6c69076dc55652e2d278900ccc1a5e7/raw/aac2bafc17a8efa162bd99b4453070b724779307/ecdsa_note.go - thanks, Al + +package note + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/binary" + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" + "golang.org/x/mod/sumdb/note" +) + +const ( + algEd25519 = 1 + algUndef = 255 + rsaID = "PKIX-RSA-PKCS#1v1.5" +) + +// noteSigner uses an arbitrary sigstore signer to implement golang.org/x/mod/sumdb/note.Signer, +// which is used in Tessera to sign checkpoints in the signed notes format +// (https://github.com/C2SP/C2SP/blob/main/signed-note.md). +type noteSigner struct { + name string + hash uint32 + sign func(msg []byte) ([]byte, error) +} + +// Name returns the server name associated with the key. +func (n *noteSigner) Name() string { + return n.name +} + +// KeyHash returns the key hash. 
+func (n *noteSigner) KeyHash() uint32 { + return n.hash +} + +// Sign returns a signature for the given message. +func (n *noteSigner) Sign(msg []byte) ([]byte, error) { + return n.sign(msg) +} + +type noteVerifier struct { + name string + hash uint32 + verify func(msg, sig []byte) bool +} + +// Name implements note.Verifier. +func (n *noteVerifier) Name() string { + return n.name +} + +// Keyhash implements note.Verifier. +func (n *noteVerifier) KeyHash() uint32 { + return n.hash +} + +// Verify implements note.Verifier. +func (n *noteVerifier) Verify(msg, sig []byte) bool { + return n.verify(msg, sig) +} + +// isValidName reports whether the name conforms to the spec for the origin string of the note text +// as defined in https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md#note-text. +func isValidName(name string) bool { + return name != "" && utf8.ValidString(name) && strings.IndexFunc(name, unicode.IsSpace) < 0 && !strings.Contains(name, "+") +} + +// genConformantKeyHash generates a truncated (4-byte) and non-truncated +// identifier for typical (non-ECDSA) keys. +func genConformantKeyHash(name string, sigType, key []byte) (uint32, []byte) { + hash := sha256.New() + hash.Write([]byte(name)) + hash.Write([]byte("\n")) + hash.Write(sigType) + hash.Write(key) + sum := hash.Sum(nil) + return binary.BigEndian.Uint32(sum), sum +} + +// ed25519KeyHash generates the 4-byte key ID for an Ed25519 public key. +// Ed25519 keys are the only key type compatible with witnessing. +func ed25519KeyHash(name string, key []byte) (uint32, []byte) { + return genConformantKeyHash(name, []byte{algEd25519}, key) +} + +// ecdsaKeyHash generates the 4-byte key ID for an ECDSA public key. +// ECDSA key IDs do not conform to the note standard for other key type +// (see https://github.com/C2SP/C2SP/blob/8991f70ddf8a11de3a68d5a081e7be27e59d87c8/signed-note.md#signature-types). +func ecdsaKeyHash(key *ecdsa.PublicKey) (uint32, []byte, error) { + marshaled, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return 0, nil, fmt.Errorf("marshaling public key: %w", err) + } + hash := sha256.Sum256(marshaled) + return binary.BigEndian.Uint32(hash[:]), hash[:], nil +} + +// rsaKeyhash generates the 4-byte key ID for an RSA public key. +func rsaKeyHash(name string, key *rsa.PublicKey) (uint32, []byte, error) { + marshaled, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return 0, nil, fmt.Errorf("marshaling public key: %w", err) + } + rsaAlg := append([]byte{algUndef}, []byte(rsaID)...) + id, hash := genConformantKeyHash(name, rsaAlg, marshaled) + return id, hash, nil +} + +// KeyHash generates a truncated (4-byte) and non-truncated identifier for a +// public key/origin +func KeyHash(origin string, key crypto.PublicKey) (uint32, []byte, error) { + var keyID uint32 + var logID []byte + var err error + + switch pk := key.(type) { + case *ecdsa.PublicKey: + keyID, logID, err = ecdsaKeyHash(pk) + if err != nil { + return 0, nil, fmt.Errorf("getting ECDSA key hash: %w", err) + } + case ed25519.PublicKey: + keyID, logID = ed25519KeyHash(origin, pk) + case *rsa.PublicKey: + keyID, logID, err = rsaKeyHash(origin, pk) + if err != nil { + return 0, nil, fmt.Errorf("getting RSA key hash: %w", err) + } + default: + return 0, nil, fmt.Errorf("unsupported key type: %T", key) + } + + return keyID, logID, nil +} + +// NewNoteSigner converts a sigstore/sigstore/pkg/signature.Signer into a note.Signer. 
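+//
+// Illustrative only (not part of the upstream doc comment): a hedged sketch of
+// wiring a sigstore signer into checkpoint signing. LoadSignerVerifier and
+// note.Sign are assumed from sigstore/sigstore/pkg/signature and
+// golang.org/x/mod/sumdb/note; "example.com/log" and checkpointBody are
+// placeholders.
+//
+//	// sv implements signature.Signer (and Verifier) over a local private key.
+//	sv, _ := signature.LoadSignerVerifier(priv, crypto.SHA256)
+//	ns, err := NewNoteSigner(ctx, "example.com/log", sv)
+//	if err != nil {
+//		// handle error
+//	}
+//	// checkpointBody is the checkpoint text to be signed as a note.
+//	signed, err := note.Sign(&note.Note{Text: checkpointBody}, ns)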
+func NewNoteSigner(ctx context.Context, origin string, signer signature.Signer) (note.Signer, error) { + if !isValidName(origin) { + return nil, fmt.Errorf("invalid name %s", origin) + } + + pubKey, err := signer.PublicKey() + if err != nil { + return nil, fmt.Errorf("getting public key: %w", err) + } + + keyID, _, err := KeyHash(origin, pubKey) + if err != nil { + return nil, err + } + + sign := func(msg []byte) ([]byte, error) { + return signer.SignMessage(bytes.NewReader(msg), options.WithContext(ctx)) + } + + return ¬eSigner{ + name: origin, + hash: keyID, + sign: sign, + }, nil +} + +// NewNoteVerifier converts a sigstore/sigstore/pkg/signature.Verifier into a note.Verifier. +func NewNoteVerifier(origin string, verifier signature.Verifier) (note.Verifier, error) { + if !isValidName(origin) { + return nil, fmt.Errorf("invalid name %s", origin) + } + + pubKey, err := verifier.PublicKey() + if err != nil { + return nil, fmt.Errorf("getting public key: %w", err) + } + + keyID, _, err := KeyHash(origin, pubKey) + if err != nil { + return nil, err + } + + return ¬eVerifier{ + name: origin, + hash: keyID, + verify: func(msg, sig []byte) bool { + if err := verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil { + return false + } + return true + }, + }, nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go new file mode 100644 index 000000000000..b080977229dc --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go @@ -0,0 +1,41 @@ +// Copyright 2025 The Sigstore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verifier + +import ( + "fmt" + + pb "github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf" +) + +// Validate validates there are no missing field in a Verifier protobuf +func Validate(v *pb.Verifier) error { + publicKey := v.GetPublicKey() + x509Cert := v.GetX509Certificate() + if publicKey == nil && x509Cert == nil { + return fmt.Errorf("missing signature public key or X.509 certificate") + } + if publicKey != nil { + if len(publicKey.GetRawBytes()) == 0 { + return fmt.Errorf("missing public key raw bytes") + } + } + if x509Cert != nil { + if len(x509Cert.GetRawBytes()) == 0 { + return fmt.Errorf("missing X.509 certificate raw bytes") + } + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go new file mode 100644 index 000000000000..bb4fedf591c3 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go @@ -0,0 +1,89 @@ +// +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "fmt" + + pbs "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + "github.com/sigstore/rekor-tiles/v2/internal/safeint" + f_log "github.com/transparency-dev/formats/log" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" + sumdb_note "golang.org/x/mod/sumdb/note" +) + +// VerifyInclusionProof verifies an entry's inclusion proof +func VerifyInclusionProof(entry *pbs.TransparencyLogEntry, cp *f_log.Checkpoint) error { //nolint: revive + leafHash := rfc6962.DefaultHasher.HashLeaf(entry.CanonicalizedBody) + index, err := safeint.NewSafeInt64(entry.LogIndex) + if err != nil { + return fmt.Errorf("invalid index: %w", err) + } + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, index.U(), cp.Size, leafHash, entry.InclusionProof.Hashes, cp.Hash); err != nil { + return fmt.Errorf("verifying inclusion: %w", err) + } + return nil +} + +// VerifyCheckpoint verifies the signature on the entry's inclusion proof checkpoint +func VerifyCheckpoint(unverifiedCp string, verifier sumdb_note.Verifier) (*f_log.Checkpoint, error) { //nolint: revive + cp, _, _, err := f_log.ParseCheckpoint([]byte(unverifiedCp), verifier.Name(), verifier) + if err != nil { + return nil, fmt.Errorf("unverified checkpoint signature: %v", err) + } + return cp, nil +} + +// VerifyWitnessedCheckpoint verifies the signature on the entry's inclusion proof checkpoint in addition to witness cosignatures. +// This returns the underlying note which contains all verified signatures. +func VerifyWitnessedCheckpoint(unverifiedCp string, verifier sumdb_note.Verifier, otherVerifiers ...sumdb_note.Verifier) (*f_log.Checkpoint, *sumdb_note.Note, error) { //nolint: revive + cp, _, n, err := f_log.ParseCheckpoint([]byte(unverifiedCp), verifier.Name(), verifier, otherVerifiers...) + if err != nil { + return nil, nil, fmt.Errorf("unverified checkpoint signature: %v", err) + } + return cp, n, nil +} + +// VerifyLogEntry verifies the log entry. This includes verifying the signature on the entry's +// inclusion proof checkpoint and verifying the entry inclusion proof +func VerifyLogEntry(entry *pbs.TransparencyLogEntry, verifier sumdb_note.Verifier) error { //nolint: revive + cp, err := VerifyCheckpoint(entry.GetInclusionProof().GetCheckpoint().GetEnvelope(), verifier) + if err != nil { + return err + } + return VerifyInclusionProof(entry, cp) +} + +// VerifyConsistencyProof verifies the latest checkpoint signature and the consistency proof between a previous log size +// and root hash and the latest checkpoint's size and root hash. This may be used by a C2SP witness. 
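+//
+// Illustrative only (not part of the upstream doc comment): a hedged sketch of
+// how a witness might call this; the state variables below are placeholders.
+//
+//	// oldSize and oldRoot come from the last checkpoint this witness accepted;
+//	// newCheckpointText is the unverified checkpoint just received.
+//	if err := VerifyConsistencyProof(proofHashes, oldSize, oldRoot, newCheckpointText, noteVerifier); err != nil {
+//		// refuse to cosign the new checkpoint
+//	}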
+func VerifyConsistencyProof(consistencyProof [][]byte, oldSize uint64, oldRootHash []byte, newUnverifiedCp string, verifier sumdb_note.Verifier) error { //nolint: revive + newCp, err := VerifyCheckpoint(newUnverifiedCp, verifier) + if err != nil { + return err + } + return proof.VerifyConsistency(rfc6962.DefaultHasher, oldSize, newCp.Size, consistencyProof, oldRootHash, newCp.Hash) +} + +// VerifyConsistencyProofWithCheckpoints verifies previous and latest checkpoint signatures and the consistency proof +// between these checkpoints. This may be used by a monitor that persists checkpoints. +func VerifyConsistencyProofWithCheckpoints(consistencyProof [][]byte, oldUnverifiedCp, newUnverifiedCp string, verifier sumdb_note.Verifier) error { //nolint: revive + oldCp, err := VerifyCheckpoint(oldUnverifiedCp, verifier) + if err != nil { + return err + } + return VerifyConsistencyProof(consistencyProof, oldCp.Size, oldCp.Hash, newUnverifiedCp, verifier) +} diff --git a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md new file mode 100644 index 000000000000..bdff02765c56 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md @@ -0,0 +1,122 @@ +# Contributing + +When contributing to this repository, please first discuss the change you wish +to make via an [issue](https://github.com/sigstore/rekor/issues). + +## Pull Request Process + +1. Create an [issue](https://github.com/sigstore/rekor/issues) + outlining the fix or feature. +2. Fork the rekor repository to your own github account and clone it locally. +3. Hack on your changes. +4. Update the README.md with details of changes to any interface, this includes new environment + variables, exposed ports, useful file locations, CLI parameters and + new or changed configuration values. +5. Correctly format your commit message see [Commit Messages](#Commit Message Guidelines) + below. +6. Ensure that CI passes, if it fails, fix the failures. +7. Every pull request requires a review from the [core rekor team](https://github.com/orgs/github.com/sigstore/teams/core-team) + before merging. +8. If your pull request consists of more than one commit, please squash your + commits as described in [Squash Commits](#Squash Commits) + +## Commit Message Guidelines + +We follow the commit formatting recommendations found on [Chris Beams' How to Write a Git Commit Message article]((https://chris.beams.io/posts/git-commit/). + +Well formed commit messages not only help reviewers understand the nature of +the Pull Request, but also assists the release process where commit messages +are used to generate release notes. + +A good example of a commit message would be as follows: + +``` +Summarize changes in around 50 characters or less + +More detailed explanatory text, if necessary. Wrap it to about 72 +characters or so. In some contexts, the first line is treated as the +subject of the commit and the rest of the text as the body. The +blank line separating the summary from the body is critical (unless +you omit the body entirely); various tools like `log`, `shortlog` +and `rebase` can get confused if you run the two together. + +Explain the problem that this commit is solving. Focus on why you +are making this change as opposed to how (the code explains that). +Are there side effects or other unintuitive consequences of this +change? Here's the place to explain them. + +Further paragraphs come after blank lines. 
+ + - Bullet points are okay, too + + - Typically a hyphen or asterisk is used for the bullet, preceded + by a single space, with blank lines in between, but conventions + vary here + +If you use an issue tracker, put references to them at the bottom, +like this: + +Resolves: #123 +See also: #456, #789 +``` + +Note the `Resolves #123` tag, this references the issue raised and allows us to +ensure issues are associated and closed when a pull request is merged. + +Please refer to [the github help page on message types](https://help.github.com/articles/closing-issues-using-keywords/) +for a complete list of issue references. + +## Squash Commits + +Should your pull request consist of more than one commit (perhaps due to +a change being requested during the review cycle), please perform a git squash +once a reviewer has approved your pull request. + +A squash can be performed as follows. Let's say you have the following commits: + + initial commit + second commit + final commit + +Run the command below with the number set to the total commits you wish to +squash (in our case 3 commits): + + git rebase -i HEAD~3 + +You default text editor will then open up and you will see the following:: + + pick eb36612 initial commit + pick 9ac8968 second commit + pick a760569 final commit + + # Rebase eb1429f..a760569 onto eb1429f (3 commands) + +We want to rebase on top of our first commit, so we change the other two commits +to `squash`: + + pick eb36612 initial commit + squash 9ac8968 second commit + squash a760569 final commit + +After this, should you wish to update your commit message to better summarise +all of your pull request, run: + + git commit --amend + +You will then need to force push (assuming your initial commit(s) were posted +to github): + + git push origin your-branch --force + +Alternatively, a core member can squash your commits within Github. + +## DCO Signoff + +Make sure to sign the [Developer Certificate of +Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff). + +## Code of Conduct + +Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct. +Please take a moment to read the [CODE_OF_CONDUCT.md](https://github.com/sigstore/rekor/blob/master/CODE_OF_CONDUCT.md) document. + diff --git a/vendor/github.com/sigstore/rekor/COPYRIGHT.txt b/vendor/github.com/sigstore/rekor/COPYRIGHT.txt new file mode 100644 index 000000000000..7a01c8498642 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/COPYRIGHT.txt @@ -0,0 +1,14 @@ + +Copyright 2021 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/rekor/LICENSE b/vendor/github.com/sigstore/rekor/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go new file mode 100644 index 000000000000..481fa2bda5ec --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go @@ -0,0 +1,164 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// NewCreateLogEntryParams creates a new CreateLogEntryParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateLogEntryParams() *CreateLogEntryParams { + return &CreateLogEntryParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateLogEntryParamsWithTimeout creates a new CreateLogEntryParams object +// with the ability to set a timeout on a request. +func NewCreateLogEntryParamsWithTimeout(timeout time.Duration) *CreateLogEntryParams { + return &CreateLogEntryParams{ + timeout: timeout, + } +} + +// NewCreateLogEntryParamsWithContext creates a new CreateLogEntryParams object +// with the ability to set a context for a request. +func NewCreateLogEntryParamsWithContext(ctx context.Context) *CreateLogEntryParams { + return &CreateLogEntryParams{ + Context: ctx, + } +} + +// NewCreateLogEntryParamsWithHTTPClient creates a new CreateLogEntryParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateLogEntryParamsWithHTTPClient(client *http.Client) *CreateLogEntryParams { + return &CreateLogEntryParams{ + HTTPClient: client, + } +} + +/* +CreateLogEntryParams contains all the parameters to send to the API endpoint + + for the create log entry operation. + + Typically these are written to a http.Request. +*/ +type CreateLogEntryParams struct { + + // ProposedEntry. + ProposedEntry models.ProposedEntry + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create log entry params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateLogEntryParams) WithDefaults() *CreateLogEntryParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create log entry params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateLogEntryParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create log entry params +func (o *CreateLogEntryParams) WithTimeout(timeout time.Duration) *CreateLogEntryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create log entry params +func (o *CreateLogEntryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create log entry params +func (o *CreateLogEntryParams) WithContext(ctx context.Context) *CreateLogEntryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create log entry params +func (o *CreateLogEntryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create log entry params +func (o *CreateLogEntryParams) WithHTTPClient(client *http.Client) *CreateLogEntryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create log entry params +func (o *CreateLogEntryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithProposedEntry adds the proposedEntry to the create log entry params +func (o *CreateLogEntryParams) WithProposedEntry(proposedEntry models.ProposedEntry) *CreateLogEntryParams { + o.SetProposedEntry(proposedEntry) + return o +} + +// SetProposedEntry adds the proposedEntry to the create log entry params +func (o *CreateLogEntryParams) SetProposedEntry(proposedEntry models.ProposedEntry) { + o.ProposedEntry = proposedEntry +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateLogEntryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.ProposedEntry); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go new file mode 100644 index 000000000000..de665ed9cb52 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go @@ -0,0 +1,397 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// CreateLogEntryReader is a Reader for the CreateLogEntry structure. 
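+// ReadResponse (below) dispatches on the HTTP status code: 201 is returned as
+// *CreateLogEntryCreated, 400 and 409 are returned as the typed errors
+// *CreateLogEntryBadRequest and *CreateLogEntryConflict, and any other status
+// is wrapped in *CreateLogEntryDefault, which is treated as a success only for
+// 2xx codes. (Descriptive note added for readers; not part of the generated code.)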
+type CreateLogEntryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CreateLogEntryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 201: + result := NewCreateLogEntryCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateLogEntryBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewCreateLogEntryConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewCreateLogEntryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCreateLogEntryCreated creates a CreateLogEntryCreated with default headers values +func NewCreateLogEntryCreated() *CreateLogEntryCreated { + return &CreateLogEntryCreated{} +} + +/* +CreateLogEntryCreated describes a response with status code 201, with default header values. + +Returns the entry created in the transparency log +*/ +type CreateLogEntryCreated struct { + + /* UUID of log entry + */ + ETag string + + /* URI location of log entry + + Format: uri + */ + Location strfmt.URI + + Payload models.LogEntry +} + +// IsSuccess returns true when this create log entry created response has a 2xx status code +func (o *CreateLogEntryCreated) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create log entry created response has a 3xx status code +func (o *CreateLogEntryCreated) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create log entry created response has a 4xx status code +func (o *CreateLogEntryCreated) IsClientError() bool { + return false +} + +// IsServerError returns true when this create log entry created response has a 5xx status code +func (o *CreateLogEntryCreated) IsServerError() bool { + return false +} + +// IsCode returns true when this create log entry created response a status code equal to that given +func (o *CreateLogEntryCreated) IsCode(code int) bool { + return code == 201 +} + +// Code gets the status code for the create log entry created response +func (o *CreateLogEntryCreated) Code() int { + return 201 +} + +func (o *CreateLogEntryCreated) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload) +} + +func (o *CreateLogEntryCreated) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload) +} + +func (o *CreateLogEntryCreated) GetPayload() models.LogEntry { + return o.Payload +} + +func (o *CreateLogEntryCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // hydrates response header ETag + hdrETag := response.GetHeader("ETag") + + if hdrETag != "" { + o.ETag = hdrETag + } + + // hydrates response header Location + hdrLocation := response.GetHeader("Location") + + if hdrLocation != "" { + vallocation, err := formats.Parse("uri", hdrLocation) + if err != nil { + return errors.InvalidType("Location", "header", 
"strfmt.URI", hdrLocation) + } + o.Location = *(vallocation.(*strfmt.URI)) + } + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateLogEntryBadRequest creates a CreateLogEntryBadRequest with default headers values +func NewCreateLogEntryBadRequest() *CreateLogEntryBadRequest { + return &CreateLogEntryBadRequest{} +} + +/* +CreateLogEntryBadRequest describes a response with status code 400, with default header values. + +The content supplied to the server was invalid +*/ +type CreateLogEntryBadRequest struct { + Payload *models.Error +} + +// IsSuccess returns true when this create log entry bad request response has a 2xx status code +func (o *CreateLogEntryBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create log entry bad request response has a 3xx status code +func (o *CreateLogEntryBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create log entry bad request response has a 4xx status code +func (o *CreateLogEntryBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create log entry bad request response has a 5xx status code +func (o *CreateLogEntryBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create log entry bad request response a status code equal to that given +func (o *CreateLogEntryBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create log entry bad request response +func (o *CreateLogEntryBadRequest) Code() int { + return 400 +} + +func (o *CreateLogEntryBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload) +} + +func (o *CreateLogEntryBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload) +} + +func (o *CreateLogEntryBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *CreateLogEntryBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateLogEntryConflict creates a CreateLogEntryConflict with default headers values +func NewCreateLogEntryConflict() *CreateLogEntryConflict { + return &CreateLogEntryConflict{} +} + +/* +CreateLogEntryConflict describes a response with status code 409, with default header values. 
+ +The request conflicts with the current state of the transparency log +*/ +type CreateLogEntryConflict struct { + Location strfmt.URI + + Payload *models.Error +} + +// IsSuccess returns true when this create log entry conflict response has a 2xx status code +func (o *CreateLogEntryConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create log entry conflict response has a 3xx status code +func (o *CreateLogEntryConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create log entry conflict response has a 4xx status code +func (o *CreateLogEntryConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this create log entry conflict response has a 5xx status code +func (o *CreateLogEntryConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this create log entry conflict response a status code equal to that given +func (o *CreateLogEntryConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the create log entry conflict response +func (o *CreateLogEntryConflict) Code() int { + return 409 +} + +func (o *CreateLogEntryConflict) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload) +} + +func (o *CreateLogEntryConflict) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload) +} + +func (o *CreateLogEntryConflict) GetPayload() *models.Error { + return o.Payload +} + +func (o *CreateLogEntryConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // hydrates response header Location + hdrLocation := response.GetHeader("Location") + + if hdrLocation != "" { + vallocation, err := formats.Parse("uri", hdrLocation) + if err != nil { + return errors.InvalidType("Location", "header", "strfmt.URI", hdrLocation) + } + o.Location = *(vallocation.(*strfmt.URI)) + } + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateLogEntryDefault creates a CreateLogEntryDefault with default headers values +func NewCreateLogEntryDefault(code int) *CreateLogEntryDefault { + return &CreateLogEntryDefault{ + _statusCode: code, + } +} + +/* +CreateLogEntryDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type CreateLogEntryDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this create log entry default response has a 2xx status code +func (o *CreateLogEntryDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this create log entry default response has a 3xx status code +func (o *CreateLogEntryDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this create log entry default response has a 4xx status code +func (o *CreateLogEntryDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this create log entry default response has a 5xx status code +func (o *CreateLogEntryDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this create log entry default response a status code equal to that given +func (o *CreateLogEntryDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the create log entry default response +func (o *CreateLogEntryDefault) Code() int { + return o._statusCode +} + +func (o *CreateLogEntryDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload) +} + +func (o *CreateLogEntryDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload) +} + +func (o *CreateLogEntryDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *CreateLogEntryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go new file mode 100644 index 000000000000..f893db3dc971 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// New creates a new entries API client. 
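+//
+// Illustrative only (not part of the generated code): a hedged sketch of
+// constructing the entries client against a Rekor server and submitting an
+// entry; the host, base path, and proposedEntry value are placeholders.
+//
+//	rt := httptransport.New("rekor.sigstore.dev", "/", []string{"https"})
+//	c := entries.New(rt, strfmt.Default)
+//	params := entries.NewCreateLogEntryParams().WithProposedEntry(proposedEntry)
+//	created, err := c.CreateLogEntry(params)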
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +// New creates a new entries API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new entries API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + +/* +Client for entries API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption may be used to customize the behavior of Client methods. +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + CreateLogEntry(params *CreateLogEntryParams, opts ...ClientOption) (*CreateLogEntryCreated, error) + + GetLogEntryByIndex(params *GetLogEntryByIndexParams, opts ...ClientOption) (*GetLogEntryByIndexOK, error) + + GetLogEntryByUUID(params *GetLogEntryByUUIDParams, opts ...ClientOption) (*GetLogEntryByUUIDOK, error) + + SearchLogQuery(params *SearchLogQueryParams, opts ...ClientOption) (*SearchLogQueryOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +CreateLogEntry creates an entry in the transparency log + +Creates an entry in the transparency log for a detached signature, public key, and content. Items can be included in the request or fetched by the server when URLs are specified. +*/ +func (a *Client) CreateLogEntry(params *CreateLogEntryParams, opts ...ClientOption) (*CreateLogEntryCreated, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateLogEntryParams() + } + op := &runtime.ClientOperation{ + ID: "createLogEntry", + Method: "POST", + PathPattern: "/api/v1/log/entries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateLogEntryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateLogEntryCreated) + if ok { + return success, nil + } + + // unexpected success response. 
+ // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*CreateLogEntryDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +GetLogEntryByIndex retrieves an entry and inclusion proof from the transparency log if it exists by index +*/ +func (a *Client) GetLogEntryByIndex(params *GetLogEntryByIndexParams, opts ...ClientOption) (*GetLogEntryByIndexOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetLogEntryByIndexParams() + } + op := &runtime.ClientOperation{ + ID: "getLogEntryByIndex", + Method: "GET", + PathPattern: "/api/v1/log/entries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetLogEntryByIndexReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetLogEntryByIndexOK) + if ok { + return success, nil + } + + // unexpected success response. + // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetLogEntryByIndexDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +GetLogEntryByUUID gets log entry and information required to generate an inclusion proof for the entry in the transparency log + +Returns the entry, root hash, tree size, and a list of hashes that can be used to calculate proof of an entry being included in the transparency log +*/ +func (a *Client) GetLogEntryByUUID(params *GetLogEntryByUUIDParams, opts ...ClientOption) (*GetLogEntryByUUIDOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetLogEntryByUUIDParams() + } + op := &runtime.ClientOperation{ + ID: "getLogEntryByUUID", + Method: "GET", + PathPattern: "/api/v1/log/entries/{entryUUID}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetLogEntryByUUIDReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetLogEntryByUUIDOK) + if ok { + return success, nil + } + + // unexpected success response. 
+ // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetLogEntryByUUIDDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SearchLogQuery searches transparency log for one or more log entries +*/ +func (a *Client) SearchLogQuery(params *SearchLogQueryParams, opts ...ClientOption) (*SearchLogQueryOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewSearchLogQueryParams() + } + op := &runtime.ClientOperation{ + ID: "searchLogQuery", + Method: "POST", + PathPattern: "/api/v1/log/entries/retrieve", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SearchLogQueryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*SearchLogQueryOK) + if ok { + return success, nil + } + + // unexpected success response. + // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*SearchLogQueryDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go new file mode 100644 index 000000000000..e22522751190 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetLogEntryByIndexParams creates a new GetLogEntryByIndexParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
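//
// For example, a caller might build the parameters like this (the timeout and
// log index values are illustrative):
//
//	params := NewGetLogEntryByIndexParams().
//		WithTimeout(30 * time.Second).
//		WithLogIndex(42)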
+func NewGetLogEntryByIndexParams() *GetLogEntryByIndexParams { + return &GetLogEntryByIndexParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetLogEntryByIndexParamsWithTimeout creates a new GetLogEntryByIndexParams object +// with the ability to set a timeout on a request. +func NewGetLogEntryByIndexParamsWithTimeout(timeout time.Duration) *GetLogEntryByIndexParams { + return &GetLogEntryByIndexParams{ + timeout: timeout, + } +} + +// NewGetLogEntryByIndexParamsWithContext creates a new GetLogEntryByIndexParams object +// with the ability to set a context for a request. +func NewGetLogEntryByIndexParamsWithContext(ctx context.Context) *GetLogEntryByIndexParams { + return &GetLogEntryByIndexParams{ + Context: ctx, + } +} + +// NewGetLogEntryByIndexParamsWithHTTPClient creates a new GetLogEntryByIndexParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetLogEntryByIndexParamsWithHTTPClient(client *http.Client) *GetLogEntryByIndexParams { + return &GetLogEntryByIndexParams{ + HTTPClient: client, + } +} + +/* +GetLogEntryByIndexParams contains all the parameters to send to the API endpoint + + for the get log entry by index operation. + + Typically these are written to a http.Request. +*/ +type GetLogEntryByIndexParams struct { + + /* LogIndex. + + specifies the index of the entry in the transparency log to be retrieved + */ + LogIndex int64 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get log entry by index params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetLogEntryByIndexParams) WithDefaults() *GetLogEntryByIndexParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get log entry by index params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetLogEntryByIndexParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get log entry by index params +func (o *GetLogEntryByIndexParams) WithTimeout(timeout time.Duration) *GetLogEntryByIndexParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get log entry by index params +func (o *GetLogEntryByIndexParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get log entry by index params +func (o *GetLogEntryByIndexParams) WithContext(ctx context.Context) *GetLogEntryByIndexParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get log entry by index params +func (o *GetLogEntryByIndexParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get log entry by index params +func (o *GetLogEntryByIndexParams) WithHTTPClient(client *http.Client) *GetLogEntryByIndexParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get log entry by index params +func (o *GetLogEntryByIndexParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithLogIndex adds the logIndex to the get log entry by index params +func (o *GetLogEntryByIndexParams) WithLogIndex(logIndex int64) *GetLogEntryByIndexParams { + o.SetLogIndex(logIndex) + return o +} + +// SetLogIndex adds the logIndex to the get log entry by index params +func (o *GetLogEntryByIndexParams) SetLogIndex(logIndex int64) { + o.LogIndex = logIndex +} + +// WriteToRequest writes these params to a swagger request +func (o *GetLogEntryByIndexParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param logIndex + qrLogIndex := o.LogIndex + qLogIndex := swag.FormatInt64(qrLogIndex) + if qLogIndex != "" { + + if err := r.SetQueryParam("logIndex", qLogIndex); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go new file mode 100644 index 000000000000..40e17b3cfcdd --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go @@ -0,0 +1,264 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetLogEntryByIndexReader is a Reader for the GetLogEntryByIndex structure. +type GetLogEntryByIndexReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetLogEntryByIndexReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetLogEntryByIndexOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewGetLogEntryByIndexNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewGetLogEntryByIndexDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetLogEntryByIndexOK creates a GetLogEntryByIndexOK with default headers values +func NewGetLogEntryByIndexOK() *GetLogEntryByIndexOK { + return &GetLogEntryByIndexOK{} +} + +/* +GetLogEntryByIndexOK describes a response with status code 200, with default header values. + +the entry in the transparency log requested along with an inclusion proof +*/ +type GetLogEntryByIndexOK struct { + Payload models.LogEntry +} + +// IsSuccess returns true when this get log entry by index o k response has a 2xx status code +func (o *GetLogEntryByIndexOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get log entry by index o k response has a 3xx status code +func (o *GetLogEntryByIndexOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log entry by index o k response has a 4xx status code +func (o *GetLogEntryByIndexOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get log entry by index o k response has a 5xx status code +func (o *GetLogEntryByIndexOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get log entry by index o k response a status code equal to that given +func (o *GetLogEntryByIndexOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get log entry by index o k response +func (o *GetLogEntryByIndexOK) Code() int { + return 200 +} + +func (o *GetLogEntryByIndexOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload) +} + +func (o *GetLogEntryByIndexOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload) +} + +func (o *GetLogEntryByIndexOK) GetPayload() models.LogEntry { + return o.Payload +} + +func (o *GetLogEntryByIndexOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogEntryByIndexNotFound creates a GetLogEntryByIndexNotFound with default headers values +func 
NewGetLogEntryByIndexNotFound() *GetLogEntryByIndexNotFound { + return &GetLogEntryByIndexNotFound{} +} + +/* +GetLogEntryByIndexNotFound describes a response with status code 404, with default header values. + +The content requested could not be found +*/ +type GetLogEntryByIndexNotFound struct { +} + +// IsSuccess returns true when this get log entry by index not found response has a 2xx status code +func (o *GetLogEntryByIndexNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get log entry by index not found response has a 3xx status code +func (o *GetLogEntryByIndexNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log entry by index not found response has a 4xx status code +func (o *GetLogEntryByIndexNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get log entry by index not found response has a 5xx status code +func (o *GetLogEntryByIndexNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get log entry by index not found response a status code equal to that given +func (o *GetLogEntryByIndexNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get log entry by index not found response +func (o *GetLogEntryByIndexNotFound) Code() int { + return 404 +} + +func (o *GetLogEntryByIndexNotFound) Error() string { + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404) +} + +func (o *GetLogEntryByIndexNotFound) String() string { + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404) +} + +func (o *GetLogEntryByIndexNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetLogEntryByIndexDefault creates a GetLogEntryByIndexDefault with default headers values +func NewGetLogEntryByIndexDefault(code int) *GetLogEntryByIndexDefault { + return &GetLogEntryByIndexDefault{ + _statusCode: code, + } +} + +/* +GetLogEntryByIndexDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type GetLogEntryByIndexDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get log entry by index default response has a 2xx status code +func (o *GetLogEntryByIndexDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get log entry by index default response has a 3xx status code +func (o *GetLogEntryByIndexDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get log entry by index default response has a 4xx status code +func (o *GetLogEntryByIndexDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get log entry by index default response has a 5xx status code +func (o *GetLogEntryByIndexDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get log entry by index default response a status code equal to that given +func (o *GetLogEntryByIndexDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get log entry by index default response +func (o *GetLogEntryByIndexDefault) Code() int { + return o._statusCode +} + +func (o *GetLogEntryByIndexDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload) +} + +func (o *GetLogEntryByIndexDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload) +} + +func (o *GetLogEntryByIndexDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogEntryByIndexDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go new file mode 100644 index 000000000000..5c88b5265464 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go @@ -0,0 +1,167 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetLogEntryByUUIDParams creates a new GetLogEntryByUUIDParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetLogEntryByUUIDParams() *GetLogEntryByUUIDParams { + return &GetLogEntryByUUIDParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetLogEntryByUUIDParamsWithTimeout creates a new GetLogEntryByUUIDParams object +// with the ability to set a timeout on a request. +func NewGetLogEntryByUUIDParamsWithTimeout(timeout time.Duration) *GetLogEntryByUUIDParams { + return &GetLogEntryByUUIDParams{ + timeout: timeout, + } +} + +// NewGetLogEntryByUUIDParamsWithContext creates a new GetLogEntryByUUIDParams object +// with the ability to set a context for a request. +func NewGetLogEntryByUUIDParamsWithContext(ctx context.Context) *GetLogEntryByUUIDParams { + return &GetLogEntryByUUIDParams{ + Context: ctx, + } +} + +// NewGetLogEntryByUUIDParamsWithHTTPClient creates a new GetLogEntryByUUIDParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetLogEntryByUUIDParamsWithHTTPClient(client *http.Client) *GetLogEntryByUUIDParams { + return &GetLogEntryByUUIDParams{ + HTTPClient: client, + } +} + +/* +GetLogEntryByUUIDParams contains all the parameters to send to the API endpoint + + for the get log entry by UUID operation. + + Typically these are written to a http.Request. +*/ +type GetLogEntryByUUIDParams struct { + + /* EntryUUID. + + the UUID of the entry for which the inclusion proof information should be returned + */ + EntryUUID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get log entry by UUID params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetLogEntryByUUIDParams) WithDefaults() *GetLogEntryByUUIDParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get log entry by UUID params (not the query body). +// +// All values with no default are reset to their zero value. 
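//
// Since this operation defines no defaulted parameters, SetDefaults is a no-op;
// callers typically just set the entry UUID directly (placeholder value shown):
//
//	params := NewGetLogEntryByUUIDParams().WithEntryUUID("<entryUUID>")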
+func (o *GetLogEntryByUUIDParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) WithTimeout(timeout time.Duration) *GetLogEntryByUUIDParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) WithContext(ctx context.Context) *GetLogEntryByUUIDParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) WithHTTPClient(client *http.Client) *GetLogEntryByUUIDParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEntryUUID adds the entryUUID to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) WithEntryUUID(entryUUID string) *GetLogEntryByUUIDParams { + o.SetEntryUUID(entryUUID) + return o +} + +// SetEntryUUID adds the entryUuid to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) SetEntryUUID(entryUUID string) { + o.EntryUUID = entryUUID +} + +// WriteToRequest writes these params to a swagger request +func (o *GetLogEntryByUUIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param entryUUID + if err := r.SetPathParam("entryUUID", o.EntryUUID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go new file mode 100644 index 000000000000..3498e27287ed --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go @@ -0,0 +1,264 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetLogEntryByUUIDReader is a Reader for the GetLogEntryByUUID structure. 
+type GetLogEntryByUUIDReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetLogEntryByUUIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetLogEntryByUUIDOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewGetLogEntryByUUIDNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewGetLogEntryByUUIDDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetLogEntryByUUIDOK creates a GetLogEntryByUUIDOK with default headers values +func NewGetLogEntryByUUIDOK() *GetLogEntryByUUIDOK { + return &GetLogEntryByUUIDOK{} +} + +/* +GetLogEntryByUUIDOK describes a response with status code 200, with default header values. + +Information needed for a client to compute the inclusion proof +*/ +type GetLogEntryByUUIDOK struct { + Payload models.LogEntry +} + +// IsSuccess returns true when this get log entry by Uuid o k response has a 2xx status code +func (o *GetLogEntryByUUIDOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get log entry by Uuid o k response has a 3xx status code +func (o *GetLogEntryByUUIDOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log entry by Uuid o k response has a 4xx status code +func (o *GetLogEntryByUUIDOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get log entry by Uuid o k response has a 5xx status code +func (o *GetLogEntryByUUIDOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get log entry by Uuid o k response a status code equal to that given +func (o *GetLogEntryByUUIDOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get log entry by Uuid o k response +func (o *GetLogEntryByUUIDOK) Code() int { + return 200 +} + +func (o *GetLogEntryByUUIDOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload) +} + +func (o *GetLogEntryByUUIDOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload) +} + +func (o *GetLogEntryByUUIDOK) GetPayload() models.LogEntry { + return o.Payload +} + +func (o *GetLogEntryByUUIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogEntryByUUIDNotFound creates a GetLogEntryByUUIDNotFound with default headers values +func NewGetLogEntryByUUIDNotFound() *GetLogEntryByUUIDNotFound { + return &GetLogEntryByUUIDNotFound{} +} + +/* +GetLogEntryByUUIDNotFound describes a response with status code 404, with default header values. 
+ +The content requested could not be found +*/ +type GetLogEntryByUUIDNotFound struct { +} + +// IsSuccess returns true when this get log entry by Uuid not found response has a 2xx status code +func (o *GetLogEntryByUUIDNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get log entry by Uuid not found response has a 3xx status code +func (o *GetLogEntryByUUIDNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log entry by Uuid not found response has a 4xx status code +func (o *GetLogEntryByUUIDNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get log entry by Uuid not found response has a 5xx status code +func (o *GetLogEntryByUUIDNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get log entry by Uuid not found response a status code equal to that given +func (o *GetLogEntryByUUIDNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get log entry by Uuid not found response +func (o *GetLogEntryByUUIDNotFound) Code() int { + return 404 +} + +func (o *GetLogEntryByUUIDNotFound) Error() string { + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404) +} + +func (o *GetLogEntryByUUIDNotFound) String() string { + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404) +} + +func (o *GetLogEntryByUUIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetLogEntryByUUIDDefault creates a GetLogEntryByUUIDDefault with default headers values +func NewGetLogEntryByUUIDDefault(code int) *GetLogEntryByUUIDDefault { + return &GetLogEntryByUUIDDefault{ + _statusCode: code, + } +} + +/* +GetLogEntryByUUIDDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type GetLogEntryByUUIDDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get log entry by UUID default response has a 2xx status code +func (o *GetLogEntryByUUIDDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get log entry by UUID default response has a 3xx status code +func (o *GetLogEntryByUUIDDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get log entry by UUID default response has a 4xx status code +func (o *GetLogEntryByUUIDDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get log entry by UUID default response has a 5xx status code +func (o *GetLogEntryByUUIDDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get log entry by UUID default response a status code equal to that given +func (o *GetLogEntryByUUIDDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get log entry by UUID default response +func (o *GetLogEntryByUUIDDefault) Code() int { + return o._statusCode +} + +func (o *GetLogEntryByUUIDDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload) +} + +func (o *GetLogEntryByUUIDDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload) +} + +func (o *GetLogEntryByUUIDDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogEntryByUUIDDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go new file mode 100644 index 000000000000..ed158ce23e35 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go @@ -0,0 +1,166 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// NewSearchLogQueryParams creates a new SearchLogQueryParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSearchLogQueryParams() *SearchLogQueryParams { + return &SearchLogQueryParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSearchLogQueryParamsWithTimeout creates a new SearchLogQueryParams object +// with the ability to set a timeout on a request. +func NewSearchLogQueryParamsWithTimeout(timeout time.Duration) *SearchLogQueryParams { + return &SearchLogQueryParams{ + timeout: timeout, + } +} + +// NewSearchLogQueryParamsWithContext creates a new SearchLogQueryParams object +// with the ability to set a context for a request. +func NewSearchLogQueryParamsWithContext(ctx context.Context) *SearchLogQueryParams { + return &SearchLogQueryParams{ + Context: ctx, + } +} + +// NewSearchLogQueryParamsWithHTTPClient creates a new SearchLogQueryParams object +// with the ability to set a custom HTTPClient for a request. +func NewSearchLogQueryParamsWithHTTPClient(client *http.Client) *SearchLogQueryParams { + return &SearchLogQueryParams{ + HTTPClient: client, + } +} + +/* +SearchLogQueryParams contains all the parameters to send to the API endpoint + + for the search log query operation. + + Typically these are written to a http.Request. +*/ +type SearchLogQueryParams struct { + + // Entry. + Entry *models.SearchLogQuery + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the search log query params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SearchLogQueryParams) WithDefaults() *SearchLogQueryParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the search log query params (not the query body). +// +// All values with no default are reset to their zero value. 
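//
// Since this operation defines no defaulted parameters, SetDefaults is a no-op;
// the request body is attached with WithEntry instead (the empty query shown
// here is only a placeholder):
//
//	params := NewSearchLogQueryParams().WithEntry(&models.SearchLogQuery{})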
+func (o *SearchLogQueryParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the search log query params +func (o *SearchLogQueryParams) WithTimeout(timeout time.Duration) *SearchLogQueryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the search log query params +func (o *SearchLogQueryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the search log query params +func (o *SearchLogQueryParams) WithContext(ctx context.Context) *SearchLogQueryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the search log query params +func (o *SearchLogQueryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the search log query params +func (o *SearchLogQueryParams) WithHTTPClient(client *http.Client) *SearchLogQueryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the search log query params +func (o *SearchLogQueryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEntry adds the entry to the search log query params +func (o *SearchLogQueryParams) WithEntry(entry *models.SearchLogQuery) *SearchLogQueryParams { + o.SetEntry(entry) + return o +} + +// SetEntry adds the entry to the search log query params +func (o *SearchLogQueryParams) SetEntry(entry *models.SearchLogQuery) { + o.Entry = entry +} + +// WriteToRequest writes these params to a swagger request +func (o *SearchLogQueryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Entry != nil { + if err := r.SetBodyParam(o.Entry); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go new file mode 100644 index 000000000000..13d5ba27864e --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go @@ -0,0 +1,354 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// SearchLogQueryReader is a Reader for the SearchLogQuery structure. +type SearchLogQueryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *SearchLogQueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewSearchLogQueryOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewSearchLogQueryBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewSearchLogQueryUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewSearchLogQueryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSearchLogQueryOK creates a SearchLogQueryOK with default headers values +func NewSearchLogQueryOK() *SearchLogQueryOK { + return &SearchLogQueryOK{} +} + +/* +SearchLogQueryOK describes a response with status code 200, with default header values. + +Returns zero or more entries from the transparency log, according to how many were included in request query +*/ +type SearchLogQueryOK struct { + Payload []models.LogEntry +} + +// IsSuccess returns true when this search log query o k response has a 2xx status code +func (o *SearchLogQueryOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this search log query o k response has a 3xx status code +func (o *SearchLogQueryOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search log query o k response has a 4xx status code +func (o *SearchLogQueryOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this search log query o k response has a 5xx status code +func (o *SearchLogQueryOK) IsServerError() bool { + return false +} + +// IsCode returns true when this search log query o k response a status code equal to that given +func (o *SearchLogQueryOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the search log query o k response +func (o *SearchLogQueryOK) Code() int { + return 200 +} + +func (o *SearchLogQueryOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload) +} + +func (o *SearchLogQueryOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload) +} + +func (o *SearchLogQueryOK) GetPayload() []models.LogEntry { + return o.Payload +} + +func (o *SearchLogQueryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchLogQueryBadRequest creates a SearchLogQueryBadRequest with default headers values +func NewSearchLogQueryBadRequest() *SearchLogQueryBadRequest { + return &SearchLogQueryBadRequest{} +} + +/* +SearchLogQueryBadRequest describes a response with status code 400, with default header values. 
+ +The content supplied to the server was invalid +*/ +type SearchLogQueryBadRequest struct { + Payload *models.Error +} + +// IsSuccess returns true when this search log query bad request response has a 2xx status code +func (o *SearchLogQueryBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this search log query bad request response has a 3xx status code +func (o *SearchLogQueryBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search log query bad request response has a 4xx status code +func (o *SearchLogQueryBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this search log query bad request response has a 5xx status code +func (o *SearchLogQueryBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this search log query bad request response a status code equal to that given +func (o *SearchLogQueryBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the search log query bad request response +func (o *SearchLogQueryBadRequest) Code() int { + return 400 +} + +func (o *SearchLogQueryBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload) +} + +func (o *SearchLogQueryBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload) +} + +func (o *SearchLogQueryBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchLogQueryBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchLogQueryUnprocessableEntity creates a SearchLogQueryUnprocessableEntity with default headers values +func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity { + return &SearchLogQueryUnprocessableEntity{} +} + +/* +SearchLogQueryUnprocessableEntity describes a response with status code 422, with default header values. 
+ +The server understood the request but is unable to process the contained instructions +*/ +type SearchLogQueryUnprocessableEntity struct { + Payload *models.Error +} + +// IsSuccess returns true when this search log query unprocessable entity response has a 2xx status code +func (o *SearchLogQueryUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this search log query unprocessable entity response has a 3xx status code +func (o *SearchLogQueryUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search log query unprocessable entity response has a 4xx status code +func (o *SearchLogQueryUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this search log query unprocessable entity response has a 5xx status code +func (o *SearchLogQueryUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this search log query unprocessable entity response a status code equal to that given +func (o *SearchLogQueryUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the search log query unprocessable entity response +func (o *SearchLogQueryUnprocessableEntity) Code() int { + return 422 +} + +func (o *SearchLogQueryUnprocessableEntity) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload) +} + +func (o *SearchLogQueryUnprocessableEntity) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload) +} + +func (o *SearchLogQueryUnprocessableEntity) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchLogQueryUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchLogQueryDefault creates a SearchLogQueryDefault with default headers values +func NewSearchLogQueryDefault(code int) *SearchLogQueryDefault { + return &SearchLogQueryDefault{ + _statusCode: code, + } +} + +/* +SearchLogQueryDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type SearchLogQueryDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this search log query default response has a 2xx status code +func (o *SearchLogQueryDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this search log query default response has a 3xx status code +func (o *SearchLogQueryDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this search log query default response has a 4xx status code +func (o *SearchLogQueryDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this search log query default response has a 5xx status code +func (o *SearchLogQueryDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this search log query default response a status code equal to that given +func (o *SearchLogQueryDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the search log query default response +func (o *SearchLogQueryDefault) Code() int { + return o._statusCode +} + +func (o *SearchLogQueryDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload) +} + +func (o *SearchLogQueryDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload) +} + +func (o *SearchLogQueryDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchLogQueryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go new file mode 100644 index 000000000000..80db490317cd --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go @@ -0,0 +1,127 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package index + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// New creates a new index API client. 
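//
// A minimal usage sketch, illustrative only and not part of the generated sources;
// the host, scheme, and query fields mentioned below are assumptions:
//
//	c := New(httptransport.New("rekor.sigstore.dev", "/", []string{"https"}), strfmt.Default)
//	query := &models.SearchIndex{} // populate hash, public key, or email criteria as needed
//	resp, err := c.SearchIndex(NewSearchIndexParams().WithQuery(query))
//	if err != nil {
//		return err
//	}
//	_ = resp.GetPayload() // zero or more matching entry UUIDs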
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +// New creates a new index API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new index API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + +/* +Client for index API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption may be used to customize the behavior of Client methods. +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + SearchIndex(params *SearchIndexParams, opts ...ClientOption) (*SearchIndexOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* + SearchIndex searches index by entry metadata + + EXPERIMENTAL - this endpoint is offered as best effort only and may be changed or removed in future releases. + +The results returned from this endpoint may be incomplete. +*/ +func (a *Client) SearchIndex(params *SearchIndexParams, opts ...ClientOption) (*SearchIndexOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewSearchIndexParams() + } + op := &runtime.ClientOperation{ + ID: "searchIndex", + Method: "POST", + PathPattern: "/api/v1/index/retrieve", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SearchIndexReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*SearchIndexOK) + if ok { + return success, nil + } + + // unexpected success response. 
+ // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*SearchIndexDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go new file mode 100644 index 000000000000..c1694193ef94 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go @@ -0,0 +1,166 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package index + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// NewSearchIndexParams creates a new SearchIndexParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSearchIndexParams() *SearchIndexParams { + return &SearchIndexParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSearchIndexParamsWithTimeout creates a new SearchIndexParams object +// with the ability to set a timeout on a request. +func NewSearchIndexParamsWithTimeout(timeout time.Duration) *SearchIndexParams { + return &SearchIndexParams{ + timeout: timeout, + } +} + +// NewSearchIndexParamsWithContext creates a new SearchIndexParams object +// with the ability to set a context for a request. +func NewSearchIndexParamsWithContext(ctx context.Context) *SearchIndexParams { + return &SearchIndexParams{ + Context: ctx, + } +} + +// NewSearchIndexParamsWithHTTPClient creates a new SearchIndexParams object +// with the ability to set a custom HTTPClient for a request. +func NewSearchIndexParamsWithHTTPClient(client *http.Client) *SearchIndexParams { + return &SearchIndexParams{ + HTTPClient: client, + } +} + +/* +SearchIndexParams contains all the parameters to send to the API endpoint + + for the search index operation. + + Typically these are written to a http.Request. +*/ +type SearchIndexParams struct { + + // Query. + Query *models.SearchIndex + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the search index params (not the query body). 
+// +// All values with no default are reset to their zero value. +func (o *SearchIndexParams) WithDefaults() *SearchIndexParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the search index params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SearchIndexParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the search index params +func (o *SearchIndexParams) WithTimeout(timeout time.Duration) *SearchIndexParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the search index params +func (o *SearchIndexParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the search index params +func (o *SearchIndexParams) WithContext(ctx context.Context) *SearchIndexParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the search index params +func (o *SearchIndexParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the search index params +func (o *SearchIndexParams) WithHTTPClient(client *http.Client) *SearchIndexParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the search index params +func (o *SearchIndexParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithQuery adds the query to the search index params +func (o *SearchIndexParams) WithQuery(query *models.SearchIndex) *SearchIndexParams { + o.SetQuery(query) + return o +} + +// SetQuery adds the query to the search index params +func (o *SearchIndexParams) SetQuery(query *models.SearchIndex) { + o.Query = query +} + +// WriteToRequest writes these params to a swagger request +func (o *SearchIndexParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Query != nil { + if err := r.SetBodyParam(o.Query); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go new file mode 100644 index 000000000000..8c62eca17b10 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go @@ -0,0 +1,278 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package index + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// SearchIndexReader is a Reader for the SearchIndex structure. +type SearchIndexReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SearchIndexReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewSearchIndexOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewSearchIndexBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewSearchIndexDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSearchIndexOK creates a SearchIndexOK with default headers values +func NewSearchIndexOK() *SearchIndexOK { + return &SearchIndexOK{} +} + +/* +SearchIndexOK describes a response with status code 200, with default header values. + +Returns zero or more entry UUIDs from the transparency log based on search query +*/ +type SearchIndexOK struct { + Payload []string +} + +// IsSuccess returns true when this search index o k response has a 2xx status code +func (o *SearchIndexOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this search index o k response has a 3xx status code +func (o *SearchIndexOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search index o k response has a 4xx status code +func (o *SearchIndexOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this search index o k response has a 5xx status code +func (o *SearchIndexOK) IsServerError() bool { + return false +} + +// IsCode returns true when this search index o k response a status code equal to that given +func (o *SearchIndexOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the search index o k response +func (o *SearchIndexOK) Code() int { + return 200 +} + +func (o *SearchIndexOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload) +} + +func (o *SearchIndexOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload) +} + +func (o *SearchIndexOK) GetPayload() []string { + return o.Payload +} + +func (o *SearchIndexOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchIndexBadRequest creates a SearchIndexBadRequest with default headers values +func NewSearchIndexBadRequest() *SearchIndexBadRequest { + return &SearchIndexBadRequest{} +} + +/* +SearchIndexBadRequest describes a response with status code 400, with default header values. 
+ +The content supplied to the server was invalid +*/ +type SearchIndexBadRequest struct { + Payload *models.Error +} + +// IsSuccess returns true when this search index bad request response has a 2xx status code +func (o *SearchIndexBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this search index bad request response has a 3xx status code +func (o *SearchIndexBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search index bad request response has a 4xx status code +func (o *SearchIndexBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this search index bad request response has a 5xx status code +func (o *SearchIndexBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this search index bad request response a status code equal to that given +func (o *SearchIndexBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the search index bad request response +func (o *SearchIndexBadRequest) Code() int { + return 400 +} + +func (o *SearchIndexBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload) +} + +func (o *SearchIndexBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload) +} + +func (o *SearchIndexBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchIndexBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchIndexDefault creates a SearchIndexDefault with default headers values +func NewSearchIndexDefault(code int) *SearchIndexDefault { + return &SearchIndexDefault{ + _statusCode: code, + } +} + +/* +SearchIndexDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type SearchIndexDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this search index default response has a 2xx status code +func (o *SearchIndexDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this search index default response has a 3xx status code +func (o *SearchIndexDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this search index default response has a 4xx status code +func (o *SearchIndexDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this search index default response has a 5xx status code +func (o *SearchIndexDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this search index default response a status code equal to that given +func (o *SearchIndexDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the search index default response +func (o *SearchIndexDefault) Code() int { + return o._statusCode +} + +func (o *SearchIndexDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload) +} + +func (o *SearchIndexDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload) +} + +func (o *SearchIndexDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchIndexDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go new file mode 100644 index 000000000000..b4248c933a7e --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go @@ -0,0 +1,179 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package pubkey + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetPublicKeyParams creates a new GetPublicKeyParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetPublicKeyParams() *GetPublicKeyParams { + return &GetPublicKeyParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetPublicKeyParamsWithTimeout creates a new GetPublicKeyParams object +// with the ability to set a timeout on a request. +func NewGetPublicKeyParamsWithTimeout(timeout time.Duration) *GetPublicKeyParams { + return &GetPublicKeyParams{ + timeout: timeout, + } +} + +// NewGetPublicKeyParamsWithContext creates a new GetPublicKeyParams object +// with the ability to set a context for a request. +func NewGetPublicKeyParamsWithContext(ctx context.Context) *GetPublicKeyParams { + return &GetPublicKeyParams{ + Context: ctx, + } +} + +// NewGetPublicKeyParamsWithHTTPClient creates a new GetPublicKeyParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetPublicKeyParamsWithHTTPClient(client *http.Client) *GetPublicKeyParams { + return &GetPublicKeyParams{ + HTTPClient: client, + } +} + +/* +GetPublicKeyParams contains all the parameters to send to the API endpoint + + for the get public key operation. + + Typically these are written to a http.Request. +*/ +type GetPublicKeyParams struct { + + /* TreeID. + + The tree ID of the tree you wish to get a public key for + */ + TreeID *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get public key params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetPublicKeyParams) WithDefaults() *GetPublicKeyParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get public key params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetPublicKeyParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get public key params +func (o *GetPublicKeyParams) WithTimeout(timeout time.Duration) *GetPublicKeyParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get public key params +func (o *GetPublicKeyParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get public key params +func (o *GetPublicKeyParams) WithContext(ctx context.Context) *GetPublicKeyParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get public key params +func (o *GetPublicKeyParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get public key params +func (o *GetPublicKeyParams) WithHTTPClient(client *http.Client) *GetPublicKeyParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get public key params +func (o *GetPublicKeyParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTreeID adds the treeID to the get public key params +func (o *GetPublicKeyParams) WithTreeID(treeID *string) *GetPublicKeyParams { + o.SetTreeID(treeID) + return o +} + +// SetTreeID adds the treeId to the get public key params +func (o *GetPublicKeyParams) SetTreeID(treeID *string) { + o.TreeID = treeID +} + +// WriteToRequest writes these params to a swagger request +func (o *GetPublicKeyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.TreeID != nil { + + // query param treeID + var qrTreeID string + + if o.TreeID != nil { + qrTreeID = *o.TreeID + } + qTreeID := qrTreeID + if qTreeID != "" { + + if err := r.SetQueryParam("treeID", qTreeID); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go new file mode 100644 index 000000000000..70dbc452a3bb --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package pubkey + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetPublicKeyReader is a Reader for the GetPublicKey structure. 
+type GetPublicKeyReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetPublicKeyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetPublicKeyOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGetPublicKeyDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetPublicKeyOK creates a GetPublicKeyOK with default headers values +func NewGetPublicKeyOK() *GetPublicKeyOK { + return &GetPublicKeyOK{} +} + +/* +GetPublicKeyOK describes a response with status code 200, with default header values. + +The public key +*/ +type GetPublicKeyOK struct { + Payload string +} + +// IsSuccess returns true when this get public key o k response has a 2xx status code +func (o *GetPublicKeyOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get public key o k response has a 3xx status code +func (o *GetPublicKeyOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get public key o k response has a 4xx status code +func (o *GetPublicKeyOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get public key o k response has a 5xx status code +func (o *GetPublicKeyOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get public key o k response a status code equal to that given +func (o *GetPublicKeyOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get public key o k response +func (o *GetPublicKeyOK) Code() int { + return 200 +} + +func (o *GetPublicKeyOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload) +} + +func (o *GetPublicKeyOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload) +} + +func (o *GetPublicKeyOK) GetPayload() string { + return o.Payload +} + +func (o *GetPublicKeyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetPublicKeyDefault creates a GetPublicKeyDefault with default headers values +func NewGetPublicKeyDefault(code int) *GetPublicKeyDefault { + return &GetPublicKeyDefault{ + _statusCode: code, + } +} + +/* +GetPublicKeyDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type GetPublicKeyDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get public key default response has a 2xx status code +func (o *GetPublicKeyDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get public key default response has a 3xx status code +func (o *GetPublicKeyDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get public key default response has a 4xx status code +func (o *GetPublicKeyDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get public key default response has a 5xx status code +func (o *GetPublicKeyDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get public key default response a status code equal to that given +func (o *GetPublicKeyDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get public key default response +func (o *GetPublicKeyDefault) Code() int { + return o._statusCode +} + +func (o *GetPublicKeyDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload) +} + +func (o *GetPublicKeyDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload) +} + +func (o *GetPublicKeyDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetPublicKeyDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go new file mode 100644 index 000000000000..7f4db7d8cf1b --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go @@ -0,0 +1,149 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package pubkey + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// New creates a new pubkey API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +// New creates a new pubkey API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new pubkey API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + +/* +Client for pubkey API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption may be used to customize the behavior of Client methods. +type ClientOption func(*runtime.ClientOperation) + +// This client is generated with a few options you might find useful for your swagger spec. +// +// Feel free to add you own set of options. + +// WithAccept allows the client to force the Accept header +// to negotiate a specific Producer from the server. +// +// You may use this option to set arbitrary extensions to your MIME media type. +func WithAccept(mime string) ClientOption { + return func(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{mime} + } +} + +// WithAcceptApplicationJSON sets the Accept header to "application/json". +func WithAcceptApplicationJSON(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{"application/json"} +} + +// WithAcceptApplicationxPemFile sets the Accept header to "application/x-pem-file". 
+func WithAcceptApplicationxPemFile(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{"application/x-pem-file"} +} + +// ClientService is the interface for Client methods +type ClientService interface { + GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetPublicKey retrieves the public key that can be used to validate the signed tree head + +Returns the public key that can be used to validate the signed tree head +*/ +func (a *Client) GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetPublicKeyParams() + } + op := &runtime.ClientOperation{ + ID: "getPublicKey", + Method: "GET", + PathPattern: "/api/v1/log/publicKey", + ProducesMediaTypes: []string{"application/x-pem-file"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetPublicKeyReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetPublicKeyOK) + if ok { + return success, nil + } + + // unexpected success response. + // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetPublicKeyDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go new file mode 100644 index 000000000000..bee3811184a9 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go @@ -0,0 +1,143 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package client + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/client/entries" + "github.com/sigstore/rekor/pkg/generated/client/index" + "github.com/sigstore/rekor/pkg/generated/client/pubkey" + "github.com/sigstore/rekor/pkg/generated/client/tlog" +) + +// Default rekor HTTP client. 
+var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "rekor.sigstore.dev" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"http"} + +// NewHTTPClient creates a new rekor HTTP client. +func NewHTTPClient(formats strfmt.Registry) *Rekor { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new rekor HTTP client, +// using a customizable transport config. +func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *Rekor { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new rekor client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Rekor { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(Rekor) + cli.Transport = transport + cli.Entries = entries.New(transport, formats) + cli.Index = index.New(transport, formats) + cli.Pubkey = pubkey.New(transport, formats) + cli.Tlog = tlog.New(transport, formats) + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. +type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// Rekor is a client for rekor +type Rekor struct { + Entries entries.ClientService + + Index index.ClientService + + Pubkey pubkey.ClientService + + Tlog tlog.ClientService + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *Rekor) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + c.Entries.SetTransport(transport) + c.Index.SetTransport(transport) + c.Pubkey.SetTransport(transport) + c.Tlog.SetTransport(transport) +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go new file mode 100644 index 000000000000..e0ae2cdd31e2 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go @@ -0,0 +1,144 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetLogInfoParams creates a new GetLogInfoParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetLogInfoParams() *GetLogInfoParams { + return &GetLogInfoParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetLogInfoParamsWithTimeout creates a new GetLogInfoParams object +// with the ability to set a timeout on a request. +func NewGetLogInfoParamsWithTimeout(timeout time.Duration) *GetLogInfoParams { + return &GetLogInfoParams{ + timeout: timeout, + } +} + +// NewGetLogInfoParamsWithContext creates a new GetLogInfoParams object +// with the ability to set a context for a request. +func NewGetLogInfoParamsWithContext(ctx context.Context) *GetLogInfoParams { + return &GetLogInfoParams{ + Context: ctx, + } +} + +// NewGetLogInfoParamsWithHTTPClient creates a new GetLogInfoParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetLogInfoParamsWithHTTPClient(client *http.Client) *GetLogInfoParams { + return &GetLogInfoParams{ + HTTPClient: client, + } +} + +/* +GetLogInfoParams contains all the parameters to send to the API endpoint + + for the get log info operation. + + Typically these are written to a http.Request. +*/ +type GetLogInfoParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get log info params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetLogInfoParams) WithDefaults() *GetLogInfoParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get log info params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetLogInfoParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get log info params +func (o *GetLogInfoParams) WithTimeout(timeout time.Duration) *GetLogInfoParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get log info params +func (o *GetLogInfoParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get log info params +func (o *GetLogInfoParams) WithContext(ctx context.Context) *GetLogInfoParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get log info params +func (o *GetLogInfoParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get log info params +func (o *GetLogInfoParams) WithHTTPClient(client *http.Client) *GetLogInfoParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get log info params +func (o *GetLogInfoParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GetLogInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go new file mode 100644 index 000000000000..3d98f88cdfd3 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go @@ -0,0 +1,204 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetLogInfoReader is a Reader for the GetLogInfo structure. +type GetLogInfoReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetLogInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetLogInfoOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGetLogInfoDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetLogInfoOK creates a GetLogInfoOK with default headers values +func NewGetLogInfoOK() *GetLogInfoOK { + return &GetLogInfoOK{} +} + +/* +GetLogInfoOK describes a response with status code 200, with default header values. + +A JSON object with the root hash and tree size as properties +*/ +type GetLogInfoOK struct { + Payload *models.LogInfo +} + +// IsSuccess returns true when this get log info o k response has a 2xx status code +func (o *GetLogInfoOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get log info o k response has a 3xx status code +func (o *GetLogInfoOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log info o k response has a 4xx status code +func (o *GetLogInfoOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get log info o k response has a 5xx status code +func (o *GetLogInfoOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get log info o k response a status code equal to that given +func (o *GetLogInfoOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get log info o k response +func (o *GetLogInfoOK) Code() int { + return 200 +} + +func (o *GetLogInfoOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload) +} + +func (o *GetLogInfoOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload) +} + +func (o *GetLogInfoOK) GetPayload() *models.LogInfo { + return o.Payload +} + +func (o *GetLogInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.LogInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogInfoDefault creates a GetLogInfoDefault with default headers values +func NewGetLogInfoDefault(code int) *GetLogInfoDefault { + return &GetLogInfoDefault{ + _statusCode: code, + } +} + +/* +GetLogInfoDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type GetLogInfoDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get log info default response has a 2xx status code +func (o *GetLogInfoDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get log info default response has a 3xx status code +func (o *GetLogInfoDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get log info default response has a 4xx status code +func (o *GetLogInfoDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get log info default response has a 5xx status code +func (o *GetLogInfoDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get log info default response a status code equal to that given +func (o *GetLogInfoDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get log info default response +func (o *GetLogInfoDefault) Code() int { + return o._statusCode +} + +func (o *GetLogInfoDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload) +} + +func (o *GetLogInfoDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload) +} + +func (o *GetLogInfoDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go new file mode 100644 index 000000000000..2b21ad887c16 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go @@ -0,0 +1,255 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetLogProofParams creates a new GetLogProofParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetLogProofParams() *GetLogProofParams { + return &GetLogProofParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetLogProofParamsWithTimeout creates a new GetLogProofParams object +// with the ability to set a timeout on a request. +func NewGetLogProofParamsWithTimeout(timeout time.Duration) *GetLogProofParams { + return &GetLogProofParams{ + timeout: timeout, + } +} + +// NewGetLogProofParamsWithContext creates a new GetLogProofParams object +// with the ability to set a context for a request. +func NewGetLogProofParamsWithContext(ctx context.Context) *GetLogProofParams { + return &GetLogProofParams{ + Context: ctx, + } +} + +// NewGetLogProofParamsWithHTTPClient creates a new GetLogProofParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetLogProofParamsWithHTTPClient(client *http.Client) *GetLogProofParams { + return &GetLogProofParams{ + HTTPClient: client, + } +} + +/* +GetLogProofParams contains all the parameters to send to the API endpoint + + for the get log proof operation. + + Typically these are written to a http.Request. +*/ +type GetLogProofParams struct { + + /* FirstSize. + + The size of the tree that you wish to prove consistency from (1 means the beginning of the log) Defaults to 1 if not specified + + + Default: 1 + */ + FirstSize *int64 + + /* LastSize. + + The size of the tree that you wish to prove consistency to + */ + LastSize int64 + + /* TreeID. + + The tree ID of the tree that you wish to prove consistency for + */ + TreeID *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get log proof params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetLogProofParams) WithDefaults() *GetLogProofParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get log proof params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetLogProofParams) SetDefaults() { + var ( + firstSizeDefault = int64(1) + ) + + val := GetLogProofParams{ + FirstSize: &firstSizeDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the get log proof params +func (o *GetLogProofParams) WithTimeout(timeout time.Duration) *GetLogProofParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get log proof params +func (o *GetLogProofParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get log proof params +func (o *GetLogProofParams) WithContext(ctx context.Context) *GetLogProofParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get log proof params +func (o *GetLogProofParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get log proof params +func (o *GetLogProofParams) WithHTTPClient(client *http.Client) *GetLogProofParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get log proof params +func (o *GetLogProofParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithFirstSize adds the firstSize to the get log proof params +func (o *GetLogProofParams) WithFirstSize(firstSize *int64) *GetLogProofParams { + o.SetFirstSize(firstSize) + return o +} + +// SetFirstSize adds the firstSize to the get log proof params +func (o *GetLogProofParams) SetFirstSize(firstSize *int64) { + o.FirstSize = firstSize +} + +// WithLastSize adds the lastSize to the get log proof params +func (o *GetLogProofParams) WithLastSize(lastSize int64) *GetLogProofParams { + o.SetLastSize(lastSize) + return o +} + +// SetLastSize adds the lastSize to the get log proof params +func (o *GetLogProofParams) SetLastSize(lastSize int64) { + o.LastSize = lastSize +} + +// WithTreeID adds the treeID to the get log proof params +func (o *GetLogProofParams) WithTreeID(treeID *string) *GetLogProofParams { + o.SetTreeID(treeID) + return o +} + +// SetTreeID adds the treeId to the get log proof params +func (o *GetLogProofParams) SetTreeID(treeID *string) { + o.TreeID = treeID +} + +// WriteToRequest writes these params to a swagger request +func (o *GetLogProofParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.FirstSize != nil { + + // query param firstSize + var qrFirstSize int64 + + if o.FirstSize != nil { + qrFirstSize = *o.FirstSize + } + qFirstSize := swag.FormatInt64(qrFirstSize) + if qFirstSize != "" { + + if err := r.SetQueryParam("firstSize", qFirstSize); err != nil { + return err + } + } + } + + // query param lastSize + qrLastSize := o.LastSize + qLastSize := swag.FormatInt64(qrLastSize) + if qLastSize != "" { + + if err := r.SetQueryParam("lastSize", qLastSize); err != nil { + return err + } + } + + if o.TreeID != nil { + + // query param treeID + var qrTreeID string + + if o.TreeID != nil { + qrTreeID = *o.TreeID + } + qTreeID := qrTreeID + if qTreeID != "" { + + if err := r.SetQueryParam("treeID", qTreeID); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go new file mode 100644 index 000000000000..ae8e50d28798 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go @@ -0,0 +1,280 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetLogProofReader is a Reader for the GetLogProof structure. +type GetLogProofReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetLogProofReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetLogProofOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetLogProofBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewGetLogProofDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetLogProofOK creates a GetLogProofOK with default headers values +func NewGetLogProofOK() *GetLogProofOK { + return &GetLogProofOK{} +} + +/* +GetLogProofOK describes a response with status code 200, with default header values. 
+ +All hashes required to compute the consistency proof +*/ +type GetLogProofOK struct { + Payload *models.ConsistencyProof +} + +// IsSuccess returns true when this get log proof o k response has a 2xx status code +func (o *GetLogProofOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get log proof o k response has a 3xx status code +func (o *GetLogProofOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log proof o k response has a 4xx status code +func (o *GetLogProofOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get log proof o k response has a 5xx status code +func (o *GetLogProofOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get log proof o k response a status code equal to that given +func (o *GetLogProofOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get log proof o k response +func (o *GetLogProofOK) Code() int { + return 200 +} + +func (o *GetLogProofOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload) +} + +func (o *GetLogProofOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload) +} + +func (o *GetLogProofOK) GetPayload() *models.ConsistencyProof { + return o.Payload +} + +func (o *GetLogProofOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ConsistencyProof) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogProofBadRequest creates a GetLogProofBadRequest with default headers values +func NewGetLogProofBadRequest() *GetLogProofBadRequest { + return &GetLogProofBadRequest{} +} + +/* +GetLogProofBadRequest describes a response with status code 400, with default header values. 
+ +The content supplied to the server was invalid +*/ +type GetLogProofBadRequest struct { + Payload *models.Error +} + +// IsSuccess returns true when this get log proof bad request response has a 2xx status code +func (o *GetLogProofBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get log proof bad request response has a 3xx status code +func (o *GetLogProofBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log proof bad request response has a 4xx status code +func (o *GetLogProofBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get log proof bad request response has a 5xx status code +func (o *GetLogProofBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get log proof bad request response a status code equal to that given +func (o *GetLogProofBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get log proof bad request response +func (o *GetLogProofBadRequest) Code() int { + return 400 +} + +func (o *GetLogProofBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload) +} + +func (o *GetLogProofBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload) +} + +func (o *GetLogProofBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogProofBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogProofDefault creates a GetLogProofDefault with default headers values +func NewGetLogProofDefault(code int) *GetLogProofDefault { + return &GetLogProofDefault{ + _statusCode: code, + } +} + +/* +GetLogProofDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type GetLogProofDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get log proof default response has a 2xx status code +func (o *GetLogProofDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get log proof default response has a 3xx status code +func (o *GetLogProofDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get log proof default response has a 4xx status code +func (o *GetLogProofDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get log proof default response has a 5xx status code +func (o *GetLogProofDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get log proof default response a status code equal to that given +func (o *GetLogProofDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get log proof default response +func (o *GetLogProofDefault) Code() int { + return o._statusCode +} + +func (o *GetLogProofDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload) +} + +func (o *GetLogProofDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload) +} + +func (o *GetLogProofDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogProofDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go new file mode 100644 index 000000000000..ff174ebfa230 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go @@ -0,0 +1,171 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// New creates a new tlog API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +// New creates a new tlog API client with basic auth credentials. 
+// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new tlog API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + +/* +Client for tlog API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption may be used to customize the behavior of Client methods. +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + GetLogInfo(params *GetLogInfoParams, opts ...ClientOption) (*GetLogInfoOK, error) + + GetLogProof(params *GetLogProofParams, opts ...ClientOption) (*GetLogProofOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetLogInfo gets information about the current state of the transparency log + +Returns the current root hash and size of the merkle tree used to store the log entries. +*/ +func (a *Client) GetLogInfo(params *GetLogInfoParams, opts ...ClientOption) (*GetLogInfoOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetLogInfoParams() + } + op := &runtime.ClientOperation{ + ID: "getLogInfo", + Method: "GET", + PathPattern: "/api/v1/log", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetLogInfoReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetLogInfoOK) + if ok { + return success, nil + } + + // unexpected success response. 
+ // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetLogInfoDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +GetLogProof gets information required to generate a consistency proof for the transparency log + +Returns a list of hashes for specified tree sizes that can be used to confirm the consistency of the transparency log +*/ +func (a *Client) GetLogProof(params *GetLogProofParams, opts ...ClientOption) (*GetLogProofOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetLogProofParams() + } + op := &runtime.ClientOperation{ + ID: "getLogProof", + Method: "GET", + PathPattern: "/api/v1/log/proof", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetLogProofReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetLogProofOK) + if ok { + return success, nil + } + + // unexpected success response. + // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetLogProofDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go new file mode 100644 index 000000000000..5607679fdf7e --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Alpine Alpine package +// +// swagger:model alpine +type Alpine struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec AlpineSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Alpine) Kind() string { + return "alpine" +} + +// SetKind sets the kind of this subtype +func (m *Alpine) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Alpine) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec AlpineSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Alpine + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Alpine) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec AlpineSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this alpine +func (m *Alpine) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Alpine) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Alpine) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this alpine based on the context it is used +func (m *Alpine) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Alpine) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Alpine) UnmarshalBinary(b []byte) error { + var res Alpine + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go new file mode 100644 index 000000000000..00f76926c39c --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// AlpineSchema Alpine Package Schema +// +// # Schema for Alpine package objects +// +// swagger:model alpineSchema +type AlpineSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go new file mode 100644 index 000000000000..c77008ce7a84 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go @@ -0,0 +1,480 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AlpineV001Schema Alpine v0.0.1 Schema +// +// # Schema for Alpine Package entries +// +// swagger:model alpineV001Schema +type AlpineV001Schema struct { + + // package + // Required: true + Package *AlpineV001SchemaPackage `json:"package"` + + // public key + // Required: true + PublicKey *AlpineV001SchemaPublicKey `json:"publicKey"` +} + +// Validate validates this alpine v001 schema +func (m *AlpineV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePackage(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlpineV001Schema) validatePackage(formats strfmt.Registry) error { + + if err := validate.Required("package", "body", m.Package); err != nil { + return err + } + + if m.Package != nil { + if err := m.Package.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package") + } + + return err + } + } + + return nil +} + +func (m *AlpineV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this alpine v001 schema based on the context it is used +func (m *AlpineV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePackage(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { + + if m.Package != nil { + + if err := m.Package.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package") + } + + return err + } + } + + return nil +} + +func (m *AlpineV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AlpineV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AlpineV001Schema) UnmarshalBinary(b []byte) error { + var res AlpineV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// AlpineV001SchemaPackage Information about the package associated with the entry +// +// swagger:model AlpineV001SchemaPackage +type AlpineV001SchemaPackage struct { + + // Specifies the package inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // hash + Hash *AlpineV001SchemaPackageHash `json:"hash,omitempty"` + + // Values of the .PKGINFO key / value pairs + // Read Only: true + Pkginfo map[string]string `json:"pkginfo,omitempty"` +} + +// Validate validates this alpine v001 schema package +func (m *AlpineV001SchemaPackage) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlpineV001SchemaPackage) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this alpine v001 schema package based on the context it is used +func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePkginfo(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlpineV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package" + "." 
+ "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *AlpineV001SchemaPackage) contextValidatePkginfo(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *AlpineV001SchemaPackage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AlpineV001SchemaPackage) UnmarshalBinary(b []byte) error { + var res AlpineV001SchemaPackage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// AlpineV001SchemaPackageHash Specifies the hash algorithm and value for the package +// +// swagger:model AlpineV001SchemaPackageHash +type AlpineV001SchemaPackageHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the package + // Required: true + Value *string `json:"value"` +} + +// Validate validates this alpine v001 schema package hash +func (m *AlpineV001SchemaPackageHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var alpineV001SchemaPackageHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + alpineV001SchemaPackageHashTypeAlgorithmPropEnum = append(alpineV001SchemaPackageHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // AlpineV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" + AlpineV001SchemaPackageHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *AlpineV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, alpineV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *AlpineV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *AlpineV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this alpine v001 schema package hash based on the context it is used +func (m *AlpineV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// MarshalBinary interface implementation +func (m *AlpineV001SchemaPackageHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AlpineV001SchemaPackageHash) UnmarshalBinary(b []byte) error { + var res AlpineV001SchemaPackageHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// AlpineV001SchemaPublicKey The public key that can verify the package signature +// +// swagger:model AlpineV001SchemaPublicKey +type AlpineV001SchemaPublicKey struct { + + // Specifies the content of the public key inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this alpine v001 schema public key +func (m *AlpineV001SchemaPublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlpineV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this alpine v001 schema public key based on context it is used +func (m *AlpineV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *AlpineV001SchemaPublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AlpineV001SchemaPublicKey) UnmarshalBinary(b []byte) error { + var res AlpineV001SchemaPublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go new file mode 100644 index 000000000000..804ddd11a969 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go @@ -0,0 +1,118 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ConsistencyProof consistency proof +// +// swagger:model ConsistencyProof +type ConsistencyProof struct { + + // hashes + // Required: true + Hashes []string `json:"hashes"` + + // The hash value stored at the root of the merkle tree at the time the proof was generated + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + RootHash *string `json:"rootHash"` +} + +// Validate validates this consistency proof +func (m *ConsistencyProof) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHashes(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRootHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ConsistencyProof) validateHashes(formats strfmt.Registry) error { + + if err := validate.Required("hashes", "body", m.Hashes); err != nil { + return err + } + + for i := 0; i < len(m.Hashes); i++ { + + if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + } + + return nil +} + +func (m *ConsistencyProof) validateRootHash(formats strfmt.Registry) error { + + if err := validate.Required("rootHash", "body", m.RootHash); err != nil { + return err + } + + if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this consistency proof based on context it is used +func (m *ConsistencyProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ConsistencyProof) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ConsistencyProof) UnmarshalBinary(b []byte) error { + var res ConsistencyProof + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go new file mode 100644 index 000000000000..8de4083baf8e --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Cose COSE object +// +// swagger:model cose +type Cose struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec CoseSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Cose) Kind() string { + return "cose" +} + +// SetKind sets the kind of this subtype +func (m *Cose) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Cose) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec CoseSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Cose + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Cose) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec CoseSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this cose +func (m *Cose) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Cose) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Cose) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this cose based on the context it is used +func (m *Cose) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Cose) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Cose) UnmarshalBinary(b []byte) error { + var res Cose + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go new file mode 100644 index 000000000000..8f9016050386 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CoseSchema COSE Schema +// +// # COSE for Rekord objects +// +// swagger:model coseSchema +type CoseSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go new file mode 100644 index 000000000000..9dafe29ce37b --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go @@ -0,0 +1,546 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CoseV001Schema cose v0.0.1 Schema +// +// # Schema for cose object +// +// swagger:model coseV001Schema +type CoseV001Schema struct { + + // data + Data *CoseV001SchemaData `json:"data,omitempty"` + + // The COSE Sign1 Message + // Format: byte + Message strfmt.Base64 `json:"message,omitempty"` + + // The public key that can verify the signature + // Required: true + // Format: byte + PublicKey *strfmt.Base64 `json:"publicKey"` +} + +// Validate validates this cose v001 schema +func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateData(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { + if swag.IsZero(m.Data) { // not required + return nil + } + + if m.Data != nil { + if err := m.Data.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +func (m *CoseV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema based on the context it is used +func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateData(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { + + if m.Data != nil { + + if swag.IsZero(m.Data) { // not required + return nil + } + + if err := m.Data.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001Schema) UnmarshalBinary(b []byte) error { + var res CoseV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaData Information about the content associated with the entry +// +// swagger:model CoseV001SchemaData +type CoseV001SchemaData struct { + + // Specifies the additional authenticated data required to verify the signature + // Format: byte + Aad strfmt.Base64 `json:"aad,omitempty"` + + // envelope hash + EnvelopeHash *CoseV001SchemaDataEnvelopeHash `json:"envelopeHash,omitempty"` + + // payload hash + PayloadHash *CoseV001SchemaDataPayloadHash `json:"payloadHash,omitempty"` +} + +// Validate validates this cose v001 schema data +func (m *CoseV001SchemaData) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelopeHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CoseV001SchemaData) validateEnvelopeHash(formats strfmt.Registry) error { + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if m.EnvelopeHash != nil { + if err := m.EnvelopeHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "envelopeHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "envelopeHash") + } + + return err + } + } + + return nil +} + +func (m *CoseV001SchemaData) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this cose v001 schema data based on the context it is used +func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { + + if m.EnvelopeHash != nil { + + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "envelopeHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "envelopeHash") + } + + return err + } + } + + return nil +} + +func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaData) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaData + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaDataEnvelopeHash Specifies the hash algorithm and value for the COSE envelope +// +// swagger:model CoseV001SchemaDataEnvelopeHash +type CoseV001SchemaDataEnvelopeHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the envelope + // Required: true + Value *string `json:"value"` +} + +// Validate validates this cose v001 schema data envelope hash +func (m *CoseV001SchemaDataEnvelopeHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum = append(coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // CoseV001SchemaDataEnvelopeHashAlgorithmSha256 captures enum value "sha256" + CoseV001SchemaDataEnvelopeHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *CoseV001SchemaDataEnvelopeHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"envelopeHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema data envelope hash based on the context it is used +func (m *CoseV001SchemaDataEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaDataEnvelopeHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaDataEnvelopeHash) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaDataEnvelopeHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaDataPayloadHash Specifies the hash algorithm and value for the content +// +// swagger:model CoseV001SchemaDataPayloadHash +type CoseV001SchemaDataPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the content + // Required: true + Value *string `json:"value"` +} + +// Validate validates this cose v001 schema data payload hash +func (m *CoseV001SchemaDataPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum = append(coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // CoseV001SchemaDataPayloadHashAlgorithmSha256 captures enum value "sha256" + CoseV001SchemaDataPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *CoseV001SchemaDataPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *CoseV001SchemaDataPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *CoseV001SchemaDataPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema data payload hash based on the context it is used +func (m *CoseV001SchemaDataPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaDataPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaDataPayloadHash) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaDataPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go new file mode 100644 index 000000000000..dde562054c37 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DSSE DSSE envelope +// +// swagger:model dsse +type DSSE struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec DSSESchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *DSSE) Kind() string { + return "dsse" +} + +// SetKind sets the kind of this subtype +func (m *DSSE) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *DSSE) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec DSSESchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result DSSE + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m DSSE) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec DSSESchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this dsse +func (m *DSSE) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DSSE) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *DSSE) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this dsse based on the context it is used +func (m *DSSE) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *DSSE) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSE) UnmarshalBinary(b []byte) error { + var res DSSE + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go new file mode 100644 index 000000000000..0dc5c87ed37a --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DSSESchema DSSE Schema +// +// log entry schema for dsse envelopes +// +// swagger:model dsseSchema +type DSSESchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go new file mode 100644 index 000000000000..8cad568daf86 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go @@ -0,0 +1,718 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DSSEV001Schema DSSE v0.0.1 Schema +// +// # Schema for DSSE envelopes +// +// swagger:model dsseV001Schema +type DSSEV001Schema struct { + + // envelope hash + EnvelopeHash *DSSEV001SchemaEnvelopeHash `json:"envelopeHash,omitempty"` + + // payload hash + PayloadHash *DSSEV001SchemaPayloadHash `json:"payloadHash,omitempty"` + + // proposed content + ProposedContent *DSSEV001SchemaProposedContent `json:"proposedContent,omitempty"` + + // extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings + // Read Only: true + // Min Items: 1 + Signatures []*DSSEV001SchemaSignaturesItems0 `json:"signatures"` +} + +// Validate validates this dsse v001 schema +func (m *DSSEV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelopeHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProposedContent(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignatures(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DSSEV001Schema) validateEnvelopeHash(formats strfmt.Registry) error { + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if m.EnvelopeHash != nil { + if err := m.EnvelopeHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("envelopeHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("envelopeHash") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("payloadHash") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) validateProposedContent(formats strfmt.Registry) error { + if swag.IsZero(m.ProposedContent) { // not required + return nil + } + + if m.ProposedContent != nil { + if err := m.ProposedContent.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("proposedContent") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("proposedContent") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) validateSignatures(formats strfmt.Registry) error { + if swag.IsZero(m.Signatures) { // not required + return nil + } + + iSignaturesSize := int64(len(m.Signatures)) + + if err := validate.MinItems("signatures", "body", iSignaturesSize, 1); err != nil { + return err + } + + for i := 0; i < len(m.Signatures); i++ { + if swag.IsZero(m.Signatures[i]) { // not 
required + continue + } + + if m.Signatures[i] != nil { + if err := m.Signatures[i].Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signatures" + "." + strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// ContextValidate validate this dsse v001 schema based on the context it is used +func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateProposedContent(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSignatures(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { + + if m.EnvelopeHash != nil { + + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("envelopeHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("envelopeHash") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("payloadHash") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error { + + if m.ProposedContent != nil { + + if swag.IsZero(m.ProposedContent) { // not required + return nil + } + + if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("proposedContent") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("proposedContent") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "signatures", "body", m.Signatures); err != nil { + return err + } + + for i := 0; i < len(m.Signatures); i++ { + + if m.Signatures[i] != nil { + + if swag.IsZero(m.Signatures[i]) { // not required + return nil + } + + if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signatures" + "." 
+ strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001Schema) UnmarshalBinary(b []byte) error { + var res DSSEV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// DSSEV001SchemaEnvelopeHash Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor +// +// swagger:model DSSEV001SchemaEnvelopeHash +type DSSEV001SchemaEnvelopeHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The value of the computed digest over the entire envelope + // Required: true + Value *string `json:"value"` +} + +// Validate validates this DSSE v001 schema envelope hash +func (m *DSSEV001SchemaEnvelopeHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum = append(dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // DSSEV001SchemaEnvelopeHashAlgorithmSha256 captures enum value "sha256" + DSSEV001SchemaEnvelopeHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *DSSEV001SchemaEnvelopeHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("envelopeHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this DSSE v001 schema envelope hash based on the context it is used +func (m *DSSEV001SchemaEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001SchemaEnvelopeHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001SchemaEnvelopeHash) UnmarshalBinary(b []byte) error { + var res DSSEV001SchemaEnvelopeHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// DSSEV001SchemaPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope +// +// swagger:model DSSEV001SchemaPayloadHash +type DSSEV001SchemaPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The value of the computed digest over the payload within the envelope + // Required: true + Value *string `json:"value"` +} + +// Validate validates this DSSE v001 schema payload hash +func (m *DSSEV001SchemaPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var dsseV001SchemaPayloadHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + dsseV001SchemaPayloadHashTypeAlgorithmPropEnum = append(dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // DSSEV001SchemaPayloadHashAlgorithmSha256 captures enum value "sha256" + DSSEV001SchemaPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *DSSEV001SchemaPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *DSSEV001SchemaPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *DSSEV001SchemaPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this DSSE v001 schema payload hash based on the context it is used +func (m *DSSEV001SchemaPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001SchemaPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001SchemaPayloadHash) UnmarshalBinary(b []byte) error { + var res DSSEV001SchemaPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// DSSEV001SchemaProposedContent DSSE v001 schema proposed content +// +// swagger:model DSSEV001SchemaProposedContent +type DSSEV001SchemaProposedContent struct { + + // DSSE envelope specified as a stringified JSON object + // Required: true + Envelope *string `json:"envelope"` + + // collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings + // Required: true + // Min Items: 1 + Verifiers []strfmt.Base64 `json:"verifiers"` +} + +// Validate validates this DSSE v001 schema proposed content +func (m *DSSEV001SchemaProposedContent) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelope(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVerifiers(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DSSEV001SchemaProposedContent) validateEnvelope(formats strfmt.Registry) error { + + if err := validate.Required("proposedContent"+"."+"envelope", "body", m.Envelope); err != nil { + return err + } + + return nil +} + +func (m *DSSEV001SchemaProposedContent) validateVerifiers(formats strfmt.Registry) error { + + if err := validate.Required("proposedContent"+"."+"verifiers", "body", m.Verifiers); err != nil { + return err + } + + iVerifiersSize := int64(len(m.Verifiers)) + + if err := validate.MinItems("proposedContent"+"."+"verifiers", "body", iVerifiersSize, 1); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this DSSE v001 schema proposed content based on context it is used +func (m *DSSEV001SchemaProposedContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001SchemaProposedContent) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001SchemaProposedContent) UnmarshalBinary(b []byte) error { + var res DSSEV001SchemaProposedContent + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// DSSEV001SchemaSignaturesItems0 a signature of the envelope's payload along with the verification material for the signature +// +// swagger:model DSSEV001SchemaSignaturesItems0 +type DSSEV001SchemaSignaturesItems0 struct { + + // base64 encoded signature of the payload + // Required: true + // Pattern: ^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$ + Signature *string `json:"signature"` + + // verification material that was used to verify the corresponding signature, specified as a base64 encoded string + // Required: true + // Format: byte + Verifier *strfmt.Base64 `json:"verifier"` +} + +// Validate validates this DSSE v001 schema signatures items0 +func (m *DSSEV001SchemaSignaturesItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := 
m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVerifier(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DSSEV001SchemaSignaturesItems0) validateSignature(formats strfmt.Registry) error { + + if err := validate.Required("signature", "body", m.Signature); err != nil { + return err + } + + if err := validate.Pattern("signature", "body", *m.Signature, `^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$`); err != nil { + return err + } + + return nil +} + +func (m *DSSEV001SchemaSignaturesItems0) validateVerifier(formats strfmt.Registry) error { + + if err := validate.Required("verifier", "body", m.Verifier); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this DSSE v001 schema signatures items0 based on context it is used +func (m *DSSEV001SchemaSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001SchemaSignaturesItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001SchemaSignaturesItems0) UnmarshalBinary(b []byte) error { + var res DSSEV001SchemaSignaturesItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go new file mode 100644 index 000000000000..ac14f2026e40 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go @@ -0,0 +1,69 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Error error +// +// swagger:model Error +type Error struct { + + // code + Code int64 `json:"code,omitempty"` + + // message + Message string `json:"message,omitempty"` +} + +// Validate validates this error +func (m *Error) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this error based on context it is used +func (m *Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Error) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Error) UnmarshalBinary(b []byte) error { + var res Error + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go new file mode 100644 index 000000000000..b3e1f8a3bda4 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Hashedrekord Hashed Rekord object +// +// swagger:model hashedrekord +type Hashedrekord struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HashedrekordSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Hashedrekord) Kind() string { + return "hashedrekord" +} + +// SetKind sets the kind of this subtype +func (m *Hashedrekord) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Hashedrekord) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HashedrekordSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Hashedrekord + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Hashedrekord) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HashedrekordSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this hashedrekord +func (m *Hashedrekord) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Hashedrekord) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Hashedrekord) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this hashedrekord based on the context it is used +func (m *Hashedrekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Hashedrekord) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Hashedrekord) UnmarshalBinary(b []byte) error { + var res Hashedrekord + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go new file mode 100644 index 000000000000..67fc8ababad8 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// HashedrekordSchema Hashedrekord Schema +// +// # Schema for Hashedrekord objects +// +// swagger:model hashedrekordSchema +type HashedrekordSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go new file mode 100644 index 000000000000..866842e5690a --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go @@ -0,0 +1,552 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HashedrekordV001Schema Hashed Rekor v0.0.1 Schema +// +// # Schema for Hashed Rekord object +// +// swagger:model hashedrekordV001Schema +type HashedrekordV001Schema struct { + + // data + // Required: true + Data *HashedrekordV001SchemaData `json:"data"` + + // signature + // Required: true + Signature *HashedrekordV001SchemaSignature `json:"signature"` +} + +// Validate validates this hashedrekord v001 schema +func (m *HashedrekordV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateData(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001Schema) validateData(formats strfmt.Registry) error { + + if err := validate.Required("data", "body", m.Data); err != nil { + return err + } + + if m.Data != nil { + if err := m.Data.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +func (m *HashedrekordV001Schema) validateSignature(formats strfmt.Registry) error { + + if err := validate.Required("signature", "body", m.Signature); err != nil { + return err + } + + if m.Signature != nil { + if err := m.Signature.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this hashedrekord v001 schema based on the context it is used +func (m *HashedrekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateData(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSignature(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { + + if m.Data != nil { + + if err := m.Data.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +func (m *HashedrekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { + + if m.Signature != nil { + + if err := m.Signature.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HashedrekordV001Schema) UnmarshalBinary(b []byte) error { + var res HashedrekordV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HashedrekordV001SchemaData Information about the content associated with the entry +// +// swagger:model HashedrekordV001SchemaData +type HashedrekordV001SchemaData struct { + + // hash + Hash *HashedrekordV001SchemaDataHash `json:"hash,omitempty"` +} + +// Validate validates this hashedrekord v001 schema data +func (m *HashedrekordV001SchemaData) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001SchemaData) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this hashedrekord v001 schema data based on the context it is used +func (m *HashedrekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." 
+ "hash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001SchemaData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HashedrekordV001SchemaData) UnmarshalBinary(b []byte) error { + var res HashedrekordV001SchemaData + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HashedrekordV001SchemaDataHash Specifies the hash algorithm and value for the content +// +// swagger:model HashedrekordV001SchemaDataHash +type HashedrekordV001SchemaDataHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256","sha384","sha512"] + Algorithm *string `json:"algorithm"` + + // The hash value for the content, as represented by a lower case hexadecimal string + // Required: true + Value *string `json:"value"` +} + +// Validate validates this hashedrekord v001 schema data hash +func (m *HashedrekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256","sha384","sha512"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum = append(hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // HashedrekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" + HashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" + + // HashedrekordV001SchemaDataHashAlgorithmSha384 captures enum value "sha384" + HashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384" + + // HashedrekordV001SchemaDataHashAlgorithmSha512 captures enum value "sha512" + HashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512" +) + +// prop value enum +func (m *HashedrekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *HashedrekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *HashedrekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this hashedrekord v001 schema data hash based on context it is used +func (m *HashedrekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface 
implementation +func (m *HashedrekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { + var res HashedrekordV001SchemaDataHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HashedrekordV001SchemaSignature Information about the detached signature associated with the entry +// +// swagger:model HashedrekordV001SchemaSignature +type HashedrekordV001SchemaSignature struct { + + // Specifies the content of the signature inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // public key + PublicKey *HashedrekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"` +} + +// Validate validates this hashedrekord v001 schema signature +func (m *HashedrekordV001SchemaSignature) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { + if swag.IsZero(m.PublicKey) { // not required + return nil + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." + "publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this hashedrekord v001 schema signature based on the context it is used +func (m *HashedrekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if swag.IsZero(m.PublicKey) { // not required + return nil + } + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." 
+ "publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001SchemaSignature) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HashedrekordV001SchemaSignature) UnmarshalBinary(b []byte) error { + var res HashedrekordV001SchemaSignature + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HashedrekordV001SchemaSignaturePublicKey The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information +// +// swagger:model HashedrekordV001SchemaSignaturePublicKey +type HashedrekordV001SchemaSignaturePublicKey struct { + + // Specifies the content of the public key or code signing certificate inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` +} + +// Validate validates this hashedrekord v001 schema signature public key +func (m *HashedrekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this hashedrekord v001 schema signature public key based on context it is used +func (m *HashedrekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HashedrekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { + var res HashedrekordV001SchemaSignaturePublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go new file mode 100644 index 000000000000..d19b8bc8c9d1 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Helm Helm chart +// +// swagger:model helm +type Helm struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HelmSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Helm) Kind() string { + return "helm" +} + +// SetKind sets the kind of this subtype +func (m *Helm) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Helm) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HelmSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Helm + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Helm) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HelmSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this helm +func (m *Helm) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Helm) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Helm) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this helm based on the context it is used +func (m *Helm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Helm) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Helm) UnmarshalBinary(b []byte) error { + var res Helm + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go new file mode 100644 index 000000000000..305a9e16fde8 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// HelmSchema Helm Schema +// +// # Schema for Helm objects +// +// swagger:model helmSchema +type HelmSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go new file mode 100644 index 000000000000..1d52e1e4f562 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go @@ -0,0 +1,703 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HelmV001Schema Helm v0.0.1 Schema +// +// # Schema for Helm object +// +// swagger:model helmV001Schema +type HelmV001Schema struct { + + // chart + // Required: true + Chart *HelmV001SchemaChart `json:"chart"` + + // public key + // Required: true + PublicKey *HelmV001SchemaPublicKey `json:"publicKey"` +} + +// Validate validates this helm v001 schema +func (m *HelmV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateChart(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001Schema) validateChart(formats strfmt.Registry) error { + + if err := validate.Required("chart", "body", m.Chart); err != nil { + return err + } + + if m.Chart != nil { + if err := m.Chart.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart") + } + + return err + } + } + + return nil +} + +func (m *HelmV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this helm v001 schema based on the context it is used +func (m *HelmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateChart(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfmt.Registry) error { + + if m.Chart != nil { + + if err := m.Chart.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart") + } + + return err + } + } + + return nil +} + +func (m *HelmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001Schema) UnmarshalBinary(b []byte) error { + var res HelmV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaChart Information about the Helm chart associated with the entry +// +// swagger:model HelmV001SchemaChart +type HelmV001SchemaChart struct { + + // hash + Hash *HelmV001SchemaChartHash `json:"hash,omitempty"` + + // provenance + // Required: true + Provenance *HelmV001SchemaChartProvenance `json:"provenance"` +} + +// Validate validates this helm v001 schema chart +func (m *HelmV001SchemaChart) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProvenance(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChart) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *HelmV001SchemaChart) validateProvenance(formats strfmt.Registry) error { + + if err := validate.Required("chart"+"."+"provenance", "body", m.Provenance); err != nil { + return err + } + + if m.Provenance != nil { + if err := m.Provenance.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "provenance") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." 
+ "provenance") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this helm v001 schema chart based on the context it is used +func (m *HelmV001SchemaChart) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateProvenance(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *HelmV001SchemaChart) contextValidateProvenance(ctx context.Context, formats strfmt.Registry) error { + + if m.Provenance != nil { + + if err := m.Provenance.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "provenance") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "provenance") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaChart) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaChart) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaChart + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaChartHash Specifies the hash algorithm and value for the chart +// +// swagger:model HelmV001SchemaChartHash +type HelmV001SchemaChartHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the chart + // Required: true + Value *string `json:"value"` +} + +// Validate validates this helm v001 schema chart hash +func (m *HelmV001SchemaChartHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var helmV001SchemaChartHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + helmV001SchemaChartHashTypeAlgorithmPropEnum = append(helmV001SchemaChartHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // HelmV001SchemaChartHashAlgorithmSha256 captures enum value "sha256" + HelmV001SchemaChartHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *HelmV001SchemaChartHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, helmV001SchemaChartHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *HelmV001SchemaChartHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("chart"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("chart"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *HelmV001SchemaChartHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("chart"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this helm v001 schema chart hash based on the context it is used +func (m *HelmV001SchemaChartHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaChartHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaChartHash) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaChartHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaChartProvenance The provenance entry associated with the signed Helm Chart +// +// swagger:model HelmV001SchemaChartProvenance +type HelmV001SchemaChartProvenance struct { + + // Specifies the content of the provenance file inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // signature + Signature *HelmV001SchemaChartProvenanceSignature `json:"signature,omitempty"` +} + +// Validate validates this helm v001 schema chart provenance +func (m *HelmV001SchemaChartProvenance) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChartProvenance) validateSignature(formats strfmt.Registry) error { + if swag.IsZero(m.Signature) { // not required + return nil + } + + if m.Signature != nil { + if err := m.Signature.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "provenance" + "." + "signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "provenance" + "." 
+ "signature") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this helm v001 schema chart provenance based on the context it is used +func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateSignature(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChartProvenance) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { + + if m.Signature != nil { + + if swag.IsZero(m.Signature) { // not required + return nil + } + + if err := m.Signature.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "provenance" + "." + "signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "provenance" + "." + "signature") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaChartProvenance) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaChartProvenance) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaChartProvenance + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaChartProvenanceSignature Information about the included signature in the provenance file +// +// swagger:model HelmV001SchemaChartProvenanceSignature +type HelmV001SchemaChartProvenanceSignature struct { + + // Specifies the signature embedded within the provenance file + // Required: true + // Read Only: true + // Format: byte + Content strfmt.Base64 `json:"content"` +} + +// Validate validates this helm v001 schema chart provenance signature +func (m *HelmV001SchemaChartProvenanceSignature) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChartProvenanceSignature) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this helm v001 schema chart provenance signature based on the context it is used +func (m *HelmV001SchemaChartProvenanceSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateContent(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *HelmV001SchemaChartProvenanceSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaChartProvenanceSignature) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaChartProvenanceSignature) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaChartProvenanceSignature + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaPublicKey The public key that can verify the package signature +// +// swagger:model HelmV001SchemaPublicKey +type HelmV001SchemaPublicKey struct { + + // Specifies the content of the public key inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this helm v001 schema public key +func (m *HelmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this helm v001 schema public key based on context it is used +func (m *HelmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaPublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go new file mode 100644 index 000000000000..c555eb2da643 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// InactiveShardLogInfo inactive shard log info +// +// swagger:model InactiveShardLogInfo +type InactiveShardLogInfo struct { + + // The current hash value stored at the root of the merkle tree + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + RootHash *string `json:"rootHash"` + + // The current signed tree head + // Required: true + SignedTreeHead *string `json:"signedTreeHead"` + + // The current treeID + // Required: true + // Pattern: ^[0-9]+$ + TreeID *string `json:"treeID"` + + // The current number of nodes in the merkle tree + // Required: true + // Minimum: 1 + TreeSize *int64 `json:"treeSize"` +} + +// Validate validates this inactive shard log info +func (m *InactiveShardLogInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateRootHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignedTreeHead(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeSize(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *InactiveShardLogInfo) validateRootHash(formats strfmt.Registry) error { + + if err := validate.Required("rootHash", "body", m.RootHash); err != nil { + return err + } + + if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateSignedTreeHead(formats strfmt.Registry) error { + + if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateTreeID(formats strfmt.Registry) error { + + if err := validate.Required("treeID", "body", m.TreeID); err != nil { + return err + } + + if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateTreeSize(formats strfmt.Registry) error { + + if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { + return err + } + + if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this inactive shard log info based on context it is used +func (m *InactiveShardLogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *InactiveShardLogInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *InactiveShardLogInfo) UnmarshalBinary(b []byte) error { + var res InactiveShardLogInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go new file mode 100644 index 000000000000..86f0d7b94e21 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go @@ -0,0 +1,179 @@ +// Code generated by 
go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// InclusionProof inclusion proof +// +// swagger:model InclusionProof +type InclusionProof struct { + + // The checkpoint (signed tree head) that the inclusion proof is based on + // Required: true + Checkpoint *string `json:"checkpoint"` + + // A list of hashes required to compute the inclusion proof, sorted in order from leaf to root + // Required: true + Hashes []string `json:"hashes"` + + // The index of the entry in the transparency log + // Required: true + // Minimum: 0 + LogIndex *int64 `json:"logIndex"` + + // The hash value stored at the root of the merkle tree at the time the proof was generated + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + RootHash *string `json:"rootHash"` + + // The size of the merkle tree at the time the inclusion proof was generated + // Required: true + // Minimum: 1 + TreeSize *int64 `json:"treeSize"` +} + +// Validate validates this inclusion proof +func (m *InclusionProof) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCheckpoint(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHashes(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLogIndex(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRootHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeSize(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *InclusionProof) validateCheckpoint(formats strfmt.Registry) error { + + if err := validate.Required("checkpoint", "body", m.Checkpoint); err != nil { + return err + } + + return nil +} + +func (m *InclusionProof) validateHashes(formats strfmt.Registry) error { + + if err := validate.Required("hashes", "body", m.Hashes); err != nil { + return err + } + + for i := 0; i < len(m.Hashes); i++ { + + if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + } + + return nil +} + +func (m *InclusionProof) validateLogIndex(formats strfmt.Registry) error { + + if err := validate.Required("logIndex", "body", m.LogIndex); err != nil { + return err + } + + if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil { + return err + } + + return nil +} + +func (m *InclusionProof) validateRootHash(formats strfmt.Registry) error { + + if err := validate.Required("rootHash", "body", m.RootHash); err != nil { + return err + } + + if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +func (m *InclusionProof) validateTreeSize(formats strfmt.Registry) error { + + if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { + return err + } + + if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this inclusion proof based on context it is used +func (m *InclusionProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *InclusionProof) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *InclusionProof) UnmarshalBinary(b []byte) error { + var res InclusionProof + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go new file mode 100644 index 000000000000..4f208de1d55d --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Intoto Intoto object +// +// swagger:model intoto +type Intoto struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec IntotoSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Intoto) Kind() string { + return "intoto" +} + +// SetKind sets the kind of this subtype +func (m *Intoto) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Intoto) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec IntotoSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Intoto + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Intoto) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec IntotoSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this intoto +func (m *Intoto) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Intoto) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Intoto) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this intoto based on the context it is used +func (m *Intoto) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Intoto) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Intoto) UnmarshalBinary(b []byte) error { + var res Intoto + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go new file mode 100644 index 000000000000..142f0a19429c --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IntotoSchema Intoto Schema +// +// # Intoto for Rekord objects +// +// swagger:model intotoSchema +type IntotoSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go new file mode 100644 index 000000000000..d3a6ca42f4c2 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go @@ -0,0 +1,539 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// IntotoV001Schema intoto v0.0.1 Schema +// +// # Schema for intoto object +// +// swagger:model intotoV001Schema +type IntotoV001Schema struct { + + // content + // Required: true + Content *IntotoV001SchemaContent `json:"content"` + + // The public key that can verify the signature + // Required: true + // Format: byte + PublicKey *strfmt.Base64 `json:"publicKey"` +} + +// Validate validates this intoto v001 schema +func (m *IntotoV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV001Schema) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("content", "body", m.Content); err != nil { + return err + } + + if m.Content != nil { + if err := m.Content.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content") + } + + return err + } + } + + return nil +} + +func (m *IntotoV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v001 schema based on the context it is used +func (m *IntotoV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateContent(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *IntotoV001Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { + + if m.Content != nil { + + if err := m.Content.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001Schema) UnmarshalBinary(b []byte) error { + var res IntotoV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV001SchemaContent intoto v001 schema content +// +// swagger:model IntotoV001SchemaContent +type IntotoV001SchemaContent struct { + + // envelope + Envelope string `json:"envelope,omitempty"` + + // hash + Hash *IntotoV001SchemaContentHash `json:"hash,omitempty"` + + // payload hash + PayloadHash *IntotoV001SchemaContentPayloadHash `json:"payloadHash,omitempty"` +} + +// Validate validates this intoto v001 schema content +func (m *IntotoV001SchemaContent) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV001SchemaContent) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *IntotoV001SchemaContent) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this intoto v001 schema content based on the context it is used +func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001SchemaContent) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001SchemaContent) UnmarshalBinary(b []byte) error { + var res IntotoV001SchemaContent + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV001SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope; this is computed by the rekor server, client-provided values are ignored +// +// swagger:model IntotoV001SchemaContentHash +type IntotoV001SchemaContentHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the archive + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v001 schema content hash +func (m *IntotoV001SchemaContentHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV001SchemaContentHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV001SchemaContentHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV001SchemaContentHashAlgorithmSha256 captures enum value "sha256" + IntotoV001SchemaContentHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV001SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV001SchemaContentHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV001SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV001SchemaContentHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v001 schema content hash based on the context it is used +func (m *IntotoV001SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001SchemaContentHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001SchemaContentHash) UnmarshalBinary(b []byte) error { + var res IntotoV001SchemaContentHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope; this is computed by the rekor server, client-provided values are ignored +// +// swagger:model IntotoV001SchemaContentPayloadHash +type IntotoV001SchemaContentPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the envelope's payload + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v001 schema content payload hash +func (m *IntotoV001SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV001SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" + IntotoV001SchemaContentPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV001SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v001 schema content payload hash based on the context it is used +func (m *IntotoV001SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { + var res IntotoV001SchemaContentPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go new file mode 100644 index 000000000000..4ea4dcc58a59 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go @@ -0,0 +1,798 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// IntotoV002Schema intoto v0.0.2 Schema +// +// # Schema for intoto object +// +// swagger:model intotoV002Schema +type IntotoV002Schema struct { + + // content + // Required: true + Content *IntotoV002SchemaContent `json:"content"` +} + +// Validate validates this intoto v002 schema +func (m *IntotoV002Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002Schema) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("content", "body", m.Content); err != nil { + return err + } + + if m.Content != nil { + if err := m.Content.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this intoto v002 schema based on the context it is used +func (m *IntotoV002Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateContent(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { + + if m.Content != nil { + + if err := m.Content.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002Schema) UnmarshalBinary(b []byte) error { + var res IntotoV002Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContent intoto v002 schema content +// +// swagger:model IntotoV002SchemaContent +type IntotoV002SchemaContent struct { + + // envelope + // Required: true + Envelope *IntotoV002SchemaContentEnvelope `json:"envelope"` + + // hash + Hash *IntotoV002SchemaContentHash `json:"hash,omitempty"` + + // payload hash + PayloadHash *IntotoV002SchemaContentPayloadHash `json:"payloadHash,omitempty"` +} + +// Validate validates this intoto v002 schema content +func (m *IntotoV002SchemaContent) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelope(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *IntotoV002SchemaContent) validateEnvelope(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"envelope", "body", m.Envelope); err != nil { + return err + } + + if m.Envelope != nil { + if err := m.Envelope.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "envelope") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "envelope") + } + + return err + } + } + + return nil +} + +func (m *IntotoV002SchemaContent) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *IntotoV002SchemaContent) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this intoto v002 schema content based on the context it is used +func (m *IntotoV002SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEnvelope(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, formats strfmt.Registry) error { + + if m.Envelope != nil { + + if err := m.Envelope.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "envelope") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "envelope") + } + + return err + } + } + + return nil +} + +func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." 
+ "hash") + } + + return err + } + } + + return nil +} + +func (m *IntotoV002SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContent) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContent) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContent + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContentEnvelope dsse envelope +// +// swagger:model IntotoV002SchemaContentEnvelope +type IntotoV002SchemaContentEnvelope struct { + + // payload of the envelope + // Format: byte + Payload strfmt.Base64 `json:"payload,omitempty"` + + // type describing the payload + // Required: true + PayloadType *string `json:"payloadType"` + + // collection of all signatures of the envelope's payload + // Required: true + // Min Items: 1 + Signatures []*IntotoV002SchemaContentEnvelopeSignaturesItems0 `json:"signatures"` +} + +// Validate validates this intoto v002 schema content envelope +func (m *IntotoV002SchemaContentEnvelope) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePayloadType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignatures(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002SchemaContentEnvelope) validatePayloadType(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"envelope"+"."+"payloadType", "body", m.PayloadType); err != nil { + return err + } + + return nil +} + +func (m *IntotoV002SchemaContentEnvelope) validateSignatures(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"envelope"+"."+"signatures", "body", m.Signatures); err != nil { + return err + } + + iSignaturesSize := int64(len(m.Signatures)) + + if err := validate.MinItems("content"+"."+"envelope"+"."+"signatures", "body", iSignaturesSize, 1); err != nil { + return err + } + + for i := 0; i < len(m.Signatures); i++ { + if swag.IsZero(m.Signatures[i]) { // not required + continue + } + + if m.Signatures[i] != nil { + if err := m.Signatures[i].Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." 
+ strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// ContextValidate validate this intoto v002 schema content envelope based on the context it is used +func (m *IntotoV002SchemaContentEnvelope) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateSignatures(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002SchemaContentEnvelope) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Signatures); i++ { + + if m.Signatures[i] != nil { + + if swag.IsZero(m.Signatures[i]) { // not required + return nil + } + + if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContentEnvelope) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContentEnvelope) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContentEnvelope + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContentEnvelopeSignaturesItems0 a signature of the envelope's payload along with the public key for the signature +// +// swagger:model IntotoV002SchemaContentEnvelopeSignaturesItems0 +type IntotoV002SchemaContentEnvelopeSignaturesItems0 struct { + + // optional id of the key used to create the signature + Keyid string `json:"keyid,omitempty"` + + // public key that corresponds to this signature + // Required: true + // Format: byte + PublicKey *strfmt.Base64 `json:"publicKey"` + + // signature of the payload + // Required: true + // Format: byte + Sig *strfmt.Base64 `json:"sig"` +} + +// Validate validates this intoto v002 schema content envelope signatures items0 +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + return nil +} + +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validateSig(formats strfmt.Registry) error { + + if err := validate.Required("sig", "body", m.Sig); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this intoto v002 schema content envelope signatures items0 based on context it is used +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContentEnvelopeSignaturesItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope +// +// swagger:model IntotoV002SchemaContentHash +type IntotoV002SchemaContentHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the archive + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v002 schema content hash +func (m *IntotoV002SchemaContentHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV002SchemaContentHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV002SchemaContentHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV002SchemaContentHashAlgorithmSha256 captures enum value "sha256" + IntotoV002SchemaContentHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV002SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV002SchemaContentHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV002SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV002SchemaContentHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v002 schema content hash based on the context it is used +func (m *IntotoV002SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContentHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContentHash) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContentHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope +// +// swagger:model IntotoV002SchemaContentPayloadHash +type IntotoV002SchemaContentPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value of the payload + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v002 schema content payload hash +func (m *IntotoV002SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV002SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" + IntotoV002SchemaContentPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV002SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v002 schema content payload hash based on the context it is used +func (m *IntotoV002SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContentPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go new file mode 100644 index 000000000000..3df3d21b8a4f --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Jar Java Archive (JAR) +// +// swagger:model jar +type Jar struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec JarSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Jar) Kind() string { + return "jar" +} + +// SetKind sets the kind of this subtype +func (m *Jar) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Jar) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec JarSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Jar + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Jar) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec JarSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this jar +func (m *Jar) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Jar) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Jar) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this jar based on the context it is used +func (m *Jar) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Jar) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Jar) UnmarshalBinary(b []byte) error { + var res Jar + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go new file mode 100644 index 000000000000..0cd3126ef515 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// JarSchema JAR Schema +// +// # Schema for JAR objects +// +// swagger:model jarSchema +type JarSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go new file mode 100644 index 000000000000..64335c368793 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go @@ -0,0 +1,602 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// JarV001Schema JAR v0.0.1 Schema +// +// # Schema for JAR entries +// +// swagger:model jarV001Schema +type JarV001Schema struct { + + // archive + // Required: true + Archive *JarV001SchemaArchive `json:"archive"` + + // signature + Signature *JarV001SchemaSignature `json:"signature,omitempty"` +} + +// Validate validates this jar v001 schema +func (m *JarV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateArchive(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001Schema) validateArchive(formats strfmt.Registry) error { + + if err := validate.Required("archive", "body", m.Archive); err != nil { + return err + } + + if m.Archive != nil { + if err := m.Archive.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("archive") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("archive") + } + + return err + } + } + + return nil +} + +func (m *JarV001Schema) validateSignature(formats strfmt.Registry) error { + if swag.IsZero(m.Signature) { // not required + return nil + } + + if m.Signature != nil { + if err := m.Signature.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this jar v001 schema based on the context it is used +func (m *JarV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateArchive(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSignature(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strfmt.Registry) error { + + if m.Archive != nil { + + if err := m.Archive.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("archive") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("archive") + } + + return err + } + } + + return nil +} + +func (m *JarV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { + + if m.Signature != nil { + + if swag.IsZero(m.Signature) { // not required + return nil + } + + if err := m.Signature.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001Schema) UnmarshalBinary(b []byte) error { + var res JarV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// JarV001SchemaArchive Information about the archive associated with the entry +// +// swagger:model JarV001SchemaArchive +type JarV001SchemaArchive struct { + + // Specifies the archive inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // hash + Hash *JarV001SchemaArchiveHash `json:"hash,omitempty"` +} + +// Validate validates this jar v001 schema archive +func (m *JarV001SchemaArchive) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaArchive) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("archive" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("archive" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this jar v001 schema archive based on the context it is used +func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaArchive) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("archive" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("archive" + "." 
+ "hash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001SchemaArchive) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001SchemaArchive) UnmarshalBinary(b []byte) error { + var res JarV001SchemaArchive + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// JarV001SchemaArchiveHash Specifies the hash algorithm and value encompassing the entire signed archive +// +// swagger:model JarV001SchemaArchiveHash +type JarV001SchemaArchiveHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the archive + // Required: true + Value *string `json:"value"` +} + +// Validate validates this jar v001 schema archive hash +func (m *JarV001SchemaArchiveHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var jarV001SchemaArchiveHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + jarV001SchemaArchiveHashTypeAlgorithmPropEnum = append(jarV001SchemaArchiveHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // JarV001SchemaArchiveHashAlgorithmSha256 captures enum value "sha256" + JarV001SchemaArchiveHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *JarV001SchemaArchiveHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, jarV001SchemaArchiveHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *JarV001SchemaArchiveHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("archive"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("archive"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *JarV001SchemaArchiveHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("archive"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this jar v001 schema archive hash based on context it is used +func (m *JarV001SchemaArchiveHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001SchemaArchiveHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001SchemaArchiveHash) UnmarshalBinary(b []byte) error { + var res JarV001SchemaArchiveHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// JarV001SchemaSignature Information about the included signature in the JAR file +// +// swagger:model JarV001SchemaSignature +type JarV001SchemaSignature struct { + + // Specifies the PKCS7 signature embedded within the JAR file + // Required: true + 
// Read Only: true + // Format: byte + Content strfmt.Base64 `json:"content"` + + // public key + // Required: true + PublicKey *JarV001SchemaSignaturePublicKey `json:"publicKey"` +} + +// Validate validates this jar v001 schema signature +func (m *JarV001SchemaSignature) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaSignature) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +func (m *JarV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." + "publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this jar v001 schema signature based on the context it is used +func (m *JarV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateContent(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +func (m *JarV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." 
+ "publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001SchemaSignature) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001SchemaSignature) UnmarshalBinary(b []byte) error { + var res JarV001SchemaSignature + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// JarV001SchemaSignaturePublicKey The X509 certificate containing the public key JAR which verifies the signature of the JAR +// +// swagger:model JarV001SchemaSignaturePublicKey +type JarV001SchemaSignaturePublicKey struct { + + // Specifies the content of the X509 certificate containing the public key used to verify the signature + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this jar v001 schema signature public key +func (m *JarV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this jar v001 schema signature public key based on the context it is used +func (m *JarV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { + var res JarV001SchemaSignaturePublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go new file mode 100644 index 000000000000..65cf4f4b77e9 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go @@ -0,0 +1,474 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// LogEntry log entry +// +// swagger:model LogEntry +type LogEntry map[string]LogEntryAnon + +// Validate validates this log entry +func (m LogEntry) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if swag.IsZero(m[k]) { // not required + continue + } + if val, ok := m[k]; ok { + if err := val.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName(k) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName(k) + } + + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this log entry based on the context it is used +func (m LogEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for k := range m { + + if val, ok := m[k]; ok { + if err := val.ContextValidate(ctx, formats); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// LogEntryAnon log entry anon +// +// swagger:model LogEntryAnon +type LogEntryAnon struct { + + // attestation + Attestation *LogEntryAnonAttestation `json:"attestation,omitempty"` + + // body + // Required: true + Body any `json:"body"` + + // The time the entry was added to the log as a Unix timestamp in seconds + // Required: true + IntegratedTime *int64 `json:"integratedTime"` + + // This is the SHA256 hash of the DER-encoded public key for the log at the time the entry was included in the log + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + LogID *string `json:"logID"` + + // log index + // Required: true + // Minimum: 0 + LogIndex *int64 `json:"logIndex"` + + // verification + Verification *LogEntryAnonVerification `json:"verification,omitempty"` +} + +// Validate validates this log entry anon +func (m *LogEntryAnon) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAttestation(formats); err != nil { + res = append(res, err) + } + + if err := m.validateBody(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIntegratedTime(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLogID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLogIndex(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVerification(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogEntryAnon) validateAttestation(formats strfmt.Registry) error { + if swag.IsZero(m.Attestation) { // not required + return nil + } + + if m.Attestation != nil { + if err := m.Attestation.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("attestation") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("attestation") + } + + return err + } + } + + return nil +} + +func (m *LogEntryAnon) validateBody(formats strfmt.Registry) error { + + if m.Body == nil { + return errors.Required("body", "body", nil) + } + + return nil +} + +func (m *LogEntryAnon) validateIntegratedTime(formats strfmt.Registry) error { + + if err := validate.Required("integratedTime", "body", m.IntegratedTime); err != nil { + return err + } + + return nil +} + +func (m *LogEntryAnon) validateLogID(formats strfmt.Registry) error { + + if err := validate.Required("logID", "body", m.LogID); err != nil { + return err + } + + if err := validate.Pattern("logID", "body", *m.LogID, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +func (m *LogEntryAnon) validateLogIndex(formats strfmt.Registry) error { + + if err := validate.Required("logIndex", "body", m.LogIndex); err != nil { + return err + } + + if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil { + return err + } + + return nil +} + +func (m *LogEntryAnon) validateVerification(formats strfmt.Registry) error { + if swag.IsZero(m.Verification) { // not required + return nil + } + + if m.Verification != nil { + if err := m.Verification.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("verification") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("verification") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this log entry anon based on the context it is used +func (m *LogEntryAnon) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateAttestation(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateVerification(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats strfmt.Registry) error { + + if m.Attestation != nil { + + if swag.IsZero(m.Attestation) { // not required + return nil + } + + if err := m.Attestation.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("attestation") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("attestation") + } + + return err + } + } + + return nil +} + +func (m *LogEntryAnon) contextValidateVerification(ctx context.Context, formats strfmt.Registry) error { + + if m.Verification != nil { + + if swag.IsZero(m.Verification) { // not required + return nil + } + + if err := m.Verification.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("verification") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("verification") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LogEntryAnon) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LogEntryAnon) UnmarshalBinary(b []byte) error { + var res LogEntryAnon + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// LogEntryAnonAttestation log entry anon attestation +// +// swagger:model LogEntryAnonAttestation +type LogEntryAnonAttestation struct { + + // data + // Format: byte + Data strfmt.Base64 `json:"data,omitempty"` +} + +// Validate validates this log entry anon attestation +func (m *LogEntryAnonAttestation) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this log entry anon attestation based on context it is used +func (m *LogEntryAnonAttestation) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *LogEntryAnonAttestation) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LogEntryAnonAttestation) UnmarshalBinary(b []byte) error { + var res LogEntryAnonAttestation + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// LogEntryAnonVerification log entry anon verification +// +// swagger:model LogEntryAnonVerification +type LogEntryAnonVerification struct { + + // inclusion proof + InclusionProof *InclusionProof `json:"inclusionProof,omitempty"` + + // Signature over the logID, logIndex, body and integratedTime. + // Format: byte + SignedEntryTimestamp strfmt.Base64 `json:"signedEntryTimestamp,omitempty"` +} + +// Validate validates this log entry anon verification +func (m *LogEntryAnonVerification) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateInclusionProof(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogEntryAnonVerification) validateInclusionProof(formats strfmt.Registry) error { + if swag.IsZero(m.InclusionProof) { // not required + return nil + } + + if m.InclusionProof != nil { + if err := m.InclusionProof.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("verification" + "." + "inclusionProof") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("verification" + "." + "inclusionProof") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this log entry anon verification based on the context it is used +func (m *LogEntryAnonVerification) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateInclusionProof(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *LogEntryAnonVerification) contextValidateInclusionProof(ctx context.Context, formats strfmt.Registry) error { + + if m.InclusionProof != nil { + + if swag.IsZero(m.InclusionProof) { // not required + return nil + } + + if err := m.InclusionProof.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("verification" + "." + "inclusionProof") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("verification" + "." + "inclusionProof") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LogEntryAnonVerification) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LogEntryAnonVerification) UnmarshalBinary(b []byte) error { + var res LogEntryAnonVerification + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go new file mode 100644 index 000000000000..6cbb9d64a2fc --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go @@ -0,0 +1,230 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + stderrors "errors" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// LogInfo log info +// +// swagger:model LogInfo +type LogInfo struct { + + // inactive shards + InactiveShards []*InactiveShardLogInfo `json:"inactiveShards"` + + // The current hash value stored at the root of the merkle tree + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + RootHash *string `json:"rootHash"` + + // The current signed tree head + // Required: true + SignedTreeHead *string `json:"signedTreeHead"` + + // The current treeID + // Required: true + // Pattern: ^[0-9]+$ + TreeID *string `json:"treeID"` + + // The current number of nodes in the merkle tree + // Required: true + // Minimum: 1 + TreeSize *int64 `json:"treeSize"` +} + +// Validate validates this log info +func (m *LogInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateInactiveShards(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRootHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignedTreeHead(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeSize(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *LogInfo) validateInactiveShards(formats strfmt.Registry) error { + if swag.IsZero(m.InactiveShards) { // not required + return nil + } + + for i := 0; i < len(m.InactiveShards); i++ { + if swag.IsZero(m.InactiveShards[i]) { // not required + continue + } + + if m.InactiveShards[i] != nil { + if err := m.InactiveShards[i].Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("inactiveShards" + "." 
+ strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +func (m *LogInfo) validateRootHash(formats strfmt.Registry) error { + + if err := validate.Required("rootHash", "body", m.RootHash); err != nil { + return err + } + + if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +func (m *LogInfo) validateSignedTreeHead(formats strfmt.Registry) error { + + if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil { + return err + } + + return nil +} + +func (m *LogInfo) validateTreeID(formats strfmt.Registry) error { + + if err := validate.Required("treeID", "body", m.TreeID); err != nil { + return err + } + + if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { + return err + } + + return nil +} + +func (m *LogInfo) validateTreeSize(formats strfmt.Registry) error { + + if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { + return err + } + + if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this log info based on the context it is used +func (m *LogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateInactiveShards(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.InactiveShards); i++ { + + if m.InactiveShards[i] != nil { + + if swag.IsZero(m.InactiveShards[i]) { // not required + return nil + } + + if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LogInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LogInfo) UnmarshalBinary(b []byte) error { + var res LogInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go new file mode 100644 index 000000000000..5b734a5fff30 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go @@ -0,0 +1,195 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// ProposedEntry proposed entry +// +// swagger:discriminator ProposedEntry kind +type ProposedEntry interface { + runtime.Validatable + runtime.ContextValidatable + + // kind + // Required: true + Kind() string + SetKind(string) + + // AdditionalProperties in base type shoud be handled just like regular properties + // At this moment, the base type property is pushed down to the subtype +} + +type proposedEntry struct { + kindField string +} + +// Kind gets the kind of this polymorphic type +func (m *proposedEntry) Kind() string { + return "ProposedEntry" +} + +// SetKind sets the kind of this polymorphic type +func (m *proposedEntry) SetKind(val string) { +} + +// UnmarshalProposedEntrySlice unmarshals polymorphic slices of ProposedEntry +func UnmarshalProposedEntrySlice(reader io.Reader, consumer runtime.Consumer) ([]ProposedEntry, error) { + var elements []json.RawMessage + if err := consumer.Consume(reader, &elements); err != nil { + return nil, err + } + + var result []ProposedEntry + for _, element := range elements { + obj, err := unmarshalProposedEntry(element, consumer) + if err != nil { + return nil, err + } + result = append(result, obj) + } + return result, nil +} + +// UnmarshalProposedEntry unmarshals polymorphic ProposedEntry +func UnmarshalProposedEntry(reader io.Reader, consumer runtime.Consumer) (ProposedEntry, error) { + // we need to read this twice, so first into a buffer + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + return unmarshalProposedEntry(data, consumer) +} + +func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEntry, error) { + buf := bytes.NewBuffer(data) + buf2 := bytes.NewBuffer(data) + + // the first time this is read is to fetch the value of the kind property. 
+ var getType struct { + Kind string `json:"kind"` + } + if err := consumer.Consume(buf, &getType); err != nil { + return nil, err + } + + if err := validate.RequiredString("kind", "body", getType.Kind); err != nil { + return nil, err + } + + // The value of kind is used to determine which type to create and unmarshal the data into + switch getType.Kind { + case "ProposedEntry": + var result proposedEntry + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "alpine": + var result Alpine + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "cose": + var result Cose + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "dsse": + var result DSSE + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "hashedrekord": + var result Hashedrekord + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "helm": + var result Helm + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "intoto": + var result Intoto + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "jar": + var result Jar + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "rekord": + var result Rekord + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "rfc3161": + var result Rfc3161 + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "rpm": + var result Rpm + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + case "tuf": + var result TUF + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil + } + return nil, errors.New(422, "invalid kind value: %q", getType.Kind) +} + +// Validate validates this proposed entry +func (m *proposedEntry) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this proposed entry based on context it is used +func (m *proposedEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go new file mode 100644 index 000000000000..81c8ff054541 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Rekord Rekord object +// +// swagger:model rekord +type Rekord struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec RekordSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Rekord) Kind() string { + return "rekord" +} + +// SetKind sets the kind of this subtype +func (m *Rekord) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Rekord) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec RekordSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Rekord + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Rekord) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec RekordSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this rekord +func (m *Rekord) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Rekord) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Rekord) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this rekord based on the context it is used +func (m *Rekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Rekord) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Rekord) UnmarshalBinary(b []byte) error { + var res Rekord + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go new file mode 100644 index 000000000000..9c33e4044e92 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// RekordSchema Rekor Schema +// +// # Schema for Rekord objects +// +// swagger:model rekordSchema +type RekordSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go new file mode 100644 index 000000000000..0f4977ca7364 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go @@ -0,0 +1,644 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// RekordV001Schema Rekor v0.0.1 Schema +// +// # Schema for Rekord object +// +// swagger:model rekordV001Schema +type RekordV001Schema struct { + + // data + // Required: true + Data *RekordV001SchemaData `json:"data"` + + // signature + // Required: true + Signature *RekordV001SchemaSignature `json:"signature"` +} + +// Validate validates this rekord v001 schema +func (m *RekordV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateData(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RekordV001Schema) validateData(formats strfmt.Registry) error { + + if err := validate.Required("data", "body", m.Data); err != nil { + return err + } + + if m.Data != nil { + if err := m.Data.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +func (m *RekordV001Schema) validateSignature(formats strfmt.Registry) error { + + if err := validate.Required("signature", "body", m.Signature); err != nil { + return err + } + + if m.Signature != nil { + if err := m.Signature.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this rekord v001 schema based on the context it is used +func (m *RekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateData(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSignature(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { + + if m.Data != nil { + + if err := m.Data.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +func (m *RekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { + + if m.Signature != nil { + + if err := m.Signature.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *RekordV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RekordV001Schema) UnmarshalBinary(b []byte) error { + var res RekordV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RekordV001SchemaData Information about the content associated with the entry +// +// swagger:model RekordV001SchemaData +type RekordV001SchemaData struct { + + // Specifies the content inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // hash + Hash *RekordV001SchemaDataHash `json:"hash,omitempty"` +} + +// Validate validates this rekord v001 schema data +func (m *RekordV001SchemaData) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RekordV001SchemaData) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this rekord v001 schema data based on the context it is used +func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." 
+ "hash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *RekordV001SchemaData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RekordV001SchemaData) UnmarshalBinary(b []byte) error { + var res RekordV001SchemaData + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RekordV001SchemaDataHash Specifies the hash algorithm and value for the content +// +// swagger:model RekordV001SchemaDataHash +type RekordV001SchemaDataHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the content + // Required: true + Value *string `json:"value"` +} + +// Validate validates this rekord v001 schema data hash +func (m *RekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var rekordV001SchemaDataHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + rekordV001SchemaDataHashTypeAlgorithmPropEnum = append(rekordV001SchemaDataHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // RekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" + RekordV001SchemaDataHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *RekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, rekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *RekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *RekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this rekord v001 schema data hash based on the context it is used +func (m *RekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// MarshalBinary interface implementation +func (m *RekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { + var res RekordV001SchemaDataHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RekordV001SchemaSignature Information about the detached signature associated with the entry +// +// swagger:model RekordV001SchemaSignature +type RekordV001SchemaSignature struct { + + // Specifies the content of the signature inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` + + // Specifies the format of the signature + // Required: true + // Enum: ["pgp","minisign","x509","ssh"] + Format *string `json:"format"` + + // public key + // Required: true + PublicKey *RekordV001SchemaSignaturePublicKey `json:"publicKey"` +} + +// Validate validates this rekord v001 schema signature +func (m *RekordV001SchemaSignature) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if err := m.validateFormat(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RekordV001SchemaSignature) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +var rekordV001SchemaSignatureTypeFormatPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["pgp","minisign","x509","ssh"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + rekordV001SchemaSignatureTypeFormatPropEnum = append(rekordV001SchemaSignatureTypeFormatPropEnum, v) + } +} + +const ( + + // RekordV001SchemaSignatureFormatPgp captures enum value "pgp" + RekordV001SchemaSignatureFormatPgp string = "pgp" + + // RekordV001SchemaSignatureFormatMinisign captures enum value "minisign" + RekordV001SchemaSignatureFormatMinisign string = "minisign" + + // RekordV001SchemaSignatureFormatX509 captures enum value "x509" + RekordV001SchemaSignatureFormatX509 string = "x509" + + // RekordV001SchemaSignatureFormatSSH captures enum value "ssh" + RekordV001SchemaSignatureFormatSSH string = "ssh" +) + +// prop value enum +func (m *RekordV001SchemaSignature) validateFormatEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, rekordV001SchemaSignatureTypeFormatPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *RekordV001SchemaSignature) validateFormat(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"format", "body", m.Format); err != nil { + return err + } + + // value enum + if err := m.validateFormatEnum("signature"+"."+"format", "body", *m.Format); err != nil { + return err + } + + return nil +} + +func (m *RekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if 
stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." + "publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this rekord v001 schema signature based on the context it is used +func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." + "publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *RekordV001SchemaSignature) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RekordV001SchemaSignature) UnmarshalBinary(b []byte) error { + var res RekordV001SchemaSignature + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RekordV001SchemaSignaturePublicKey The public key that can verify the signature +// +// swagger:model RekordV001SchemaSignaturePublicKey +type RekordV001SchemaSignaturePublicKey struct { + + // Specifies the content of the public key inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this rekord v001 schema signature public key +func (m *RekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *RekordV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this rekord v001 schema signature public key based on context it is used +func (m *RekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { + var res RekordV001SchemaSignaturePublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go new file mode 100644 index 000000000000..ef8d42e7a2d7 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Rfc3161 RFC3161 Timestamp +// +// swagger:model rfc3161 +type Rfc3161 struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec Rfc3161Schema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Rfc3161) Kind() string { + return "rfc3161" +} + +// SetKind sets the kind of this subtype +func (m *Rfc3161) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Rfc3161) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec Rfc3161Schema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Rfc3161 + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Rfc3161) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec Rfc3161Schema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this rfc3161 +func (m *Rfc3161) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Rfc3161) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Rfc3161) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this rfc3161 based on the context it is used +func (m *Rfc3161) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Rfc3161) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Rfc3161) UnmarshalBinary(b []byte) error { + var res Rfc3161 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go new file mode 100644 index 000000000000..319358d400c5 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Rfc3161Schema Timestamp Schema +// +// # Schema for RFC 3161 timestamp objects +// +// swagger:model rfc3161Schema +type Rfc3161Schema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go new file mode 100644 index 000000000000..c2037cd7ade0 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go @@ -0,0 +1,192 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Rfc3161V001Schema Timestamp v0.0.1 Schema +// +// # Schema for RFC3161 entries +// +// swagger:model rfc3161V001Schema +type Rfc3161V001Schema struct { + + // tsr + // Required: true + Tsr *Rfc3161V001SchemaTsr `json:"tsr"` +} + +// Validate validates this rfc3161 v001 schema +func (m *Rfc3161V001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateTsr(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Rfc3161V001Schema) validateTsr(formats strfmt.Registry) error { + + if err := validate.Required("tsr", "body", m.Tsr); err != nil { + return err + } + + if m.Tsr != nil { + if err := m.Tsr.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("tsr") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("tsr") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this rfc3161 v001 schema based on the context it is used +func (m *Rfc3161V001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateTsr(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Rfc3161V001Schema) contextValidateTsr(ctx context.Context, formats strfmt.Registry) error { + + if m.Tsr != nil { + + if err := m.Tsr.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("tsr") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("tsr") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Rfc3161V001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Rfc3161V001Schema) UnmarshalBinary(b []byte) error { + var res Rfc3161V001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// Rfc3161V001SchemaTsr Information about the tsr file associated with the entry +// +// swagger:model Rfc3161V001SchemaTsr +type Rfc3161V001SchemaTsr struct { + + // Specifies the tsr file content inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this rfc3161 v001 schema tsr +func (m *Rfc3161V001SchemaTsr) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Rfc3161V001SchemaTsr) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("tsr"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this rfc3161 v001 schema tsr based on context it is used +func (m *Rfc3161V001SchemaTsr) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Rfc3161V001SchemaTsr) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Rfc3161V001SchemaTsr) UnmarshalBinary(b []byte) error { + var res Rfc3161V001SchemaTsr + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go new file mode 100644 index 000000000000..8b1f10c77e67 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Rpm RPM package +// +// swagger:model rpm +type Rpm struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec RpmSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Rpm) Kind() string { + return "rpm" +} + +// SetKind sets the kind of this subtype +func (m *Rpm) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Rpm) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec RpmSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. 
Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Rpm + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Rpm) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec RpmSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this rpm +func (m *Rpm) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Rpm) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Rpm) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this rpm based on the context it is used +func (m *Rpm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Rpm) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Rpm) UnmarshalBinary(b []byte) error { + var res Rpm + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go new file mode 100644 index 000000000000..2520dfb9c782 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// RpmSchema RPM Schema +// +// # Schema for RPM objects +// +// swagger:model rpmSchema +type RpmSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go new file mode 100644 index 000000000000..a7636bd5fcda --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go @@ -0,0 +1,475 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// RpmV001Schema RPM v0.0.1 Schema +// +// # Schema for RPM entries +// +// swagger:model rpmV001Schema +type RpmV001Schema struct { + + // package + // Required: true + Package *RpmV001SchemaPackage `json:"package"` + + // public key + // Required: true + PublicKey *RpmV001SchemaPublicKey `json:"publicKey"` +} + +// Validate validates this rpm v001 schema +func (m *RpmV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePackage(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *RpmV001Schema) validatePackage(formats strfmt.Registry) error { + + if err := validate.Required("package", "body", m.Package); err != nil { + return err + } + + if m.Package != nil { + if err := m.Package.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package") + } + + return err + } + } + + return nil +} + +func (m *RpmV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this rpm v001 schema based on the context it is used +func (m *RpmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePackage(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { + + if m.Package != nil { + + if err := m.Package.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package") + } + + return err + } + } + + return nil +} + +func (m *RpmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *RpmV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RpmV001Schema) UnmarshalBinary(b []byte) error { + var res RpmV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RpmV001SchemaPackage Information about the package associated with the entry +// +// swagger:model RpmV001SchemaPackage +type RpmV001SchemaPackage struct { + + // Specifies the package inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // hash + Hash *RpmV001SchemaPackageHash `json:"hash,omitempty"` + + // Values of the RPM headers + // Read Only: true + Headers map[string]string `json:"headers,omitempty"` +} + +// Validate validates this rpm v001 schema package +func (m *RpmV001SchemaPackage) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RpmV001SchemaPackage) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this rpm v001 schema package based on the context it is used +func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateHeaders(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RpmV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *RpmV001SchemaPackage) contextValidateHeaders(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *RpmV001SchemaPackage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RpmV001SchemaPackage) UnmarshalBinary(b []byte) error { + var res RpmV001SchemaPackage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RpmV001SchemaPackageHash Specifies the hash algorithm and value for the package +// +// swagger:model RpmV001SchemaPackageHash +type RpmV001SchemaPackageHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the package + // Required: true + Value *string `json:"value"` +} + +// Validate validates this rpm v001 schema package hash +func (m *RpmV001SchemaPackageHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var rpmV001SchemaPackageHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + rpmV001SchemaPackageHashTypeAlgorithmPropEnum = append(rpmV001SchemaPackageHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // RpmV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" + RpmV001SchemaPackageHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *RpmV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, rpmV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *RpmV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *RpmV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this rpm v001 schema package hash based on context it is used +func (m *RpmV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RpmV001SchemaPackageHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RpmV001SchemaPackageHash) UnmarshalBinary(b []byte) error { + var res RpmV001SchemaPackageHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RpmV001SchemaPublicKey The PGP public key that can verify the RPM signature +// +// swagger:model RpmV001SchemaPublicKey +type RpmV001SchemaPublicKey struct { + + // Specifies the content of the public key inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this rpm v001 schema public key +func (m *RpmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *RpmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this rpm v001 schema public key based on context it is used +func (m *RpmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RpmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RpmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { + var res RpmV001SchemaPublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go new file mode 100644 index 000000000000..a1b7d08001ff --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go @@ -0,0 +1,350 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SearchIndex search index +// +// swagger:model SearchIndex +type SearchIndex struct { + + // email + // Format: email + Email strfmt.Email `json:"email,omitempty"` + + // hash + // Pattern: ^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$ + Hash string `json:"hash,omitempty"` + + // operator + // Enum: ["and","or"] + Operator string `json:"operator,omitempty"` + + // public key + PublicKey *SearchIndexPublicKey `json:"publicKey,omitempty"` +} + +// Validate validates this search index +func (m *SearchIndex) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEmail(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOperator(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SearchIndex) validateEmail(formats strfmt.Registry) error { + if swag.IsZero(m.Email) { // not required + return nil + } + + if err := validate.FormatOf("email", "body", "email", m.Email.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *SearchIndex) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := validate.Pattern("hash", "body", m.Hash, `^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`); err != nil { + return err + } + + return nil +} + +var searchIndexTypeOperatorPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["and","or"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + searchIndexTypeOperatorPropEnum = append(searchIndexTypeOperatorPropEnum, v) + } +} + +const ( + + // SearchIndexOperatorAnd captures enum value "and" + SearchIndexOperatorAnd string = "and" + + // SearchIndexOperatorOr captures enum value "or" + SearchIndexOperatorOr string = "or" +) + +// prop value enum +func (m *SearchIndex) validateOperatorEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, searchIndexTypeOperatorPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *SearchIndex) validateOperator(formats strfmt.Registry) error { + if swag.IsZero(m.Operator) { // not required + return nil + } + + // value enum + if err := m.validateOperatorEnum("operator", "body", m.Operator); err != nil { + return err + } + + return nil +} + +func (m *SearchIndex) validatePublicKey(formats strfmt.Registry) error { + if swag.IsZero(m.PublicKey) { // not required + return nil + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this search index based on the context it is used +func (m *SearchIndex) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SearchIndex) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if swag.IsZero(m.PublicKey) { // not required + return nil + } + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SearchIndex) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SearchIndex) UnmarshalBinary(b []byte) error { + var res SearchIndex + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// SearchIndexPublicKey search index public key +// +// swagger:model SearchIndexPublicKey +type SearchIndexPublicKey struct { + + // content + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // format + // Required: true + // Enum: ["pgp","x509","minisign","ssh","tuf"] + Format *string `json:"format"` + + // url + // Format: uri + URL strfmt.URI `json:"url,omitempty"` +} + +// Validate validates this search index public key +func (m *SearchIndexPublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFormat(formats); err != nil { + res = append(res, err) + } + + if err := m.validateURL(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var searchIndexPublicKeyTypeFormatPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["pgp","x509","minisign","ssh","tuf"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + searchIndexPublicKeyTypeFormatPropEnum = append(searchIndexPublicKeyTypeFormatPropEnum, v) + } +} + +const ( + + // SearchIndexPublicKeyFormatPgp captures enum value "pgp" + SearchIndexPublicKeyFormatPgp string = "pgp" + + // SearchIndexPublicKeyFormatX509 captures enum value "x509" + SearchIndexPublicKeyFormatX509 string = "x509" + + // SearchIndexPublicKeyFormatMinisign captures enum value "minisign" + SearchIndexPublicKeyFormatMinisign string = "minisign" + + // SearchIndexPublicKeyFormatSSH captures enum value "ssh" + SearchIndexPublicKeyFormatSSH string = "ssh" + + // SearchIndexPublicKeyFormatTUF captures enum value "tuf" + SearchIndexPublicKeyFormatTUF string = "tuf" +) + +// prop value enum +func (m *SearchIndexPublicKey) validateFormatEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, searchIndexPublicKeyTypeFormatPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *SearchIndexPublicKey) validateFormat(formats strfmt.Registry) error { + + if err := validate.Required("publicKey"+"."+"format", "body", m.Format); err != nil { + return err + } + + // value enum + if err := m.validateFormatEnum("publicKey"+"."+"format", "body", *m.Format); err != nil { + return err + } + + return nil +} + +func (m *SearchIndexPublicKey) validateURL(formats strfmt.Registry) error { + if swag.IsZero(m.URL) { // not required + return nil + } + + if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate 
validates this search index public key based on context it is used +func (m *SearchIndexPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SearchIndexPublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SearchIndexPublicKey) UnmarshalBinary(b []byte) error { + var res SearchIndexPublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go new file mode 100644 index 000000000000..6833c8f6d8f5 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go @@ -0,0 +1,306 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + stderrors "errors" + "io" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SearchLogQuery search log query +// +// swagger:model SearchLogQuery +type SearchLogQuery struct { + entriesField []ProposedEntry + + // entry u UI ds + // Max Items: 10 + // Min Items: 1 + EntryUUIDs []string `json:"entryUUIDs"` + + // log indexes + // Max Items: 10 + // Min Items: 1 + LogIndexes []*int64 `json:"logIndexes"` +} + +// Entries gets the entries of this base type +func (m *SearchLogQuery) Entries() []ProposedEntry { + return m.entriesField +} + +// SetEntries sets the entries of this base type +func (m *SearchLogQuery) SetEntries(val []ProposedEntry) { + m.entriesField = val +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *SearchLogQuery) UnmarshalJSON(raw []byte) error { + var data struct { + Entries json.RawMessage `json:"entries"` + + EntryUUIDs []string `json:"entryUUIDs"` + + LogIndexes []*int64 `json:"logIndexes"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var propEntries []ProposedEntry + if string(data.Entries) != "null" { + entries, err := UnmarshalProposedEntrySlice(bytes.NewBuffer(data.Entries), runtime.JSONConsumer()) + if err != nil && !stderrors.Is(err, io.EOF) { + return err + } + propEntries = entries + } + + var result SearchLogQuery + + // entries + result.entriesField = propEntries + + // entryUUIDs + result.EntryUUIDs = data.EntryUUIDs + + // logIndexes + result.LogIndexes = data.LogIndexes + + *m = result + + return nil 
+} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m SearchLogQuery) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + EntryUUIDs []string `json:"entryUUIDs"` + + LogIndexes []*int64 `json:"logIndexes"` + }{ + + EntryUUIDs: m.EntryUUIDs, + + LogIndexes: m.LogIndexes, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Entries []ProposedEntry `json:"entries"` + }{ + + Entries: m.entriesField, + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this search log query +func (m *SearchLogQuery) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEntries(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEntryUUIDs(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLogIndexes(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SearchLogQuery) validateEntries(formats strfmt.Registry) error { + if swag.IsZero(m.Entries()) { // not required + return nil + } + + iEntriesSize := int64(len(m.Entries())) + + if err := validate.MinItems("entries", "body", iEntriesSize, 1); err != nil { + return err + } + + if err := validate.MaxItems("entries", "body", iEntriesSize, 10); err != nil { + return err + } + + for i := 0; i < len(m.Entries()); i++ { + + if err := m.entriesField[i].Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("entries" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("entries" + "." 
+ strconv.Itoa(i)) + } + + return err + } + + } + + return nil +} + +func (m *SearchLogQuery) validateEntryUUIDs(formats strfmt.Registry) error { + if swag.IsZero(m.EntryUUIDs) { // not required + return nil + } + + iEntryUUIDsSize := int64(len(m.EntryUUIDs)) + + if err := validate.MinItems("entryUUIDs", "body", iEntryUUIDsSize, 1); err != nil { + return err + } + + if err := validate.MaxItems("entryUUIDs", "body", iEntryUUIDsSize, 10); err != nil { + return err + } + + for i := 0; i < len(m.EntryUUIDs); i++ { + + if err := validate.Pattern("entryUUIDs"+"."+strconv.Itoa(i), "body", m.EntryUUIDs[i], `^([0-9a-fA-F]{64}|[0-9a-fA-F]{80})$`); err != nil { + return err + } + + } + + return nil +} + +func (m *SearchLogQuery) validateLogIndexes(formats strfmt.Registry) error { + if swag.IsZero(m.LogIndexes) { // not required + return nil + } + + iLogIndexesSize := int64(len(m.LogIndexes)) + + if err := validate.MinItems("logIndexes", "body", iLogIndexesSize, 1); err != nil { + return err + } + + if err := validate.MaxItems("logIndexes", "body", iLogIndexesSize, 10); err != nil { + return err + } + + for i := 0; i < len(m.LogIndexes); i++ { + if swag.IsZero(m.LogIndexes[i]) { // not required + continue + } + + if err := validate.MinimumInt("logIndexes"+"."+strconv.Itoa(i), "body", *m.LogIndexes[i], 0, false); err != nil { + return err + } + + } + + return nil +} + +// ContextValidate validate this search log query based on the context it is used +func (m *SearchLogQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEntries(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SearchLogQuery) contextValidateEntries(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Entries()); i++ { + + if swag.IsZero(m.entriesField[i]) { // not required + return nil + } + + if err := m.entriesField[i].ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("entries" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("entries" + "." + strconv.Itoa(i)) + } + + return err + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SearchLogQuery) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SearchLogQuery) UnmarshalBinary(b []byte) error { + var res SearchLogQuery + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go new file mode 100644 index 000000000000..a5f6eff0f7a7 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// TUF TUF metadata +// +// swagger:model tuf +type TUF struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec TUFSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *TUF) Kind() string { + return "tuf" +} + +// SetKind sets the kind of this subtype +func (m *TUF) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *TUF) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec TUFSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result TUF + + if base.Kind != result.Kind() { + /* Not the type we're looking for. 
*/ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m TUF) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec TUFSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this tuf +func (m *TUF) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TUF) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *TUF) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this tuf based on the context it is used +func (m *TUF) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *TUF) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TUF) UnmarshalBinary(b []byte) error { + var res TUF + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go new file mode 100644 index 000000000000..7c944ef92dad --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// TUFSchema TUF Schema +// +// # Schema for TUF metadata objects +// +// swagger:model tufSchema +type TUFSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go new file mode 100644 index 000000000000..69b5e93d6145 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go @@ -0,0 +1,321 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// TUFV001Schema TUF v0.0.1 Schema +// +// # Schema for TUF metadata entries +// +// swagger:model tufV001Schema +type TUFV001Schema struct { + + // metadata + // Required: true + Metadata *TUFV001SchemaMetadata `json:"metadata"` + + // root + // Required: true + Root *TUFV001SchemaRoot `json:"root"` + + // TUF specification version + // Read Only: true + SpecVersion string `json:"spec_version,omitempty"` +} + +// Validate validates this tuf v001 schema +func (m *TUFV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMetadata(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRoot(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *TUFV001Schema) validateMetadata(formats strfmt.Registry) error { + + if err := validate.Required("metadata", "body", m.Metadata); err != nil { + return err + } + + if m.Metadata != nil { + if err := m.Metadata.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("metadata") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("metadata") + } + + return err + } + } + + return nil +} + +func (m *TUFV001Schema) validateRoot(formats strfmt.Registry) error { + + if err := validate.Required("root", "body", m.Root); err != nil { + return err + } + + if m.Root != nil { + if err := m.Root.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("root") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("root") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this tuf v001 schema based on the context it is used +func (m *TUFV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMetadata(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateRoot(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSpecVersion(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error { + + if m.Metadata != nil { + + if err := m.Metadata.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("metadata") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("metadata") + } + + return err + } + } + + return nil +} + +func (m *TUFV001Schema) contextValidateRoot(ctx context.Context, formats strfmt.Registry) error { + + if m.Root != nil { + + if err := m.Root.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("root") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("root") + } + + return err + } + } + + return nil +} + +func (m *TUFV001Schema) contextValidateSpecVersion(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "spec_version", "body", m.SpecVersion); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *TUFV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TUFV001Schema) UnmarshalBinary(b []byte) error { + var res TUFV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// TUFV001SchemaMetadata TUF metadata +// +// swagger:model TUFV001SchemaMetadata +type TUFV001SchemaMetadata struct { + + // Specifies the metadata inline within the document + // Required: true + Content any `json:"content"` +} + +// Validate validates this TUF v001 schema metadata +func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + 
if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TUFV001SchemaMetadata) validateContent(formats strfmt.Registry) error { + + if m.Content == nil { + return errors.Required("metadata"+"."+"content", "body", nil) + } + + return nil +} + +// ContextValidate validates this TUF v001 schema metadata based on context it is used +func (m *TUFV001SchemaMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *TUFV001SchemaMetadata) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TUFV001SchemaMetadata) UnmarshalBinary(b []byte) error { + var res TUFV001SchemaMetadata + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// TUFV001SchemaRoot root metadata containing about the public keys used to sign the manifest +// +// swagger:model TUFV001SchemaRoot +type TUFV001SchemaRoot struct { + + // Specifies the metadata inline within the document + // Required: true + Content any `json:"content"` +} + +// Validate validates this TUF v001 schema root +func (m *TUFV001SchemaRoot) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TUFV001SchemaRoot) validateContent(formats strfmt.Registry) error { + + if m.Content == nil { + return errors.Required("root"+"."+"content", "body", nil) + } + + return nil +} + +// ContextValidate validates this TUF v001 schema root based on context it is used +func (m *TUFV001SchemaRoot) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *TUFV001SchemaRoot) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TUFV001SchemaRoot) UnmarshalBinary(b []byte) error { + var res TUFV001SchemaRoot + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go b/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go new file mode 100644 index 000000000000..3d2a2ed0b95b --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go @@ -0,0 +1,36 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package log + +type LoggerImpl interface { + Error(...any) + Errorf(string, ...any) + Panic(...any) + Info(...any) + Infof(string, ...any) +} + +var Logger LoggerImpl = &discardLogger{} + +type discardLogger struct{} + +func (l *discardLogger) Panic(err ...any) { + panic(err) +} +func (l *discardLogger) Error(...any) {} +func (l *discardLogger) Errorf(string, ...any) {} +func (l *discardLogger) Infof(string, ...any) {} +func (l *discardLogger) Info(...any) {} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go b/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go new file mode 100644 index 000000000000..4566e1ddfe1d --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go @@ -0,0 +1,38 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package identity + +type Identity struct { + // Types include: + // - *rsa.PublicKey + // - *ecdsa.PublicKey + // - ed25519.PublicKey + // - *x509.Certificate + // - openpgp.EntityList (golang.org/x/crypto/openpgp) + // - *minisign.PublicKey (github.com/jedisct1/go-minisign) + // - ssh.PublicKey (golang.org/x/crypto/ssh) + Crypto any + // Raw key or certificate extracted from Crypto. Values include: + // - PKIX ASN.1 DER-encoded public key + // - ASN.1 DER-encoded certificate + Raw []byte + // For keys, certificates, and minisign, hex-encoded SHA-256 digest + // of the public key or certificate + // For SSH and PGP, Fingerprint is the standard for each ecosystem + // For SSH, unpadded base-64 encoded SHA-256 digest of the key + // For PGP, hex-encoded SHA-1 digest of a key, which can be either + // a primary key or subkey + Fingerprint string +} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go b/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go new file mode 100644 index 000000000000..19688bd1bdb7 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go @@ -0,0 +1,40 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pki + +import ( + "io" + + "github.com/sigstore/rekor/pkg/pki/identity" + sigsig "github.com/sigstore/sigstore/pkg/signature" +) + +// PublicKey Generic object representing a public key (regardless of format & algorithm) +type PublicKey interface { + CanonicalValue() ([]byte, error) + // Deprecated: EmailAddresses() will be deprecated in favor of Subjects() which will + // also return Subject URIs present in public keys. + EmailAddresses() []string + Subjects() []string + // Identities returns a list of typed keys and certificates. + Identities() ([]identity.Identity, error) +} + +// Signature Generic object representing a signature (regardless of format & algorithm) +type Signature interface { + CanonicalValue() ([]byte, error) + Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOption) error +} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go b/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go new file mode 100644 index 000000000000..2cfaf816009f --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go @@ -0,0 +1,276 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509 + +import ( + "bytes" + "crypto" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" + + "github.com/asaskevich/govalidator" + "github.com/sigstore/rekor/pkg/pki/identity" + "github.com/sigstore/sigstore/pkg/cryptoutils" + sigsig "github.com/sigstore/sigstore/pkg/signature" +) + +// EmailAddressOID defined by https://oidref.com/1.2.840.113549.1.9.1 +var EmailAddressOID asn1.ObjectIdentifier = []int{1, 2, 840, 113549, 1, 9, 1} + +type Signature struct { + signature []byte + verifierLoadOpts []sigsig.LoadOption +} + +// NewSignature creates and validates an x509 signature object +func NewSignature(r io.Reader) (*Signature, error) { + return NewSignatureWithOpts(r) +} + +func NewSignatureWithOpts(r io.Reader, opts ...sigsig.LoadOption) (*Signature, error) { + b, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return &Signature{ + signature: b, + verifierLoadOpts: opts, + }, nil +} + +// CanonicalValue implements the pki.Signature interface +func (s Signature) CanonicalValue() ([]byte, error) { + return s.signature, nil +} + +// Verify implements the pki.Signature interface +func (s Signature) Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOption) error { + if len(s.signature) == 0 { + //lint:ignore ST1005 X509 is proper use of term + return errors.New("X509 signature has not been initialized") + } + + key, ok := k.(*PublicKey) + if !ok { + return fmt.Errorf("invalid public key type for: %v", k) + } + + p := key.key + if p == nil { + switch { + case key.cert != nil: + p = key.cert.c.PublicKey + case len(key.certs) > 0: + if err := verifyCertChain(key.certs); err != nil { + return err + } + p = key.certs[0].PublicKey + default: + return errors.New("no public key found") + } + } + + verifier, err := 
sigsig.LoadVerifierWithOpts(p, s.verifierLoadOpts...) + if err != nil { + return err + } + return verifier.VerifySignature(bytes.NewReader(s.signature), r, opts...) +} + +// PublicKey Public Key that follows the x509 standard +type PublicKey struct { + key interface{} + cert *cert + certs []*x509.Certificate +} + +type cert struct { + c *x509.Certificate + b []byte +} + +// NewPublicKey implements the pki.PublicKey interface +func NewPublicKey(r io.Reader) (*PublicKey, error) { + rawPub, err := io.ReadAll(r) + if err != nil { + return nil, err + } + trimmedRawPub := bytes.TrimSpace(rawPub) + + block, rest := pem.Decode(trimmedRawPub) + if block == nil { + return nil, errors.New("invalid public key: failure decoding PEM") + } + + // Handle certificate chain, concatenated PEM-encoded certificates + if len(rest) > 0 { + // Support up to 10 certificates in a chain, to avoid parsing extremely long chains + certs, err := cryptoutils.UnmarshalCertificatesFromPEMLimited(trimmedRawPub, 10) + if err != nil { + return nil, err + } + return &PublicKey{certs: certs}, nil + } + + switch block.Type { + case string(cryptoutils.PublicKeyPEMType): + key, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, err + } + return &PublicKey{key: key}, nil + case string(cryptoutils.CertificatePEMType): + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + return &PublicKey{ + cert: &cert{ + c: c, + b: block.Bytes, + }}, nil + } + return nil, fmt.Errorf("invalid public key: cannot handle type %v", block.Type) +} + +// CanonicalValue implements the pki.PublicKey interface +func (k PublicKey) CanonicalValue() (encoded []byte, err error) { + + switch { + case k.key != nil: + encoded, err = cryptoutils.MarshalPublicKeyToPEM(k.key) + case k.cert != nil: + encoded, err = cryptoutils.MarshalCertificateToPEM(k.cert.c) + case k.certs != nil: + encoded, err = cryptoutils.MarshalCertificatesToPEM(k.certs) + default: + err = errors.New("x509 public key has not been initialized") + } + + return +} + +func (k PublicKey) CryptoPubKey() crypto.PublicKey { + if k.cert != nil { + return k.cert.c.PublicKey + } + if len(k.certs) > 0 { + return k.certs[0].PublicKey + } + return k.key +} + +// EmailAddresses implements the pki.PublicKey interface +func (k PublicKey) EmailAddresses() []string { + var names []string + var cert *x509.Certificate + if k.cert != nil { + cert = k.cert.c + } else if len(k.certs) > 0 { + cert = k.certs[0] + } + if cert != nil { + for _, name := range cert.EmailAddresses { + if govalidator.IsEmail(name) { + names = append(names, strings.ToLower(name)) + } + } + } + return names +} + +// Subjects implements the pki.PublicKey interface +func (k PublicKey) Subjects() []string { + var subjects []string + var cert *x509.Certificate + if k.cert != nil { + cert = k.cert.c + } else if len(k.certs) > 0 { + cert = k.certs[0] + } + if cert != nil { + subjects = cryptoutils.GetSubjectAlternateNames(cert) + } + return subjects +} + +// Identities implements the pki.PublicKey interface +func (k PublicKey) Identities() ([]identity.Identity, error) { + // k contains either a key, a cert, or a list of certs + if k.key != nil { + pkixKey, err := cryptoutils.MarshalPublicKeyToDER(k.key) + if err != nil { + return nil, err + } + digest := sha256.Sum256(pkixKey) + return []identity.Identity{{ + Crypto: k.key, + Raw: pkixKey, + Fingerprint: hex.EncodeToString(digest[:]), + }}, nil + } + + var cert *x509.Certificate + switch { + case k.cert != nil: + cert = k.cert.c + case 
len(k.certs) > 0: + cert = k.certs[0] + default: + return nil, errors.New("no key, certificate or certificate chain provided") + } + + digest := sha256.Sum256(cert.Raw) + return []identity.Identity{{ + Crypto: cert, + Raw: cert.Raw, + Fingerprint: hex.EncodeToString(digest[:]), + }}, nil +} + +func verifyCertChain(certChain []*x509.Certificate) error { + if len(certChain) == 0 { + return errors.New("no certificate chain provided") + } + // No certificate chain to verify + if len(certChain) == 1 { + return nil + } + rootPool := x509.NewCertPool() + rootPool.AddCert(certChain[len(certChain)-1]) + subPool := x509.NewCertPool() + for _, c := range certChain[1 : len(certChain)-1] { + subPool.AddCert(c) + } + if _, err := certChain[0].Verify(x509.VerifyOptions{ + Roots: rootPool, + Intermediates: subPool, + // Allow any key usage + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + // Expired certificates can be uploaded and should be verifiable + CurrentTime: certChain[0].NotBefore, + }); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/README.md b/vendor/github.com/sigstore/rekor/pkg/types/README.md new file mode 100644 index 000000000000..76cb36e61ade --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/README.md @@ -0,0 +1,32 @@ +# Pluggable Types + +## Description + +Rekor supports pluggable types (aka different schemas) for entries stored in the transparency log. + +### Currently supported types + +- Alpine Packages [schema](alpine/alpine_schema.json) + - Versions: 0.0.1 +- COSE Envelopes [schema](cose/cose_schema.json) + - Versions: 0.0.1 +- DSSE Envelopes [schema](dsse/dsse_schema.json) + - Versions: 0.0.1 +- HashedRekord [schema](hashedrekord/hashedrekord_schema.json) + - Versions: 0.0.1 +- Helm Provenance Files [schema](helm/helm_schema.json) + - Versions: 0.0.1 +- In-Toto Attestations [schema](intoto/intoto_schema.json) + - Versions: 0.0.1, 0.0.2 +- Java Archives (JAR Files) [schema](jar/jar_schema.json) + - Versions: 0.0.1 +- Rekord *(default type)* [schema](rekord/rekord_schema.json) + - Versions: 0.0.1 +- RFC3161 Timestamps [schema](rfc3161/rfc3161_schema.json) + - Versions: 0.0.1 +- RPM Packages [schema](rpm/rpm_schema.json) + - Versions: 0.0.1 +- TUF Metadata [schema](tuf/tuf_schema.json) + - Versions: 0.0.1 + +Refer to [Rekor docs](https://docs.sigstore.dev/rekor/pluggable-types) for adding support for new types. diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md b/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md new file mode 100644 index 000000000000..3244c98ec9ac --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md @@ -0,0 +1,25 @@ +**DSSE Type Data Documentation** + +This document provides a definition for each field that is not otherwise described in the [dsse +schema](https://github.com/sigstore/rekor/blob/main/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json). This +document also notes any additional information about the values +associated with each field such as the format in which the data is +stored and any necessary transformations. + +**How do you identify an object as an DSSE object?** + +The "Body" field will include an "dsseObj" field. + +**Recognized content types** + +- [in-toto + statements](https://github.com/in-toto/attestation/tree/main/spec#statement) + are recognized and parsed. The found subject hashes are indexed so + they can be searched for. 
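+
+As an illustration only (a minimal Go sketch, not part of the upstream docs; the statement JSON and digest value below are hypothetical), this is how the subject digests of such a statement end up as searchable index keys of the form `<algorithm>:<digest>`, mirroring what the dsse type's `IndexKeys()` does for in-toto payloads:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/in-toto/in-toto-golang/in_toto"
+)
+
+func main() {
+	// Hypothetical in-toto statement carried as a DSSE payload.
+	payload := []byte(`{
+	  "_type": "https://in-toto.io/Statement/v0.1",
+	  "predicateType": "https://slsa.dev/provenance/v0.2",
+	  "subject": [{
+	    "name": "example.tar.gz",
+	    "digest": {"sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}
+	  }]
+	}`)
+
+	var st in_toto.Statement
+	if err := json.Unmarshal(payload, &st); err != nil {
+		panic(err)
+	}
+
+	// Each subject digest is emitted as "<algorithm>:<digest>", the same shape
+	// used for the index keys computed from the statement's subjects.
+	for _, s := range st.Subject {
+		for alg, digest := range s.Digest {
+			fmt.Println(alg + ":" + digest)
+		}
+	}
+}
+```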
+ +**What data about the envelope is stored in Rekor** + +Only the hash of the payload (the content covered by the digital signature inside the envelope), the hash of the entire DSSE envelope (including signatures), +the signature(s) and their corresponding verifying materials (e.g. public key(s) or certificates) are stored. + +Even if Rekor is configured to use attestation storage, the entire DSSE envelope will not be stored. diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go new file mode 100644 index 000000000000..9036fe5625ce --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go @@ -0,0 +1,74 @@ +// +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "context" + "errors" + "fmt" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" +) + +const ( + KIND = "dsse" +) + +type BaseDSSEType struct { + types.RekorType +} + +func init() { + types.TypeMap.Store(KIND, New) +} + +func New() types.TypeImpl { + bit := BaseDSSEType{} + bit.Kind = KIND + bit.VersionMap = VersionMap + return &bit +} + +var VersionMap = types.NewSemVerEntryFactoryMap() + +func (it BaseDSSEType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) { + if pe == nil { + return nil, errors.New("proposed entry cannot be nil") + } + + in, ok := pe.(*models.DSSE) + if !ok { + return nil, errors.New("cannot unmarshal non-DSSE types") + } + + return it.VersionedUnmarshal(in, *in.APIVersion) +} + +func (it *BaseDSSEType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) { + if version == "" { + version = it.DefaultVersion() + } + ei, err := it.VersionedUnmarshal(nil, version) + if err != nil { + return nil, fmt.Errorf("fetching DSSE version implementation: %w", err) + } + return ei.CreateFromArtifactProperties(ctx, props) +} + +func (it BaseDSSEType) DefaultVersion() string { + return "0.0.1" +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json new file mode 100644 index 000000000000..7dc710fe2155 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/dsse/dsse_schema.json", + "title": "DSSE Schema", + "description": "log entry schema for dsse envelopes", + "type": "object", + "oneOf": [ + { + "$ref": "v0.0.1/dsse_v0_0_1_schema.json" + } + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json new file mode 100644 index 000000000000..51ebe3990afb --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json @@ -0,0 +1,96 @@ +{ + "$schema": 
"http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/dsse/dsse_v0_0_1_schema.json", + "title": "DSSE v0.0.1 Schema", + "description": "Schema for DSSE envelopes", + "type": "object", + "properties": { + "proposedContent": { + "type": "object", + "properties": { + "envelope": { + "description": "DSSE envelope specified as a stringified JSON object", + "type": "string", + "writeOnly": true + }, + "verifiers": { + "description": "collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings", + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "format": "byte" + }, + "writeOnly": true + } + }, + "writeOnly": true, + "required": [ "envelope", "verifiers" ] + }, + "signatures": { + "description": "extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings", + "type": "array", + "minItems": 1, + "items": { + "description": "a signature of the envelope's payload along with the verification material for the signature", + "type": "object", + "properties": { + "signature": { + "description": "base64 encoded signature of the payload", + "type": "string", + "pattern": "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + }, + "verifier": { + "description": "verification material that was used to verify the corresponding signature, specified as a base64 encoded string", + "type": "string", + "format": "byte" + } + }, + "required": [ "signature", "verifier" ] + }, + "readOnly": true + }, + "envelopeHash": { + "description": "Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256" ] + }, + "value": { + "description": "The value of the computed digest over the entire envelope", + "type": "string" + } + }, + "required": [ "algorithm", "value" ], + "readOnly": true + }, + "payloadHash": { + "description": "Specifies the hash algorithm and value covering the payload within the DSSE envelope", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256" ] + }, + "value": { + "description": "The value of the computed digest over the payload within the envelope", + "type": "string" + } + }, + "required": [ "algorithm", "value" ], + "readOnly": true + } + }, + "oneOf": [ + { + "required": [ "proposedContent" ] + }, + { + "required": [ "signatures", "envelopeHash", "payloadHash" ] + } + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go new file mode 100644 index 000000000000..75fac99bdf30 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go @@ -0,0 +1,544 @@ +// +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "bytes" + "context" + "crypto" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + "github.com/in-toto/in-toto-golang/in_toto" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/internal/log" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" + "github.com/sigstore/rekor/pkg/pki/x509" + "github.com/sigstore/rekor/pkg/types" + dsseType "github.com/sigstore/rekor/pkg/types/dsse" + "github.com/sigstore/sigstore/pkg/signature" + sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse" +) + +const ( + APIVERSION = "0.0.1" +) + +func init() { + if err := dsseType.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { + log.Logger.Panic(err) + } +} + +type V001Entry struct { + DSSEObj models.DSSEV001Schema + env *dsse.Envelope +} + +func (v V001Entry) APIVersion() string { + return APIVERSION +} + +func NewEntry() types.EntryImpl { + return &V001Entry{} +} + +// IndexKeys computes the list of keys that should map back to this entry. +// It should *never* reference v.DSSEObj.ProposedContent as those values would only +// be present at the time of insertion +func (v V001Entry) IndexKeys() ([]string, error) { + var result []string + + for _, sig := range v.DSSEObj.Signatures { + if sig == nil || sig.Verifier == nil { + return result, errors.New("missing or malformed public key") + } + keyObj, err := x509.NewPublicKey(bytes.NewReader(*sig.Verifier)) + if err != nil { + return result, err + } + + canonKey, err := keyObj.CanonicalValue() + if err != nil { + return result, fmt.Errorf("could not canonicalize key: %w", err) + } + + keyHash := sha256.Sum256(canonKey) + result = append(result, "sha256:"+hex.EncodeToString(keyHash[:])) + + result = append(result, keyObj.Subjects()...) 
+ } + + if v.DSSEObj.PayloadHash != nil { + payloadHashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.PayloadHash.Algorithm, *v.DSSEObj.PayloadHash.Value)) + result = append(result, payloadHashKey) + } + + if v.DSSEObj.EnvelopeHash != nil { + envelopeHashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.EnvelopeHash.Algorithm, *v.DSSEObj.EnvelopeHash.Value)) + result = append(result, envelopeHashKey) + } + + if v.env == nil { + log.Logger.Info("DSSEObj content or DSSE envelope is nil, returning partial set of keys") + return result, nil + } + + switch v.env.PayloadType { + case in_toto.PayloadType: + + if v.env.Payload == "" { + log.Logger.Info("DSSEObj DSSE payload is empty") + return result, nil + } + decodedPayload, err := v.env.DecodeB64Payload() + if err != nil { + return result, fmt.Errorf("could not decode envelope payload: %w", err) + } + statement, err := parseStatement(decodedPayload) + if err != nil { + return result, err + } + for _, s := range statement.Subject { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + // Not all in-toto statements will contain a SLSA provenance predicate. + // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate + // for other predicates. + if predicate, err := parseSlsaPredicate(decodedPayload); err == nil { + if predicate.Predicate.Materials != nil { + for _, s := range predicate.Predicate.Materials { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + } + } + default: + log.Logger.Infof("Unknown DSSE envelope payloadType: %s", v.env.PayloadType) + } + return result, nil +} + +func parseStatement(p []byte) (*in_toto.Statement, error) { + ps := in_toto.Statement{} + if err := json.Unmarshal(p, &ps); err != nil { + return nil, err + } + return &ps, nil +} + +func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) { + predicate := in_toto.ProvenanceStatement{} + if err := json.Unmarshal(p, &predicate); err != nil { + return nil, err + } + return &predicate, nil +} + +// DecodeEntry performs direct decode into the provided output pointer +// without mutating the receiver on error. 
+func DecodeEntry(input any, output *models.DSSEV001Schema) error { + if output == nil { + return fmt.Errorf("nil output *models.DSSEV001Schema") + } + var m models.DSSEV001Schema + // Single switch with map fast path + switch data := input.(type) { + case map[string]any: + mm := data + if pcRaw, ok := mm["proposedContent"].(map[string]any); ok { + m.ProposedContent = &models.DSSEV001SchemaProposedContent{} + if env, ok := pcRaw["envelope"].(string); ok { + m.ProposedContent.Envelope = &env + } + if vsIF, ok := pcRaw["verifiers"].([]any); ok { + m.ProposedContent.Verifiers = make([]strfmt.Base64, 0, len(vsIF)) + for _, it := range vsIF { + if s, ok := it.(string); ok && s != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(outb, []byte(s)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for verifier: %w", err) + } + m.ProposedContent.Verifiers = append(m.ProposedContent.Verifiers, strfmt.Base64(outb[:n])) + } + } + } else if vsStr, ok := pcRaw["verifiers"].([]string); ok { + m.ProposedContent.Verifiers = make([]strfmt.Base64, 0, len(vsStr)) + for _, s := range vsStr { + if s == "" { + continue + } + outb := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(outb, []byte(s)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for verifier: %w", err) + } + m.ProposedContent.Verifiers = append(m.ProposedContent.Verifiers, strfmt.Base64(outb[:n])) + } + } + } + if sigs, ok := mm["signatures"].([]any); ok { + m.Signatures = make([]*models.DSSEV001SchemaSignaturesItems0, 0, len(sigs)) + for _, s := range sigs { + if sm, ok := s.(map[string]any); ok { + item := &models.DSSEV001SchemaSignaturesItems0{} + if sig, ok := sm["signature"].(string); ok { + item.Signature = &sig + } + if vr, ok := sm["verifier"].(string); ok && vr != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(vr))) + n, err := base64.StdEncoding.Decode(outb, []byte(vr)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for signature verifier: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.Verifier = &b + } + m.Signatures = append(m.Signatures, item) + } + } + } + if eh, ok := mm["envelopeHash"].(map[string]any); ok { + m.EnvelopeHash = &models.DSSEV001SchemaEnvelopeHash{} + if alg, ok := eh["algorithm"].(string); ok { + m.EnvelopeHash.Algorithm = &alg + } + if val, ok := eh["value"].(string); ok { + m.EnvelopeHash.Value = &val + } + } + if ph, ok := mm["payloadHash"].(map[string]any); ok { + m.PayloadHash = &models.DSSEV001SchemaPayloadHash{} + if alg, ok := ph["algorithm"].(string); ok { + m.PayloadHash.Algorithm = &alg + } + if val, ok := ph["value"].(string); ok { + m.PayloadHash.Value = &val + } + } + *output = m + return nil + case *models.DSSEV001Schema: + if data == nil { + return fmt.Errorf("nil *models.DSSEV001Schema") + } + *output = *data + return nil + case models.DSSEV001Schema: + *output = data + return nil + default: + return fmt.Errorf("unsupported input type %T for DecodeEntry", input) + } +} + +func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { + it, ok := pe.(*models.DSSE) + if !ok { + return errors.New("cannot unmarshal non DSSE v0.0.1 type") + } + + dsseObj := &models.DSSEV001Schema{} + + if err := DecodeEntry(it.Spec, dsseObj); err != nil { + return err + } + + // field validation + if err := dsseObj.Validate(strfmt.Default); err != nil { + return err + } + + // either we have just proposed content or the canonicalized fields + if 
dsseObj.ProposedContent == nil { + // then we need canonicalized fields, and all must be present (if present, they would have been validated in the above call to Validate()) + if dsseObj.EnvelopeHash == nil || dsseObj.PayloadHash == nil || len(dsseObj.Signatures) == 0 { + return errors.New("either proposedContent or envelopeHash, payloadHash, and signatures must be present") + } + v.DSSEObj = *dsseObj + return nil + } + // if we're here, then we're trying to propose a new entry so we check to ensure client's aren't setting server-side computed fields + if dsseObj.EnvelopeHash != nil || dsseObj.PayloadHash != nil || len(dsseObj.Signatures) != 0 { + return errors.New("either proposedContent or envelopeHash, payloadHash, and signatures must be present but not both") + } + + env := &dsse.Envelope{} + if err := json.Unmarshal([]byte(*dsseObj.ProposedContent.Envelope), env); err != nil { + return err + } + + if len(env.Signatures) == 0 { + return errors.New("DSSE envelope must contain 1 or more signatures") + } + + allPubKeyBytes := make([][]byte, 0) + for _, publicKey := range dsseObj.ProposedContent.Verifiers { + if publicKey == nil { + return errors.New("an invalid null verifier was provided in ProposedContent") + } + + allPubKeyBytes = append(allPubKeyBytes, publicKey) + } + + sigToKeyMap, err := verifyEnvelope(allPubKeyBytes, env) + if err != nil { + return err + } + + // we need to ensure we canonicalize the ordering of signatures + sortedSigs := make([]string, 0, len(sigToKeyMap)) + for sig := range sigToKeyMap { + sortedSigs = append(sortedSigs, sig) + } + sort.Strings(sortedSigs) + + for i, sig := range sortedSigs { + key := sigToKeyMap[sig] + canonicalizedKey, err := key.CanonicalValue() + if err != nil { + return err + } + b64CanonicalizedKey := strfmt.Base64(canonicalizedKey) + + dsseObj.Signatures = append(dsseObj.Signatures, &models.DSSEV001SchemaSignaturesItems0{ + Signature: &sortedSigs[i], + Verifier: &b64CanonicalizedKey, + }) + } + + decodedPayload, err := env.DecodeB64Payload() + if err != nil { + // this shouldn't happen because failure would have occurred in verifyEnvelope call above + return err + } + + payloadHash := sha256.Sum256(decodedPayload) + dsseObj.PayloadHash = &models.DSSEV001SchemaPayloadHash{ + Algorithm: conv.Pointer(models.DSSEV001SchemaPayloadHashAlgorithmSha256), + Value: conv.Pointer(hex.EncodeToString(payloadHash[:])), + } + + envelopeHash := sha256.Sum256([]byte(*dsseObj.ProposedContent.Envelope)) + dsseObj.EnvelopeHash = &models.DSSEV001SchemaEnvelopeHash{ + Algorithm: conv.Pointer(models.DSSEV001SchemaEnvelopeHashAlgorithmSha256), + Value: conv.Pointer(hex.EncodeToString(envelopeHash[:])), + } + + // we've gotten through all processing without error, now update the object we're unmarshalling into + v.DSSEObj = *dsseObj + v.env = env + + return nil +} + +// Canonicalize returns a JSON representation of the entry to be persisted into the log. This +// will be further canonicalized by JSON Canonicalization Scheme (JCS) before being written. +// +// This function should not use v.DSSEObj.ProposedContent fields as they are client provided and +// should not be trusted; the other fields at the top level are only set server side. 
+func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { + canonicalEntry := models.DSSEV001Schema{ + Signatures: v.DSSEObj.Signatures, + EnvelopeHash: v.DSSEObj.EnvelopeHash, + PayloadHash: v.DSSEObj.PayloadHash, + ProposedContent: nil, // this is explicitly done as we don't want to canonicalize the envelope + } + + for _, s := range canonicalEntry.Signatures { + if s.Signature == nil { + return nil, errors.New("canonical entry missing required signature") + } + } + + sort.Slice(canonicalEntry.Signatures, func(i, j int) bool { + return *canonicalEntry.Signatures[i].Signature < *canonicalEntry.Signatures[j].Signature + }) + + itObj := models.DSSE{} + itObj.APIVersion = conv.Pointer(APIVERSION) + itObj.Spec = &canonicalEntry + + return json.Marshal(&itObj) +} + +// AttestationKey and AttestationKeyValue are not implemented so the envelopes will not be persisted in Rekor + +func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { + returnVal := models.DSSE{} + re := V001Entry{ + DSSEObj: models.DSSEV001Schema{ + ProposedContent: &models.DSSEV001SchemaProposedContent{}, + }, + } + var err error + artifactBytes := props.ArtifactBytes + if artifactBytes == nil { + if props.ArtifactPath == nil { + return nil, errors.New("path to artifact file must be specified") + } + if props.ArtifactPath.IsAbs() { + return nil, errors.New("dsse envelopes cannot be fetched over HTTP(S)") + } + artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path)) + if err != nil { + return nil, err + } + } + + env := &dsse.Envelope{} + if err := json.Unmarshal(artifactBytes, env); err != nil { + return nil, fmt.Errorf("payload must be a valid DSSE envelope: %w", err) + } + + allPubKeyBytes := make([][]byte, 0) + if len(props.PublicKeyBytes) > 0 { + allPubKeyBytes = append(allPubKeyBytes, props.PublicKeyBytes...) + } + + if len(props.PublicKeyPaths) > 0 { + for _, path := range props.PublicKeyPaths { + if path.IsAbs() { + return nil, errors.New("dsse public keys cannot be fetched over HTTP(S)") + } + + publicKeyBytes, err := os.ReadFile(filepath.Clean(path.Path)) + if err != nil { + return nil, fmt.Errorf("error reading public key file: %w", err) + } + + allPubKeyBytes = append(allPubKeyBytes, publicKeyBytes) + } + } + + keysBySig, err := verifyEnvelope(allPubKeyBytes, env) + if err != nil { + return nil, err + } + for _, key := range keysBySig { + canonicalKey, err := key.CanonicalValue() + if err != nil { + return nil, err + } + re.DSSEObj.ProposedContent.Verifiers = append(re.DSSEObj.ProposedContent.Verifiers, strfmt.Base64(canonicalKey)) + } + re.DSSEObj.ProposedContent.Envelope = conv.Pointer(string(artifactBytes)) + + returnVal.Spec = re.DSSEObj + returnVal.APIVersion = conv.Pointer(re.APIVersion()) + + return &returnVal, nil +} + +// verifyEnvelope takes in an array of possible key bytes and attempts to parse them as x509 public keys. +// it then uses these to verify the envelope and makes sure that every signature on the envelope is verified. +// it returns a map of verifiers indexed by the signature the verifier corresponds to. 
+func verifyEnvelope(allPubKeyBytes [][]byte, env *dsse.Envelope) (map[string]*x509.PublicKey, error) { + // generate a fake id for these keys so we can get back to the key bytes and match them to their corresponding signature + verifierBySig := make(map[string]*x509.PublicKey) + allSigs := make(map[string]struct{}) + for _, sig := range env.Signatures { + allSigs[sig.Sig] = struct{}{} + } + + for _, pubKeyBytes := range allPubKeyBytes { + if len(allSigs) == 0 { + break // if all signatures have been verified, do not attempt anymore + } + key, err := x509.NewPublicKey(bytes.NewReader(pubKeyBytes)) + if err != nil { + return nil, fmt.Errorf("could not parse public key as x509: %w", err) + } + + vfr, err := signature.LoadVerifier(key.CryptoPubKey(), crypto.SHA256) + if err != nil { + return nil, fmt.Errorf("could not load verifier: %w", err) + } + + dsseVfr, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{SignatureVerifier: vfr}) + if err != nil { + return nil, fmt.Errorf("could not use public key as a dsse verifier: %w", err) + } + + accepted, err := dsseVfr.Verify(context.Background(), env) + if err != nil { + return nil, fmt.Errorf("could not verify envelope: %w", err) + } + + for _, accept := range accepted { + delete(allSigs, accept.Sig.Sig) + verifierBySig[accept.Sig.Sig] = key + } + } + + if len(allSigs) > 0 { + return nil, errors.New("all signatures must have a key that verifies it") + } + + return verifierBySig, nil +} + +func (v V001Entry) Verifiers() ([]pkitypes.PublicKey, error) { + if len(v.DSSEObj.Signatures) == 0 { + return nil, errors.New("dsse v0.0.1 entry not initialized") + } + + var keys []pkitypes.PublicKey + for _, s := range v.DSSEObj.Signatures { + key, err := x509.NewPublicKey(bytes.NewReader(*s.Verifier)) + if err != nil { + return nil, err + } + keys = append(keys, key) + } + return keys, nil +} + +func (v V001Entry) ArtifactHash() (string, error) { + if v.DSSEObj.PayloadHash == nil || v.DSSEObj.PayloadHash.Algorithm == nil || v.DSSEObj.PayloadHash.Value == nil { + return "", errors.New("dsse v0.0.1 entry not initialized") + } + return strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.PayloadHash.Algorithm, *v.DSSEObj.PayloadHash.Value)), nil +} + +func (v V001Entry) Insertable() (bool, error) { + if v.DSSEObj.ProposedContent == nil { + return false, errors.New("missing proposed content") + } + if v.DSSEObj.ProposedContent.Envelope == nil || len(*v.DSSEObj.ProposedContent.Envelope) == 0 { + return false, errors.New("missing proposed DSSE envelope") + } + if len(v.DSSEObj.ProposedContent.Verifiers) == 0 { + return false, errors.New("missing proposed verifiers") + } + + return true, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/entries.go b/vendor/github.com/sigstore/rekor/pkg/types/entries.go new file mode 100644 index 000000000000..06a8525bd9a7 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/entries.go @@ -0,0 +1,176 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net/url" + "reflect" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/go-openapi/strfmt" + "github.com/go-viper/mapstructure/v2" + "github.com/sigstore/rekor/pkg/generated/models" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" +) + +// EntryImpl specifies the behavior of a versioned type +type EntryImpl interface { + APIVersion() string // the supported versions for this implementation + IndexKeys() ([]string, error) // the keys that should be added to the external index for this entry + Canonicalize(ctx context.Context) ([]byte, error) // marshal the canonical entry to be put into the tlog + Unmarshal(e models.ProposedEntry) error // unmarshal the abstract entry into the specific struct for this versioned type + CreateFromArtifactProperties(context.Context, ArtifactProperties) (models.ProposedEntry, error) + Verifiers() ([]pkitypes.PublicKey, error) // list of keys or certificates that can verify an entry's signature + ArtifactHash() (string, error) // hex-encoded artifact hash prefixed with hash name, e.g. sha256:abcdef + Insertable() (bool, error) // denotes whether the entry that was unmarshalled has the writeOnly fields required to validate and insert into the log +} + +// EntryWithAttestationImpl specifies the behavior of a versioned type that also stores attestations +type EntryWithAttestationImpl interface { + EntryImpl + AttestationKey() string // returns the key used to look up the attestation from storage (should be sha256:digest) + AttestationKeyValue() (string, []byte) // returns the key to be used when storing the attestation as well as the attestation itself +} + +// ProposedEntryIterator is an iterator over a list of proposed entries +type ProposedEntryIterator interface { + models.ProposedEntry + HasNext() bool + Get() models.ProposedEntry + GetNext() models.ProposedEntry +} + +// EntryFactory describes a factory function that can generate structs for a specific versioned type +type EntryFactory func() EntryImpl + +func NewProposedEntry(ctx context.Context, kind, version string, props ArtifactProperties) (models.ProposedEntry, error) { + if tf, found := TypeMap.Load(kind); found { + t := tf.(func() TypeImpl)() + if t == nil { + return nil, fmt.Errorf("error generating object for kind '%v'", kind) + } + return t.CreateProposedEntry(ctx, version, props) + } + return nil, fmt.Errorf("could not create entry for kind '%v'", kind) +} + +// CreateVersionedEntry returns the specific instance for the type and version specified in the doc +// This method should be used on the insertion flow, which validates that the specific version proposed +// is permitted to be entered into the log. 
+func CreateVersionedEntry(pe models.ProposedEntry) (EntryImpl, error) { + ei, err := UnmarshalEntry(pe) + if err != nil { + return nil, err + } + kind := pe.Kind() + if tf, found := TypeMap.Load(kind); found { + if !tf.(func() TypeImpl)().IsSupportedVersion(ei.APIVersion()) { + return nil, fmt.Errorf("entry kind '%v' does not support inserting entries of version '%v'", kind, ei.APIVersion()) + } + } else { + return nil, fmt.Errorf("unknown kind '%v' specified", kind) + } + + if ok, err := ei.Insertable(); !ok { + return nil, fmt.Errorf("entry not insertable into log: %w", err) + } + + return ei, nil +} + +// UnmarshalEntry returns the specific instance for the type and version specified in the doc +// This method does not check for whether the version of the entry could be currently inserted into the log, +// and is useful when dealing with entries that have been persisted to the log. +func UnmarshalEntry(pe models.ProposedEntry) (EntryImpl, error) { + if pe == nil { + return nil, errors.New("proposed entry cannot be nil") + } + + kind := pe.Kind() + if tf, found := TypeMap.Load(kind); found { + t := tf.(func() TypeImpl)() + if t == nil { + return nil, fmt.Errorf("error generating object for kind '%v'", kind) + } + return t.UnmarshalEntry(pe) + } + return nil, fmt.Errorf("could not unmarshal entry for kind '%v'", kind) +} + +// DecodeEntry maps the (abstract) input structure into the specific entry implementation class; +// while doing so, it detects the case where we need to convert from string to []byte and does +// the base64 decoding required to make that happen. +// This also detects converting from string to strfmt.DateTime +func DecodeEntry(input, output interface{}) error { + cfg := mapstructure.DecoderConfig{ + DecodeHook: func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Slice && t != reflect.TypeOf(strfmt.DateTime{}) { + return data, nil + } + + if data == nil { + return nil, errors.New("attempted to decode nil data") + } + + if t == reflect.TypeOf(strfmt.DateTime{}) { + return strfmt.ParseDateTime(data.(string)) + } + + bytes, err := base64.StdEncoding.DecodeString(data.(string)) + if err != nil { + return []byte{}, fmt.Errorf("failed parsing base64 data: %w", err) + } + return bytes, nil + }, + Result: output, + } + + dec, err := mapstructure.NewDecoder(&cfg) + if err != nil { + return fmt.Errorf("error initializing decoder: %w", err) + } + + return dec.Decode(input) +} + +// CanonicalizeEntry returns the entry marshalled in JSON according to the +// canonicalization rules of RFC8785 to protect against any changes in golang's JSON +// marshalling logic that may reorder elements +func CanonicalizeEntry(ctx context.Context, entry EntryImpl) ([]byte, error) { + canonicalEntry, err := entry.Canonicalize(ctx) + if err != nil { + return nil, err + } + return jsoncanonicalizer.Transform(canonicalEntry) +} + +// ArtifactProperties provide a consistent struct for passing values from +// CLI flags to the type+version specific CreateProposeEntry() methods +type ArtifactProperties struct { + AdditionalAuthenticatedData []byte + ArtifactPath *url.URL + ArtifactHash string + ArtifactBytes []byte + SignaturePath *url.URL + SignatureBytes []byte + PublicKeyPaths []*url.URL + PublicKeyBytes [][]byte + PKIFormat string +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/error.go b/vendor/github.com/sigstore/rekor/pkg/types/error.go new file mode 100644 index 000000000000..57f0c77518b0 --- /dev/null +++ 
b/vendor/github.com/sigstore/rekor/pkg/types/error.go @@ -0,0 +1,31 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// ValidationError indicates that there is an issue with the content in the HTTP Request that +// should result in an HTTP 400 Bad Request error being returned to the client +// +// Deprecated: use InputValidationError instead to take advantage of Go's error wrapping +type ValidationError error + +// InputValidationError indicates that there is an issue with the content in the HTTP Request that +// should result in an HTTP 400 Bad Request error being returned to the client +type InputValidationError struct { + Err error +} + +func (v *InputValidationError) Error() string { return v.Err.Error() } +func (v *InputValidationError) Unwrap() error { return v.Err } diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go new file mode 100644 index 000000000000..66395c7a05a9 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go @@ -0,0 +1,75 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
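Editor's note (not part of the vendored code): InputValidationError above exists so callers can recognize "the client sent bad content" via Go error wrapping and return HTTP 400. A minimal sketch of that consumption pattern follows; the type is re-declared locally so the snippet compiles on its own, and statusFor is a hypothetical helper, not a rekor API.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Local copy of the vendored type so this sketch is self-contained.
type InputValidationError struct{ Err error }

func (v *InputValidationError) Error() string { return v.Err.Error() }
func (v *InputValidationError) Unwrap() error { return v.Err }

// statusFor maps validation failures to 400 and everything else to 500.
func statusFor(err error) int {
	var ive *InputValidationError
	if errors.As(err, &ive) {
		return http.StatusBadRequest
	}
	return http.StatusInternalServerError
}

func main() {
	err := fmt.Errorf("canonicalizing entry: %w",
		&InputValidationError{Err: errors.New("missing signature")})
	fmt.Println(statusFor(err)) // 400
}
```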
+ +package hashedrekord + +import ( + "context" + "errors" + "fmt" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" +) + +const ( + KIND = "hashedrekord" +) + +type BaseRekordType struct { + types.RekorType +} + +func init() { + types.TypeMap.Store(KIND, New) +} + +func New() types.TypeImpl { + brt := BaseRekordType{} + brt.Kind = KIND + brt.VersionMap = VersionMap + return &brt +} + +var VersionMap = types.NewSemVerEntryFactoryMap() + +func (rt BaseRekordType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) { + if pe == nil { + return nil, errors.New("proposed entry cannot be nil") + } + + rekord, ok := pe.(*models.Hashedrekord) + if !ok { + return nil, fmt.Errorf("cannot unmarshal non-hashed Rekord types: %s", pe.Kind()) + } + + return rt.VersionedUnmarshal(rekord, *rekord.APIVersion) +} + +func (rt *BaseRekordType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) { + if version == "" { + version = rt.DefaultVersion() + } + ei, err := rt.VersionedUnmarshal(nil, version) + if err != nil { + return nil, fmt.Errorf("fetching hashed Rekord version implementation: %w", err) + } + + return ei.CreateFromArtifactProperties(ctx, props) +} + +func (rt BaseRekordType) DefaultVersion() string { + return "0.0.1" +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json new file mode 100644 index 000000000000..be9beaeb6a25 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/hashedrekord/hasehedrekord_schema.json", + "title": "Hashedrekord Schema", + "description": "Schema for Hashedrekord objects", + "type": "object", + "oneOf": [ + { + "$ref": "v0.0.1/hashedrekord_v0_0_1_schema.json" + } + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go new file mode 100644 index 000000000000..7d0ef04e61f9 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go @@ -0,0 +1,377 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
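Editor's note (not part of the vendored code): hashedrekord.go above shows the registration pattern each kind follows — an init function stores a constructor for the kind in a process-wide map, and each versioned implementation later registers its factory with the kind's version map. The sketch below imitates that shape with a sync.Map keyed by kind; typeMap, register, and lookup are illustrative names, and (as the vendored comment notes for TypeMap) writes are assumed to happen once at init time, so no extra locking is shown.

```go
package main

import (
	"fmt"
	"sync"
)

type entryFactory func() string // stand-in for a versioned entry constructor

var typeMap sync.Map // kind string -> map[version]entryFactory

// register is called from init-style code, once per kind/version pair.
func register(kind, version string, f entryFactory) {
	v, _ := typeMap.LoadOrStore(kind, map[string]entryFactory{})
	v.(map[string]entryFactory)[version] = f
}

// lookup resolves the factory for a proposed entry's kind and version.
func lookup(kind, version string) (entryFactory, bool) {
	v, ok := typeMap.Load(kind)
	if !ok {
		return nil, false
	}
	f, ok := v.(map[string]entryFactory)[version]
	return f, ok
}

func main() {
	register("hashedrekord", "0.0.1", func() string { return "hashedrekord v0.0.1 entry" })
	if f, ok := lookup("hashedrekord", "0.0.1"); ok {
		fmt.Println(f())
	}
}
```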
+ +package hashedrekord + +import ( + "bytes" + "context" + "crypto" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/internal/log" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" + "github.com/sigstore/rekor/pkg/pki/x509" + "github.com/sigstore/rekor/pkg/types" + hashedrekord "github.com/sigstore/rekor/pkg/types/hashedrekord" + "github.com/sigstore/rekor/pkg/util" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +const ( + APIVERSION = "0.0.1" +) + +func init() { + if err := hashedrekord.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { + log.Logger.Panic(err) + } +} + +type V001Entry struct { + HashedRekordObj models.HashedrekordV001Schema +} + +func (v V001Entry) APIVersion() string { + return APIVERSION +} + +func NewEntry() types.EntryImpl { + return &V001Entry{} +} + +func (v V001Entry) IndexKeys() ([]string, error) { + var result []string + + key := v.HashedRekordObj.Signature.PublicKey.Content + keyHash := sha256.Sum256(key) + result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:]))) + + pub, err := x509.NewPublicKey(bytes.NewReader(key)) + if err != nil { + return nil, err + } + result = append(result, pub.Subjects()...) + + if v.HashedRekordObj.Data.Hash != nil { + hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value)) + result = append(result, hashKey) + } + + return result, nil +} + +// DecodeEntry performs direct decode into the provided output pointer +// without mutating the receiver on error. 
+func DecodeEntry(input any, output *models.HashedrekordV001Schema) error { + if output == nil { + return fmt.Errorf("nil output *models.HashedrekordV001Schema") + } + var m models.HashedrekordV001Schema + // Single type-switch with map fast path + switch data := input.(type) { + case map[string]any: + mm := data + if sigRaw, ok := mm["signature"].(map[string]any); ok { + m.Signature = &models.HashedrekordV001SchemaSignature{} + if c, ok := sigRaw["content"].(string); ok && c != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(c))) + n, err := base64.StdEncoding.Decode(outb, []byte(c)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for signature content: %w", err) + } + m.Signature.Content = outb[:n] + } + if pkRaw, ok := sigRaw["publicKey"].(map[string]any); ok { + m.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{} + if c, ok := pkRaw["content"].(string); ok && c != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(c))) + n, err := base64.StdEncoding.Decode(outb, []byte(c)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for public key content: %w", err) + } + m.Signature.PublicKey.Content = outb[:n] + } + } + } + if dataRaw, ok := mm["data"].(map[string]any); ok { + if hRaw, ok := dataRaw["hash"].(map[string]any); ok { + m.Data = &models.HashedrekordV001SchemaData{Hash: &models.HashedrekordV001SchemaDataHash{}} + if alg, ok := hRaw["algorithm"].(string); ok { + m.Data.Hash.Algorithm = &alg + } + if val, ok := hRaw["value"].(string); ok { + m.Data.Hash.Value = &val + } + } + } + *output = m + return nil + case *models.HashedrekordV001Schema: + if data == nil { + return fmt.Errorf("nil *models.HashedrekordV001Schema") + } + *output = *data + return nil + case models.HashedrekordV001Schema: + *output = data + return nil + default: + return fmt.Errorf("unsupported input type %T for DecodeEntry", input) + } +} + +func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { + rekord, ok := pe.(*models.Hashedrekord) + if !ok { + return errors.New("cannot unmarshal non Rekord v0.0.1 type") + } + + if err := DecodeEntry(rekord.Spec, &v.HashedRekordObj); err != nil { + return err + } + + // field validation + if err := v.HashedRekordObj.Validate(strfmt.Default); err != nil { + return err + } + + // cross field validation + _, _, err := v.validate() + return err +} + +func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { + sigObj, keyObj, err := v.validate() + if err != nil { + return nil, &types.InputValidationError{Err: err} + } + + canonicalEntry := models.HashedrekordV001Schema{} + + // need to canonicalize signature & key content + canonicalEntry.Signature = &models.HashedrekordV001SchemaSignature{} + canonicalEntry.Signature.Content, err = sigObj.CanonicalValue() + if err != nil { + return nil, err + } + + // key URL (if known) is not set deliberately + canonicalEntry.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{} + canonicalEntry.Signature.PublicKey.Content, err = keyObj.CanonicalValue() + if err != nil { + return nil, err + } + + canonicalEntry.Data = &models.HashedrekordV001SchemaData{} + canonicalEntry.Data.Hash = v.HashedRekordObj.Data.Hash + // data content is not set deliberately + + v.HashedRekordObj = canonicalEntry + // wrap in valid object with kind and apiVersion set + rekordObj := models.Hashedrekord{} + rekordObj.APIVersion = conv.Pointer(APIVERSION) + rekordObj.Spec = &canonicalEntry + + return json.Marshal(&rekordObj) +} + +// validate performs 
cross-field validation for fields in object +func (v *V001Entry) validate() (pkitypes.Signature, pkitypes.PublicKey, error) { + sig := v.HashedRekordObj.Signature + if sig == nil { + return nil, nil, &types.InputValidationError{Err: errors.New("missing signature")} + } + // Hashed rekord type only works for x509 signature types + sigObj, err := x509.NewSignatureWithOpts(bytes.NewReader(sig.Content), options.WithED25519ph()) + if err != nil { + return nil, nil, &types.InputValidationError{Err: err} + } + + key := sig.PublicKey + if key == nil { + return nil, nil, &types.InputValidationError{Err: errors.New("missing public key")} + } + keyObj, err := x509.NewPublicKey(bytes.NewReader(key.Content)) + if err != nil { + return nil, nil, &types.InputValidationError{Err: err} + } + + data := v.HashedRekordObj.Data + if data == nil { + return nil, nil, &types.InputValidationError{Err: errors.New("missing data")} + } + + hash := data.Hash + if hash == nil { + return nil, nil, &types.InputValidationError{Err: errors.New("missing hash")} + } + + var alg crypto.Hash + switch conv.Value(hash.Algorithm) { + case models.HashedrekordV001SchemaDataHashAlgorithmSha384: + if len(*hash.Value) != crypto.SHA384.Size()*2 { + return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")} + } + alg = crypto.SHA384 + case models.HashedrekordV001SchemaDataHashAlgorithmSha512: + if len(*hash.Value) != crypto.SHA512.Size()*2 { + return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")} + } + alg = crypto.SHA512 + default: + if len(*hash.Value) != crypto.SHA256.Size()*2 { + return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")} + } + alg = crypto.SHA256 + } + + decoded, err := hex.DecodeString(*hash.Value) + if err != nil { + return nil, nil, err + } + if err := sigObj.Verify(nil, keyObj, options.WithDigest(decoded), options.WithCryptoSignerOpts(alg)); err != nil { + return nil, nil, &types.InputValidationError{Err: fmt.Errorf("verifying signature: %w", err)} + } + + return sigObj, keyObj, nil +} + +func getDataHashAlgorithm(hashAlgorithm crypto.Hash) string { + switch hashAlgorithm { + case crypto.SHA384: + return models.HashedrekordV001SchemaDataHashAlgorithmSha384 + case crypto.SHA512: + return models.HashedrekordV001SchemaDataHashAlgorithmSha512 + default: + return models.HashedrekordV001SchemaDataHashAlgorithmSha256 + } +} + +func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { + returnVal := models.Hashedrekord{} + re := V001Entry{} + + // we will need artifact, public-key, signature + re.HashedRekordObj.Data = &models.HashedrekordV001SchemaData{} + + var err error + + if props.PKIFormat != "x509" { + return nil, errors.New("hashedrekord entries can only be created for artifacts signed with x509-based PKI") + } + + re.HashedRekordObj.Signature = &models.HashedrekordV001SchemaSignature{} + sigBytes := props.SignatureBytes + if sigBytes == nil { + if props.SignaturePath == nil { + return nil, errors.New("a detached signature must be provided") + } + sigBytes, err = os.ReadFile(filepath.Clean(props.SignaturePath.Path)) + if err != nil { + return nil, fmt.Errorf("error reading signature file: %w", err) + } + } + re.HashedRekordObj.Signature.Content = strfmt.Base64(sigBytes) + + re.HashedRekordObj.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{} + publicKeyBytes := props.PublicKeyBytes + if len(publicKeyBytes) == 0 { + if 
len(props.PublicKeyPaths) != 1 { + return nil, errors.New("only one public key must be provided to verify detached signature") + } + keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path)) + if err != nil { + return nil, fmt.Errorf("error reading public key file: %w", err) + } + publicKeyBytes = append(publicKeyBytes, keyBytes) + } else if len(publicKeyBytes) != 1 { + return nil, errors.New("only one public key must be provided") + } + + hashAlgorithm, hashValue := util.UnprefixSHA(props.ArtifactHash) + re.HashedRekordObj.Signature.PublicKey.Content = strfmt.Base64(publicKeyBytes[0]) + re.HashedRekordObj.Data.Hash = &models.HashedrekordV001SchemaDataHash{ + Algorithm: conv.Pointer(getDataHashAlgorithm(hashAlgorithm)), + Value: conv.Pointer(hashValue), + } + + if _, _, err := re.validate(); err != nil { + return nil, err + } + + returnVal.APIVersion = conv.Pointer(re.APIVersion()) + returnVal.Spec = re.HashedRekordObj + + return &returnVal, nil +} + +func (v V001Entry) Verifiers() ([]pkitypes.PublicKey, error) { + if v.HashedRekordObj.Signature == nil || v.HashedRekordObj.Signature.PublicKey == nil || v.HashedRekordObj.Signature.PublicKey.Content == nil { + return nil, errors.New("hashedrekord v0.0.1 entry not initialized") + } + key, err := x509.NewPublicKey(bytes.NewReader(v.HashedRekordObj.Signature.PublicKey.Content)) + if err != nil { + return nil, err + } + return []pkitypes.PublicKey{key}, nil +} + +func (v V001Entry) ArtifactHash() (string, error) { + if v.HashedRekordObj.Data == nil || v.HashedRekordObj.Data.Hash == nil || v.HashedRekordObj.Data.Hash.Value == nil || v.HashedRekordObj.Data.Hash.Algorithm == nil { + return "", errors.New("hashedrekord v0.0.1 entry not initialized") + } + return strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value)), nil +} + +func (v V001Entry) Insertable() (bool, error) { + if v.HashedRekordObj.Signature == nil { + return false, errors.New("missing signature property") + } + if len(v.HashedRekordObj.Signature.Content) == 0 { + return false, errors.New("missing signature content") + } + if v.HashedRekordObj.Signature.PublicKey == nil { + return false, errors.New("missing publicKey property") + } + if len(v.HashedRekordObj.Signature.PublicKey.Content) == 0 { + return false, errors.New("missing publicKey content") + } + if v.HashedRekordObj.Data == nil { + return false, errors.New("missing data property") + } + if v.HashedRekordObj.Data.Hash == nil { + return false, errors.New("missing hash property") + } + if v.HashedRekordObj.Data.Hash.Algorithm == nil { + return false, errors.New("missing hash algorithm") + } + if v.HashedRekordObj.Data.Hash.Value == nil { + return false, errors.New("missing hash value") + } + return true, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json new file mode 100644 index 000000000000..3d536eb49e1d --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json @@ -0,0 +1,54 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/rekord/hashedrekord_v0_0_1_schema.json", + "title": "Hashed Rekor v0.0.1 Schema", + "description": "Schema for Hashed Rekord object", + "type": "object", + "properties": { + "signature": { + "description": "Information about the detached signature associated with 
the entry", + "type": "object", + "properties": { + "content": { + "description": "Specifies the content of the signature inline within the document", + "type": "string", + "format": "byte" + }, + "publicKey" : { + "description": "The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information", + "type": "object", + "properties": { + "content": { + "description": "Specifies the content of the public key or code signing certificate inline within the document", + "type": "string", + "format": "byte" + } + } + } + } + }, + "data": { + "description": "Information about the content associated with the entry", + "type": "object", + "properties": { + "hash": { + "description": "Specifies the hash algorithm and value for the content", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256", "sha384", "sha512" ] + }, + "value": { + "description": "The hash value for the content, as represented by a lower case hexadecimal string", + "type": "string" + } + }, + "required": [ "algorithm", "value" ] + } + } + } + }, + "required": [ "signature", "data" ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md b/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md new file mode 100644 index 000000000000..0c4d1d73fafa --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md @@ -0,0 +1,13 @@ +**in-toto Type Data Documentation** + +This document provides a definition for each field that is not otherwise described in the [in-toto schema](https://github.com/sigstore/rekor/blob/main/pkg/types/intoto/v0.0.1/intoto_v0_0_1_schema.json). This document also notes any additional information about the values associated with each field such as the format in which the data is stored and any necessary transformations. + +**Attestation:** authenticated, machine-readable metadata about one or more software artifacts. [SLSA definition](https://github.com/slsa-framework/slsa/blob/main/controls/attestations.md) +- The Attestation value ought to be a Base64-encoded JSON object. +- The [in-toto Attestation specification](https://github.com/in-toto/attestation/blob/main/spec/README.md#statement) provides detailed guidance on understanding and parsing this JSON object. + +**AttestationType:** Identifies the type of attestation being made, such as a provenance attestation or a vulnerability scan attestation. AttestationType's value, even when prefixed with an http, is not necessarily a working URL. + +**How do you identify an object as an in-toto object?** + +The "Body" field will include an "IntotoObj" field. diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go new file mode 100644 index 000000000000..2bfba39468b1 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go @@ -0,0 +1,135 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package intoto + +import ( + "context" + "errors" + "fmt" + "slices" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/internal/log" + "github.com/sigstore/rekor/pkg/types" +) + +const ( + KIND = "intoto" +) + +type BaseIntotoType struct { + types.RekorType +} + +func init() { + types.TypeMap.Store(KIND, New) +} + +func New() types.TypeImpl { + bit := BaseIntotoType{} + bit.Kind = KIND + bit.VersionMap = VersionMap + return &bit +} + +var VersionMap = types.NewSemVerEntryFactoryMap() + +func (it BaseIntotoType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) { + if pe == nil { + return nil, errors.New("proposed entry cannot be nil") + } + + in, ok := pe.(*models.Intoto) + if !ok { + return nil, errors.New("cannot unmarshal non-Rekord types") + } + + return it.VersionedUnmarshal(in, *in.APIVersion) +} + +func (it *BaseIntotoType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) { + var head ProposedIntotoEntryIterator + var next *ProposedIntotoEntryIterator + if version == "" { + // get default version as head of list + version = it.DefaultVersion() + ei, err := it.VersionedUnmarshal(nil, version) + if err != nil { + return nil, fmt.Errorf("fetching default Intoto version implementation: %w", err) + } + pe, err := ei.CreateFromArtifactProperties(ctx, props) + if err != nil { + return nil, fmt.Errorf("creating default Intoto entry: %w", err) + } + head.ProposedEntry = pe + next = &head + for _, v := range it.SupportedVersions() { + if v == it.DefaultVersion() { + continue + } + ei, err := it.VersionedUnmarshal(nil, v) + if err != nil { + log.Logger.Errorf("fetching Intoto version (%v) implementation: %w", v, err) + continue + } + versionedPE, err := ei.CreateFromArtifactProperties(ctx, props) + if err != nil { + log.Logger.Errorf("error creating Intoto entry of version (%v): %w", v, err) + continue + } + next.next = &ProposedIntotoEntryIterator{versionedPE, nil} + next = next.next.(*ProposedIntotoEntryIterator) + } + return head, nil + } + + ei, err := it.VersionedUnmarshal(nil, version) + if err != nil { + return nil, fmt.Errorf("fetching Intoto version implementation: %w", err) + } + return ei.CreateFromArtifactProperties(ctx, props) +} + +func (it BaseIntotoType) DefaultVersion() string { + return "0.0.2" +} + +// SupportedVersions returns the supported versions for this type in the order of preference +func (it BaseIntotoType) SupportedVersions() []string { + return []string{"0.0.2", "0.0.1"} +} + +// IsSupportedVersion returns true if the version can be inserted into the log, and false if not +func (it *BaseIntotoType) IsSupportedVersion(proposedVersion string) bool { + return slices.Contains(it.SupportedVersions(), proposedVersion) +} + +type ProposedIntotoEntryIterator struct { + models.ProposedEntry + next models.ProposedEntry +} + +func (p ProposedIntotoEntryIterator) HasNext() bool { + return p.next != nil +} + +func (p ProposedIntotoEntryIterator) GetNext() models.ProposedEntry { + return p.next +} + +func (p ProposedIntotoEntryIterator) Get() models.ProposedEntry { + return p.ProposedEntry +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json new file mode 100644 index 000000000000..16f6172afa74 --- /dev/null +++ 
b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json @@ -0,0 +1,15 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/intoto/intoto_schema.json", + "title": "Intoto Schema", + "description": "Intoto for Rekord objects", + "type": "object", + "oneOf": [ + { + "$ref": "v0.0.1/intoto_v0_0_1_schema.json" + }, + { + "$ref": "v0.0.2/intoto_v0_0_2_schema.json" + } + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go new file mode 100644 index 000000000000..ef895151c33b --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go @@ -0,0 +1,648 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package intoto + +import ( + "bytes" + "context" + "crypto" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "slices" + "strings" + + "github.com/in-toto/in-toto-golang/in_toto" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/internal/log" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" + "github.com/sigstore/rekor/pkg/pki/x509" + "github.com/sigstore/rekor/pkg/types" + "github.com/sigstore/rekor/pkg/types/intoto" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +const ( + APIVERSION = "0.0.2" +) + +func init() { + if err := intoto.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { + log.Logger.Panic(err) + } +} + +var maxAttestationSize = 100 * 1024 + +func SetMaxAttestationSize(limit int) { + maxAttestationSize = limit +} + +type V002Entry struct { + IntotoObj models.IntotoV002Schema + env dsse.Envelope +} + +func (v V002Entry) APIVersion() string { + return APIVERSION +} + +func NewEntry() types.EntryImpl { + return &V002Entry{} +} + +func (v V002Entry) IndexKeys() ([]string, error) { + var result []string + + if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil { + log.Logger.Info("IntotoObj content or dsse envelope is nil") + return result, nil + } + + for _, sig := range v.IntotoObj.Content.Envelope.Signatures { + if sig == nil || sig.PublicKey == nil { + return result, errors.New("malformed or missing signature") + } + keyObj, err := x509.NewPublicKey(bytes.NewReader(*sig.PublicKey)) + if err != nil { + return result, err + } + + canonKey, err := keyObj.CanonicalValue() + if err != nil { + return result, fmt.Errorf("could not canonicize key: %w", err) + } + + keyHash := sha256.Sum256(canonKey) + result = append(result, "sha256:"+hex.EncodeToString(keyHash[:])) + + result = append(result, keyObj.Subjects()...) 
+ } + + payloadKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)) + result = append(result, payloadKey) + + // since we can't deterministically calculate this server-side (due to public keys being added inline, and also canonicalization being potentially different), + // we'll just skip adding this index key + // hashkey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.Hash.Algorithm, *v.IntotoObj.Content.Hash.Value)) + // result = append(result, hashkey) + + switch *v.IntotoObj.Content.Envelope.PayloadType { + case in_toto.PayloadType: + + if v.IntotoObj.Content.Envelope.Payload == nil { + log.Logger.Info("IntotoObj DSSE payload is empty") + return result, nil + } + decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload)) + if err != nil { + return result, fmt.Errorf("could not decode envelope payload: %w", err) + } + statement, err := parseStatement(decodedPayload) + if err != nil { + return result, err + } + for _, s := range statement.Subject { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + // Not all in-toto statements will contain a SLSA provenance predicate. + // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate + // for other predicates. + if predicate, err := parseSlsaPredicate(decodedPayload); err == nil { + if predicate.Predicate.Materials != nil { + for _, s := range predicate.Predicate.Materials { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + } + } + default: + log.Logger.Infof("Unknown in_toto DSSE envelope Type: %s", *v.IntotoObj.Content.Envelope.PayloadType) + } + return result, nil +} + +func parseStatement(p []byte) (*in_toto.Statement, error) { + ps := in_toto.Statement{} + if err := json.Unmarshal(p, &ps); err != nil { + return nil, err + } + return &ps, nil +} + +func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) { + predicate := in_toto.ProvenanceStatement{} + if err := json.Unmarshal(p, &predicate); err != nil { + return nil, err + } + return &predicate, nil +} + +// DecodeEntry performs direct decode into the provided output pointer +// without mutating the receiver on error. 
+func DecodeEntry(input any, output *models.IntotoV002Schema) error { + if output == nil { + return fmt.Errorf("nil output *models.IntotoV002Schema") + } + var m models.IntotoV002Schema + switch in := input.(type) { + case map[string]any: + mm := in + m.Content = &models.IntotoV002SchemaContent{Envelope: &models.IntotoV002SchemaContentEnvelope{}} + if c, ok := mm["content"].(map[string]any); ok { + if env, ok := c["envelope"].(map[string]any); ok { + if pt, ok := env["payloadType"].(string); ok { + m.Content.Envelope.PayloadType = &pt + } + if p, ok := env["payload"].(string); ok && p != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(p))) + n, err := base64.StdEncoding.Decode(outb, []byte(p)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for payload: %w", err) + } + m.Content.Envelope.Payload = strfmt.Base64(outb[:n]) + } + if raw, ok := env["signatures"]; ok { + switch sigs := raw.(type) { + case []any: + m.Content.Envelope.Signatures = make([]*models.IntotoV002SchemaContentEnvelopeSignaturesItems0, 0, len(sigs)) + for _, s := range sigs { + if sm, ok := s.(map[string]any); ok { + item := &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{} + if kid, ok := sm["keyid"].(string); ok { + item.Keyid = kid + } + if sig, ok := sm["sig"].(string); ok { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(sig))) + n, err := base64.StdEncoding.Decode(outb, []byte(sig)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for signature: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.Sig = &b + } + if pk, ok := sm["publicKey"].(string); ok { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(pk))) + n, err := base64.StdEncoding.Decode(outb, []byte(pk)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for public key: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.PublicKey = &b + } + m.Content.Envelope.Signatures = append(m.Content.Envelope.Signatures, item) + } + } + case []map[string]any: + m.Content.Envelope.Signatures = make([]*models.IntotoV002SchemaContentEnvelopeSignaturesItems0, 0, len(sigs)) + for _, sm := range sigs { + item := &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{} + if kid, ok := sm["keyid"].(string); ok { + item.Keyid = kid + } + if sig, ok := sm["sig"].(string); ok { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(sig))) + n, err := base64.StdEncoding.Decode(outb, []byte(sig)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for signature: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.Sig = &b + } + if pk, ok := sm["publicKey"].(string); ok { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(pk))) + n, err := base64.StdEncoding.Decode(outb, []byte(pk)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for public key: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.PublicKey = &b + } + m.Content.Envelope.Signatures = append(m.Content.Envelope.Signatures, item) + } + } + } + } + if h, ok := c["hash"].(map[string]any); ok { + m.Content.Hash = &models.IntotoV002SchemaContentHash{} + if alg, ok := h["algorithm"].(string); ok { + m.Content.Hash.Algorithm = &alg + } + if val, ok := h["value"].(string); ok { + m.Content.Hash.Value = &val + } + } + if ph, ok := c["payloadHash"].(map[string]any); ok { + m.Content.PayloadHash = &models.IntotoV002SchemaContentPayloadHash{} + if alg, ok := ph["algorithm"].(string); ok { + m.Content.PayloadHash.Algorithm = &alg + } + if val, ok := ph["value"].(string); ok { + 
m.Content.PayloadHash.Value = &val + } + } + } + *output = m + return nil + case *models.IntotoV002Schema: + if in == nil { + return fmt.Errorf("nil *models.IntotoV002Schema") + } + *output = *in + return nil + case models.IntotoV002Schema: + *output = in + return nil + default: + return fmt.Errorf("unsupported input type %T for DecodeEntry", input) + } +} + +func (v *V002Entry) Unmarshal(pe models.ProposedEntry) error { + it, ok := pe.(*models.Intoto) + if !ok { + return errors.New("cannot unmarshal non Intoto v0.0.2 type") + } + + var err error + if err := DecodeEntry(it.Spec, &v.IntotoObj); err != nil { + return err + } + + // field validation + if err := v.IntotoObj.Validate(strfmt.Default); err != nil { + return err + } + + if string(v.IntotoObj.Content.Envelope.Payload) == "" { + return nil + } + + env := &dsse.Envelope{ + Payload: string(v.IntotoObj.Content.Envelope.Payload), + PayloadType: *v.IntotoObj.Content.Envelope.PayloadType, + } + + allPubKeyBytes := make([][]byte, 0) + for i, sig := range v.IntotoObj.Content.Envelope.Signatures { + if sig == nil { + v.IntotoObj.Content.Envelope.Signatures = slices.Delete(v.IntotoObj.Content.Envelope.Signatures, i, i) + continue + } + env.Signatures = append(env.Signatures, dsse.Signature{ + KeyID: sig.Keyid, + Sig: string(*sig.Sig), + }) + + allPubKeyBytes = append(allPubKeyBytes, *sig.PublicKey) + } + + if _, err := verifyEnvelope(allPubKeyBytes, env); err != nil { + return err + } + + v.env = *env + + decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload)) + if err != nil { + return fmt.Errorf("could not decode envelope payload: %w", err) + } + + h := sha256.Sum256(decodedPayload) + v.IntotoObj.Content.PayloadHash = &models.IntotoV002SchemaContentPayloadHash{ + Algorithm: conv.Pointer(models.IntotoV002SchemaContentPayloadHashAlgorithmSha256), + Value: conv.Pointer(hex.EncodeToString(h[:])), + } + + return nil +} + +func (v *V002Entry) Canonicalize(_ context.Context) ([]byte, error) { + if err := v.IntotoObj.Validate(strfmt.Default); err != nil { + return nil, err + } + + if v.IntotoObj.Content.Hash == nil { + return nil, errors.New("missing envelope digest") + } + + if err := v.IntotoObj.Content.Hash.Validate(strfmt.Default); err != nil { + return nil, fmt.Errorf("error validating envelope digest: %w", err) + } + + if v.IntotoObj.Content.PayloadHash == nil { + return nil, errors.New("missing payload digest") + } + + if err := v.IntotoObj.Content.PayloadHash.Validate(strfmt.Default); err != nil { + return nil, fmt.Errorf("error validating payload digest: %w", err) + } + + if len(v.IntotoObj.Content.Envelope.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + + canonicalEntry := models.IntotoV002Schema{ + Content: &models.IntotoV002SchemaContent{ + Envelope: &models.IntotoV002SchemaContentEnvelope{ + PayloadType: v.IntotoObj.Content.Envelope.PayloadType, + Signatures: v.IntotoObj.Content.Envelope.Signatures, + }, + Hash: v.IntotoObj.Content.Hash, + PayloadHash: v.IntotoObj.Content.PayloadHash, + }, + } + itObj := models.Intoto{} + itObj.APIVersion = conv.Pointer(APIVERSION) + itObj.Spec = &canonicalEntry + + return json.Marshal(&itObj) +} + +// AttestationKey returns the digest of the attestation that was uploaded, to be used to lookup the attestation from storage +func (v *V002Entry) AttestationKey() string { + if v.IntotoObj.Content != nil && v.IntotoObj.Content.PayloadHash != nil { + return fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, 
*v.IntotoObj.Content.PayloadHash.Value) + } + return "" +} + +// AttestationKeyValue returns both the key and value to be persisted into attestation storage +func (v *V002Entry) AttestationKeyValue() (string, []byte) { + storageSize := base64.StdEncoding.DecodedLen(len(v.env.Payload)) + if storageSize > maxAttestationSize { + log.Logger.Infof("Skipping attestation storage, size %d is greater than max %d", storageSize, maxAttestationSize) + return "", nil + } + attBytes, err := base64.StdEncoding.DecodeString(v.env.Payload) + if err != nil { + log.Logger.Infof("could not decode envelope payload: %w", err) + return "", nil + } + return v.AttestationKey(), attBytes +} + +type verifier struct { + s signature.Signer + v signature.Verifier +} + +func (v *verifier) KeyID() (string, error) { + return "", nil +} + +func (v *verifier) Public() crypto.PublicKey { + // the dsse library uses this to generate a key ID if the KeyID function returns an empty string + // as well for the AcceptedKey return value. Unfortunately since key ids can be arbitrary, we don't + // know how to generate a matching id for the key id on the envelope's signature... + // dsse verify will skip verifiers whose key id doesn't match the signature's key id, unless it fails + // to generate one from the public key... so we trick it by returning nil ¯\_(ツ)_/¯ + return nil +} + +func (v *verifier) Sign(_ context.Context, data []byte) (sig []byte, err error) { + if v.s == nil { + return nil, errors.New("nil signer") + } + sig, err = v.s.SignMessage(bytes.NewReader(data), options.WithCryptoSignerOpts(crypto.SHA256)) + if err != nil { + return nil, err + } + return sig, nil +} + +func (v *verifier) Verify(_ context.Context, data, sig []byte) error { + if v.v == nil { + return errors.New("nil verifier") + } + return v.v.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data)) +} + +func (v V002Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { + returnVal := models.Intoto{} + re := V002Entry{ + IntotoObj: models.IntotoV002Schema{ + Content: &models.IntotoV002SchemaContent{ + Envelope: &models.IntotoV002SchemaContentEnvelope{}, + }, + }} + var err error + artifactBytes := props.ArtifactBytes + if artifactBytes == nil { + if props.ArtifactPath == nil { + return nil, errors.New("path to artifact file must be specified") + } + if props.ArtifactPath.IsAbs() { + return nil, errors.New("intoto envelopes cannot be fetched over HTTP(S)") + } + artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path)) + if err != nil { + return nil, err + } + } + + env := dsse.Envelope{} + if err := json.Unmarshal(artifactBytes, &env); err != nil { + return nil, fmt.Errorf("payload must be a valid dsse envelope: %w", err) + } + + allPubKeyBytes := make([][]byte, 0) + if len(props.PublicKeyBytes) > 0 { + allPubKeyBytes = append(allPubKeyBytes, props.PublicKeyBytes...) 
+ } + + if len(props.PublicKeyPaths) > 0 { + for _, path := range props.PublicKeyPaths { + if path.IsAbs() { + return nil, errors.New("dsse public keys cannot be fetched over HTTP(S)") + } + + publicKeyBytes, err := os.ReadFile(filepath.Clean(path.Path)) + if err != nil { + return nil, fmt.Errorf("error reading public key file: %w", err) + } + + allPubKeyBytes = append(allPubKeyBytes, publicKeyBytes) + } + } + + keysBySig, err := verifyEnvelope(allPubKeyBytes, &env) + if err != nil { + return nil, err + } + + b64 := strfmt.Base64([]byte(env.Payload)) + re.IntotoObj.Content.Envelope.Payload = b64 + re.IntotoObj.Content.Envelope.PayloadType = &env.PayloadType + + for _, sig := range env.Signatures { + key, ok := keysBySig[sig.Sig] + if !ok { + return nil, errors.New("all signatures must have a key that verifies it") + } + + canonKey, err := key.CanonicalValue() + if err != nil { + return nil, fmt.Errorf("could not canonicize key: %w", err) + } + + keyBytes := strfmt.Base64(canonKey) + sigBytes := strfmt.Base64([]byte(sig.Sig)) + re.IntotoObj.Content.Envelope.Signatures = append(re.IntotoObj.Content.Envelope.Signatures, &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{ + Keyid: sig.KeyID, + Sig: &sigBytes, + PublicKey: &keyBytes, + }) + } + + h := sha256.Sum256([]byte(artifactBytes)) + re.IntotoObj.Content.Hash = &models.IntotoV002SchemaContentHash{ + Algorithm: conv.Pointer(models.IntotoV001SchemaContentHashAlgorithmSha256), + Value: conv.Pointer(hex.EncodeToString(h[:])), + } + + returnVal.Spec = re.IntotoObj + returnVal.APIVersion = conv.Pointer(re.APIVersion()) + + return &returnVal, nil +} + +// verifyEnvelope takes in an array of possible key bytes and attempts to parse them as x509 public keys. +// it then uses these to verify the envelope and makes sure that every signature on the envelope is verified. +// it returns a map of verifiers indexed by the signature the verifier corresponds to. 
+func verifyEnvelope(allPubKeyBytes [][]byte, env *dsse.Envelope) (map[string]*x509.PublicKey, error) { + // generate a fake id for these keys so we can get back to the key bytes and match them to their corresponding signature + verifierBySig := make(map[string]*x509.PublicKey) + allSigs := make(map[string]struct{}) + for _, sig := range env.Signatures { + allSigs[sig.Sig] = struct{}{} + } + + for _, pubKeyBytes := range allPubKeyBytes { + key, err := x509.NewPublicKey(bytes.NewReader(pubKeyBytes)) + if err != nil { + return nil, fmt.Errorf("could not parse public key as x509: %w", err) + } + + vfr, err := signature.LoadVerifier(key.CryptoPubKey(), crypto.SHA256) + if err != nil { + return nil, fmt.Errorf("could not load verifier: %w", err) + } + + dsseVfr, err := dsse.NewEnvelopeVerifier(&verifier{ + v: vfr, + }) + + if err != nil { + return nil, fmt.Errorf("could not use public key as a dsse verifier: %w", err) + } + + accepted, err := dsseVfr.Verify(context.Background(), env) + if err != nil { + return nil, fmt.Errorf("could not verify envelope: %w", err) + } + + for _, accept := range accepted { + delete(allSigs, accept.Sig.Sig) + verifierBySig[accept.Sig.Sig] = key + } + } + + if len(allSigs) > 0 { + return nil, errors.New("all signatures must have a key that verifies it") + } + + return verifierBySig, nil +} + +func (v V002Entry) Verifiers() ([]pkitypes.PublicKey, error) { + if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil { + return nil, errors.New("intoto v0.0.2 entry not initialized") + } + + sigs := v.IntotoObj.Content.Envelope.Signatures + if len(sigs) == 0 { + return nil, errors.New("no signatures found on intoto entry") + } + + var keys []pkitypes.PublicKey + for _, s := range v.IntotoObj.Content.Envelope.Signatures { + key, err := x509.NewPublicKey(bytes.NewReader(*s.PublicKey)) + if err != nil { + return nil, err + } + keys = append(keys, key) + } + return keys, nil +} + +func (v V002Entry) ArtifactHash() (string, error) { + if v.IntotoObj.Content == nil || v.IntotoObj.Content.PayloadHash == nil || v.IntotoObj.Content.PayloadHash.Algorithm == nil || v.IntotoObj.Content.PayloadHash.Value == nil { + return "", errors.New("intoto v0.0.2 entry not initialized") + } + return strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)), nil +} + +func (v V002Entry) Insertable() (bool, error) { + if v.IntotoObj.Content == nil { + return false, errors.New("missing content property") + } + if v.IntotoObj.Content.Envelope == nil { + return false, errors.New("missing envelope property") + } + if len(v.IntotoObj.Content.Envelope.Payload) == 0 { + return false, errors.New("missing envelope content") + } + + if v.IntotoObj.Content.Envelope.PayloadType == nil || len(*v.IntotoObj.Content.Envelope.PayloadType) == 0 { + return false, errors.New("missing payloadType content") + } + + if len(v.IntotoObj.Content.Envelope.Signatures) == 0 { + return false, errors.New("missing signatures content") + } + for _, sig := range v.IntotoObj.Content.Envelope.Signatures { + if sig == nil { + return false, errors.New("missing signature entry") + } + if sig.Sig == nil || len(*sig.Sig) == 0 { + return false, errors.New("missing signature content") + } + if sig.PublicKey == nil || len(*sig.PublicKey) == 0 { + return false, errors.New("missing publicKey content") + } + } + + if v.env.Payload == "" || v.env.PayloadType == "" || len(v.env.Signatures) == 0 { + return false, errors.New("invalid DSSE envelope") + } + + return true, nil 
+} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json new file mode 100644 index 000000000000..3c404a30a026 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json @@ -0,0 +1,105 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/intoto/intoto_v0_0_2_schema.json", + "title": "intoto v0.0.2 Schema", + "description": "Schema for intoto object", + "type": "object", + "properties": { + "content": { + "type": "object", + "properties": { + "envelope": { + "description": "dsse envelope", + "type": "object", + "properties": { + "payload": { + "description": "payload of the envelope", + "type": "string", + "format": "byte", + "writeOnly": true + }, + "payloadType": { + "description": "type describing the payload", + "type": "string" + }, + "signatures": { + "description": "collection of all signatures of the envelope's payload", + "type": "array", + "minItems": 1, + "items": { + "description": "a signature of the envelope's payload along with the public key for the signature", + "type": "object", + "properties": { + "keyid": { + "description": "optional id of the key used to create the signature", + "type": "string" + }, + "sig": { + "description": "signature of the payload", + "type": "string", + "format": "byte" + }, + "publicKey": { + "description": "public key that corresponds to this signature", + "type": "string", + "format": "byte" + } + }, + "required": ["sig", "publicKey"] + } + } + }, + "required": ["payloadType", "signatures"] + }, + "hash": { + "description": "Specifies the hash algorithm and value encompassing the entire signed envelope", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ + "sha256" + ] + }, + "value": { + "description": "The hash value for the archive", + "type": "string" + } + }, + "required": [ + "algorithm", + "value" + ], + "readOnly": true + }, + "payloadHash": { + "description": "Specifies the hash algorithm and value covering the payload within the DSSE envelope", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256" ] + }, + "value": { + "description": "The hash value of the payload", + "type": "string" + } + }, + "required": [ + "algorithm", + "value" + ], + "readOnly": true + } + }, + "required": [ + "envelope" + ] + } + }, + "required": [ + "content" + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/test_util.go b/vendor/github.com/sigstore/rekor/pkg/types/test_util.go new file mode 100644 index 000000000000..b4daa3556e78 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/test_util.go @@ -0,0 +1,94 @@ +/* +Copyright © 2021 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import ( + "context" + + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" +) + +type BaseUnmarshalTester struct{} + +func (u BaseUnmarshalTester) NewEntry() EntryImpl { + return &BaseUnmarshalTester{} +} + +func (u BaseUnmarshalTester) ArtifactHash() (string, error) { + return "", nil +} + +func (u BaseUnmarshalTester) Verifiers() ([]pkitypes.PublicKey, error) { + return nil, nil +} + +func (u BaseUnmarshalTester) APIVersion() string { + return "2.0.1" +} + +func (u BaseUnmarshalTester) IndexKeys() ([]string, error) { + return []string{}, nil +} + +func (u BaseUnmarshalTester) Canonicalize(_ context.Context) ([]byte, error) { + return nil, nil +} + +func (u BaseUnmarshalTester) Unmarshal(_ models.ProposedEntry) error { + return nil +} + +func (u BaseUnmarshalTester) Validate() error { + return nil +} + +func (u BaseUnmarshalTester) AttestationKey() string { + return "" +} + +func (u BaseUnmarshalTester) AttestationKeyValue() (string, []byte) { + return "", nil +} + +func (u BaseUnmarshalTester) CreateFromArtifactProperties(_ context.Context, _ ArtifactProperties) (models.ProposedEntry, error) { + return nil, nil +} + +func (u BaseUnmarshalTester) Insertable() (bool, error) { + return false, nil +} + +type BaseProposedEntryTester struct{} + +func (b BaseProposedEntryTester) Kind() string { + return "nil" +} + +func (b BaseProposedEntryTester) SetKind(_ string) { + +} + +func (b BaseProposedEntryTester) Validate(_ strfmt.Registry) error { + return nil +} + +func (b BaseProposedEntryTester) ContextValidate(_ context.Context, _ strfmt.Registry) error { + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/types.go b/vendor/github.com/sigstore/rekor/pkg/types/types.go new file mode 100644 index 000000000000..72722321b18b --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/types.go @@ -0,0 +1,88 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "context" + "errors" + "fmt" + "slices" + "sync" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// TypeMap stores mapping between type strings and entry constructors +// entries are written once at process initialization and read for each transaction, so we use +// sync.Map which is optimized for this case +var TypeMap sync.Map + +// RekorType is the base struct that is embedded in all type implementations +type RekorType struct { + Kind string // this is the unique string that identifies the type + VersionMap VersionEntryFactoryMap // this maps the supported versions to implementation +} + +// TypeImpl is implemented by all types to support the polymorphic conversion of abstract +// proposed entry to a working implementation for the versioned type requested, if supported +type TypeImpl interface { + CreateProposedEntry(context.Context, string, ArtifactProperties) (models.ProposedEntry, error) + DefaultVersion() string + SupportedVersions() []string + IsSupportedVersion(string) bool + UnmarshalEntry(pe models.ProposedEntry) (EntryImpl, error) +} + +// VersionedUnmarshal extracts the correct implementing factory function from the type's version map, +// creates an entry of that versioned type and then calls that versioned type's unmarshal method +func (rt *RekorType) VersionedUnmarshal(pe models.ProposedEntry, version string) (EntryImpl, error) { + ef, err := rt.VersionMap.GetEntryFactory(version) + if err != nil { + return nil, fmt.Errorf("%s implementation for version '%v' not found: %w", rt.Kind, version, err) + } + entry := ef() + if entry == nil { + return nil, errors.New("failure generating object") + } + if pe == nil { + return entry, nil + } + return entry, entry.Unmarshal(pe) +} + +// SupportedVersions returns a list of versions of this type that can be currently entered into the log +func (rt *RekorType) SupportedVersions() []string { + return rt.VersionMap.SupportedVersions() +} + +// IsSupportedVersion returns true if the version can be inserted into the log, and false if not +func (rt *RekorType) IsSupportedVersion(proposedVersion string) bool { + return slices.Contains(rt.SupportedVersions(), proposedVersion) +} + +// ListImplementedTypes returns a list of all type strings currently known to +// be implemented +func ListImplementedTypes() []string { + retVal := []string{} + TypeMap.Range(func(k interface{}, v interface{}) bool { + tf := v.(func() TypeImpl) + for _, verStr := range tf().SupportedVersions() { + retVal = append(retVal, fmt.Sprintf("%v:%v", k.(string), verStr)) + } + return true + }) + return retVal +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go b/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go new file mode 100644 index 000000000000..d27163709946 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go @@ -0,0 +1,97 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "fmt" + "sync" + + "github.com/blang/semver" + "github.com/sigstore/rekor/pkg/internal/log" +) + +// VersionEntryFactoryMap defines a map-like interface to find the correct implementation for a version string +// This could be a simple map[string][EntryFactory], or something more elegant (e.g. semver) +type VersionEntryFactoryMap interface { + GetEntryFactory(string) (EntryFactory, error) // return the entry factory for the specified version + SetEntryFactory(string, EntryFactory) error // set the entry factory for the specified version + Count() int // return the count of entry factories currently in the map + SupportedVersions() []string // return a list of versions currently stored in the map +} + +// SemVerEntryFactoryMap implements a map that allows implementations to specify their supported versions using +// semver-compliant strings +type SemVerEntryFactoryMap struct { + factoryMap map[string]EntryFactory + + sync.RWMutex +} + +func NewSemVerEntryFactoryMap() VersionEntryFactoryMap { + s := SemVerEntryFactoryMap{} + s.factoryMap = make(map[string]EntryFactory) + return &s +} + +func (s *SemVerEntryFactoryMap) Count() int { + return len(s.factoryMap) +} + +func (s *SemVerEntryFactoryMap) GetEntryFactory(version string) (EntryFactory, error) { + s.RLock() + defer s.RUnlock() + + semverToMatch, err := semver.Parse(version) + if err != nil { + log.Logger.Error(err) + return nil, err + } + + // will return first function that matches + for k, v := range s.factoryMap { + semverRange, err := semver.ParseRange(k) + if err != nil { + log.Logger.Error(err) + return nil, err + } + + if semverRange(semverToMatch) { + return v, nil + } + } + return nil, fmt.Errorf("unable to locate entry for version %s", version) +} + +func (s *SemVerEntryFactoryMap) SetEntryFactory(constraint string, ef EntryFactory) error { + s.Lock() + defer s.Unlock() + + if _, err := semver.ParseRange(constraint); err != nil { + log.Logger.Error(err) + return err + } + + s.factoryMap[constraint] = ef + return nil +} + +func (s *SemVerEntryFactoryMap) SupportedVersions() []string { + var versions []string + for k := range s.factoryMap { + versions = append(versions, k) + } + return versions +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go new file mode 100644 index 000000000000..27b246d79970 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go @@ -0,0 +1,165 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// heavily borrowed from https://github.com/transparency-dev/formats/blob/main/log/checkpoint.go + +type Checkpoint struct { + // Origin is the unique identifier/version string + Origin string + // Size is the number of entries in the log at this checkpoint. + Size uint64 + // Hash is the hash which commits to the contents of the entire log. + Hash []byte + // OtherContent is any additional data to be included in the signed payload; each element is assumed to be one line + OtherContent []string +} + +// String returns the String representation of the Checkpoint +func (c Checkpoint) String() string { + var b strings.Builder + fmt.Fprintf(&b, "%s\n%d\n%s\n", c.Origin, c.Size, base64.StdEncoding.EncodeToString(c.Hash)) + for _, line := range c.OtherContent { + fmt.Fprintf(&b, "%s\n", line) + } + return b.String() +} + +// MarshalCheckpoint returns the common format representation of this Checkpoint. +func (c Checkpoint) MarshalCheckpoint() ([]byte, error) { + return []byte(c.String()), nil +} + +// UnmarshalCheckpoint parses the common formatted checkpoint data and stores the result +// in the Checkpoint. +// +// The supplied data is expected to begin with the following 3 lines of text, +// each followed by a newline: +// +// +// +// ... +// ... +// +// This will discard any content found after the checkpoint (including signatures) +func (c *Checkpoint) UnmarshalCheckpoint(data []byte) error { + l := bytes.Split(data, []byte("\n")) + if len(l) < 4 { + return errors.New("invalid checkpoint - too few newlines") + } + origin := string(l[0]) + if len(origin) == 0 { + return errors.New("invalid checkpoint - empty ecosystem") + } + size, err := strconv.ParseUint(string(l[1]), 10, 64) + if err != nil { + return fmt.Errorf("invalid checkpoint - size invalid: %w", err) + } + h, err := base64.StdEncoding.DecodeString(string(l[2])) + if err != nil { + return fmt.Errorf("invalid checkpoint - invalid hash: %w", err) + } + *c = Checkpoint{ + Origin: origin, + Size: size, + Hash: h, + } + if len(l) >= 3 { + for _, line := range l[3:] { + if len(line) == 0 { + break + } + c.OtherContent = append(c.OtherContent, string(line)) + } + } + return nil +} + +type SignedCheckpoint struct { + Checkpoint + SignedNote +} + +func CreateSignedCheckpoint(c Checkpoint) (*SignedCheckpoint, error) { + text, err := c.MarshalCheckpoint() + if err != nil { + return nil, err + } + return &SignedCheckpoint{ + Checkpoint: c, + SignedNote: SignedNote{Note: string(text)}, + }, nil +} + +func SignedCheckpointValidator(strToValidate string) bool { + s := SignedNote{} + if err := s.UnmarshalText([]byte(strToValidate)); err != nil { + return false + } + c := &Checkpoint{} + return c.UnmarshalCheckpoint([]byte(s.Note)) == nil +} + +func CheckpointValidator(strToValidate string) bool { + c := &Checkpoint{} + return c.UnmarshalCheckpoint([]byte(strToValidate)) == nil +} + +func (r *SignedCheckpoint) UnmarshalText(data []byte) error { + s := SignedNote{} + if err := s.UnmarshalText([]byte(data)); err != nil { + return fmt.Errorf("unmarshalling signed note: %w", err) + } + c := Checkpoint{} + if err := c.UnmarshalCheckpoint([]byte(s.Note)); err != nil { + return fmt.Errorf("unmarshalling checkpoint: %w", err) + } + *r = SignedCheckpoint{Checkpoint: c, SignedNote: s} + return nil +} + +// CreateAndSignCheckpoint creates a 
signed checkpoint as a commitment to the current root hash +func CreateAndSignCheckpoint(ctx context.Context, hostname string, treeID int64, treeSize uint64, rootHash []byte, signer signature.Signer) ([]byte, error) { + sth, err := CreateSignedCheckpoint(Checkpoint{ + Origin: fmt.Sprintf("%s - %d", hostname, treeID), + Size: treeSize, + Hash: rootHash, + }) + if err != nil { + return nil, fmt.Errorf("error creating checkpoint: %w", err) + } + if _, err := sth.Sign(hostname, signer, options.WithContext(ctx)); err != nil { + return nil, fmt.Errorf("error signing checkpoint: %w", err) + } + scBytes, err := sth.MarshalText() + if err != nil { + return nil, fmt.Errorf("error marshalling checkpoint: %w", err) + } + return scBytes, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/fetch.go b/vendor/github.com/sigstore/rekor/pkg/util/fetch.go new file mode 100644 index 000000000000..7f8e93fb0463 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/fetch.go @@ -0,0 +1,49 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" +) + +// FileOrURLReadCloser Note: caller is responsible for closing ReadCloser returned from method! +func FileOrURLReadCloser(ctx context.Context, url string, content []byte) (io.ReadCloser, error) { + var dataReader io.ReadCloser + if url != "" { + //TODO: set timeout here, SSL settings? + client := &http.Client{} + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return nil, fmt.Errorf("error received while fetching artifact '%v': %v", url, resp.Status) + } + + dataReader = resp.Body + } else { + dataReader = io.NopCloser(bytes.NewReader(content)) + } + return dataReader, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/sha.go b/vendor/github.com/sigstore/rekor/pkg/util/sha.go new file mode 100644 index 000000000000..07b8fb1b53d8 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/sha.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "crypto" + "fmt" + "strings" +) + +// PrefixSHA sets the prefix of a sha hash to match how it is stored based on the length. 
+func PrefixSHA(sha string) string { + var prefix string + var components = strings.Split(sha, ":") + + if len(components) == 2 { + return sha + } + + switch len(sha) { + case 40: + prefix = "sha1:" + case 64: + prefix = "sha256:" + case 96: + prefix = "sha384:" + case 128: + prefix = "sha512:" + } + + return fmt.Sprintf("%v%v", prefix, sha) +} + +func UnprefixSHA(sha string) (crypto.Hash, string) { + components := strings.Split(sha, ":") + + if len(components) == 2 { + prefix := components[0] + sha = components[1] + + switch prefix { + case "sha1": + return crypto.SHA1, sha + case "sha256": + return crypto.SHA256, sha + case "sha384": + return crypto.SHA384, sha + case "sha512": + return crypto.SHA512, sha + default: + return crypto.Hash(0), "" + } + } + + switch len(sha) { + case 40: + return crypto.SHA1, sha + case 64: + return crypto.SHA256, sha + case 96: + return crypto.SHA384, sha + case 128: + return crypto.SHA512, sha + } + + return crypto.Hash(0), "" +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go new file mode 100644 index 000000000000..4c9c8f8a70f7 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go @@ -0,0 +1,211 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bufio" + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "strings" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" + "golang.org/x/mod/sumdb/note" +) + +type SignedNote struct { + // Textual representation of a note to sign. 
+ Note string + // Signatures are one or more signature lines covering the payload + Signatures []note.Signature +} + +// Sign adds a signature to a SignedCheckpoint object +// The signature is added to the signature array as well as being directly returned to the caller +func (s *SignedNote) Sign(identity string, signer signature.Signer, opts signature.SignOption) (*note.Signature, error) { + sig, err := signer.SignMessage(bytes.NewReader([]byte(s.Note)), opts) + if err != nil { + return nil, fmt.Errorf("signing note: %w", err) + } + + pk, err := signer.PublicKey() + if err != nil { + return nil, fmt.Errorf("retrieving public key: %w", err) + } + pkHash, err := getPublicKeyHash(pk) + if err != nil { + return nil, err + } + + signature := note.Signature{ + Name: identity, + Hash: pkHash, + Base64: base64.StdEncoding.EncodeToString(sig), + } + + s.Signatures = append(s.Signatures, signature) + return &signature, nil +} + +// Verify checks that one of the signatures can be successfully verified using +// the supplied public key +func (s SignedNote) Verify(verifier signature.Verifier) bool { + if len(s.Signatures) == 0 { + return false + } + + msg := []byte(s.Note) + digest := sha256.Sum256(msg) + + pk, err := verifier.PublicKey() + if err != nil { + return false + } + verifierPkHash, err := getPublicKeyHash(pk) + if err != nil { + return false + } + + for _, s := range s.Signatures { + sigBytes, err := base64.StdEncoding.DecodeString(s.Base64) + if err != nil { + return false + } + + if s.Hash != verifierPkHash { + return false + } + + opts := []signature.VerifyOption{} + switch pk.(type) { + case *rsa.PublicKey, *ecdsa.PublicKey: + opts = append(opts, options.WithDigest(digest[:])) + case ed25519.PublicKey: + break + default: + return false + } + if err := verifier.VerifySignature(bytes.NewReader(sigBytes), bytes.NewReader(msg), opts...); err != nil { + return false + } + } + return true +} + +// MarshalText returns the common format representation of this SignedNote. +func (s SignedNote) MarshalText() ([]byte, error) { + return []byte(s.String()), nil +} + +// String returns the String representation of the SignedNote +func (s SignedNote) String() string { + var b strings.Builder + b.WriteString(s.Note) + b.WriteRune('\n') + for _, sig := range s.Signatures { + var hbuf [4]byte + binary.BigEndian.PutUint32(hbuf[:], sig.Hash) + sigBytes, _ := base64.StdEncoding.DecodeString(sig.Base64) + b64 := base64.StdEncoding.EncodeToString(append(hbuf[:], sigBytes...)) + fmt.Fprintf(&b, "%c %s %s\n", '\u2014', sig.Name, b64) + } + + return b.String() +} + +// UnmarshalText parses the common formatted signed note data and stores the result +// in the SignedNote. THIS DOES NOT VERIFY SIGNATURES INSIDE THE CONTENT! +// +// The supplied data is expected to contain a single Note, followed by a single +// line with no comment, followed by one or more lines with the following format: +// +// \u2014 name signature +// +// - name is the string associated with the signer +// - signature is a base64 encoded string; the first 4 bytes of the decoded value is a +// hint to the public key; it is a big-endian encoded uint32 representing the first +// 4 bytes of the SHA256 hash of the public key +func (s *SignedNote) UnmarshalText(data []byte) error { + sigSplit := []byte("\n\n") + // Must end with signature block preceded by blank line. 
+ split := bytes.LastIndex(data, sigSplit) + if split < 0 { + return errors.New("malformed note") + } + text, data := data[:split+1], data[split+2:] + if len(data) == 0 || data[len(data)-1] != '\n' { + return errors.New("malformed note") + } + + sn := SignedNote{ + Note: string(text), + } + + b := bufio.NewScanner(bytes.NewReader(data)) + for b.Scan() { + var name, signature string + if _, err := fmt.Fscanf(strings.NewReader(b.Text()), "\u2014 %s %s\n", &name, &signature); err != nil { + return fmt.Errorf("parsing signature: %w", err) + } + + sigBytes, err := base64.StdEncoding.DecodeString(signature) + if err != nil { + return fmt.Errorf("decoding signature: %w", err) + } + if len(sigBytes) < 5 { + return errors.New("signature is too small") + } + + sig := note.Signature{ + Name: name, + Hash: binary.BigEndian.Uint32(sigBytes[0:4]), + Base64: base64.StdEncoding.EncodeToString(sigBytes[4:]), + } + sn.Signatures = append(sn.Signatures, sig) + + } + if len(sn.Signatures) == 0 { + return errors.New("no signatures found in input") + } + + // copy sc to s + *s = sn + return nil +} + +func SignedNoteValidator(strToValidate string) bool { + s := SignedNote{} + return s.UnmarshalText([]byte(strToValidate)) == nil +} + +func getPublicKeyHash(publicKey crypto.PublicKey) (uint32, error) { + pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return 0, fmt.Errorf("marshalling public key: %w", err) + } + pkSha := sha256.Sum256(pubKeyBytes) + hash := binary.BigEndian.Uint32(pkSha[:]) + return hash, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go new file mode 100644 index 000000000000..61846923b796 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go @@ -0,0 +1,234 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/sigstore/rekor/pkg/generated/client" + "github.com/sigstore/rekor/pkg/generated/client/tlog" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/util" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" +) + +// ProveConsistency verifies consistency between an initial, trusted STH +// and a second new STH. Callers MUST verify signature on the STHs'. 
+func ProveConsistency(ctx context.Context, rClient *client.Rekor, + oldSTH *util.SignedCheckpoint, newSTH *util.SignedCheckpoint, treeID string) error { + oldTreeSize := int64(oldSTH.Size) // nolint: gosec + switch { + case oldTreeSize == 0: + return errors.New("consistency proofs can not be computed starting from an empty log") + case oldTreeSize == int64(newSTH.Size): // nolint: gosec + if !bytes.Equal(oldSTH.Hash, newSTH.Hash) { + return errors.New("old root hash does not match STH hash") + } + case oldTreeSize < int64(newSTH.Size): // nolint: gosec + consistencyParams := tlog.NewGetLogProofParamsWithContext(ctx) + consistencyParams.FirstSize = &oldTreeSize // Root size at the old, or trusted state. + consistencyParams.LastSize = int64(newSTH.Size) // nolint: gosec // Root size at the new state to verify against. + consistencyParams.TreeID = &treeID + consistencyProof, err := rClient.Tlog.GetLogProof(consistencyParams) + if err != nil { + return err + } + var hashes [][]byte + for _, h := range consistencyProof.Payload.Hashes { + b, err := hex.DecodeString(h) + if err != nil { + return errors.New("error decoding consistency proof hashes") + } + hashes = append(hashes, b) + } + if err := proof.VerifyConsistency(rfc6962.DefaultHasher, + oldSTH.Size, newSTH.Size, hashes, oldSTH.Hash, newSTH.Hash); err != nil { + return err + } + case oldTreeSize > int64(newSTH.Size): // nolint: gosec + return errors.New("inclusion proof returned a tree size larger than the verified tree size") + } + return nil + +} + +// VerifyCurrentCheckpoint verifies the provided checkpoint by verifying consistency +// against a newly fetched Checkpoint. +// nolint +func VerifyCurrentCheckpoint(ctx context.Context, rClient *client.Rekor, verifier signature.Verifier, + oldSTH *util.SignedCheckpoint) (*util.SignedCheckpoint, error) { + // The oldSTH should already be verified, but check for robustness. + if !oldSTH.Verify(verifier) { + return nil, errors.New("signature on old tree head did not verify") + } + + // Get and verify against the current STH. + infoParams := tlog.NewGetLogInfoParamsWithContext(ctx) + result, err := rClient.Tlog.GetLogInfo(infoParams) + if err != nil { + return nil, err + } + + logInfo := result.GetPayload() + sth := util.SignedCheckpoint{} + if err := sth.UnmarshalText([]byte(*logInfo.SignedTreeHead)); err != nil { + return nil, err + } + + // Verify the signature on the SignedCheckpoint. + if !sth.Verify(verifier) { + return nil, errors.New("signature on tree head did not verify") + } + + // Now verify consistency up to the STH. + if err := ProveConsistency(ctx, rClient, oldSTH, &sth, *logInfo.TreeID); err != nil { + return nil, err + } + return &sth, nil +} + +// VerifyCheckpointSignature verifies the signature on a checkpoint (signed tree head). It does +// not verify consistency against other checkpoints. 
+// nolint +func VerifyCheckpointSignature(e *models.LogEntryAnon, verifier signature.Verifier) error { + sth := &util.SignedCheckpoint{} + if err := sth.UnmarshalText([]byte(*e.Verification.InclusionProof.Checkpoint)); err != nil { + return fmt.Errorf("unmarshalling log entry checkpoint to SignedCheckpoint: %w", err) + } + if !sth.Verify(verifier) { + return errors.New("signature on checkpoint did not verify") + } + rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash) + if err != nil { + return errors.New("decoding inclusion proof root has") + } + + if !bytes.EqualFold(rootHash, sth.Hash) { + return fmt.Errorf("proof root hash does not match signed tree head, expected %s got %s", + *e.Verification.InclusionProof.RootHash, + hex.EncodeToString(sth.Hash)) + } + return nil +} + +// VerifyInclusion verifies an entry's inclusion proof. Clients MUST either verify +// the root hash against a new STH (via VerifyCurrentCheckpoint) or against a +// trusted, existing STH (via ProveConsistency). +// nolint +func VerifyInclusion(ctx context.Context, e *models.LogEntryAnon) error { + if e.Verification == nil || e.Verification.InclusionProof == nil { + return errors.New("inclusion proof not provided") + } + + hashes := [][]byte{} + for _, h := range e.Verification.InclusionProof.Hashes { + hb, _ := hex.DecodeString(h) + hashes = append(hashes, hb) + } + + rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash) + if err != nil { + return err + } + + // Verify the inclusion proof. + entryBytes, err := base64.StdEncoding.DecodeString(e.Body.(string)) + if err != nil { + return err + } + leafHash := rfc6962.DefaultHasher.HashLeaf(entryBytes) + + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(*e.Verification.InclusionProof.LogIndex), + uint64(*e.Verification.InclusionProof.TreeSize), leafHash, hashes, rootHash); err != nil { // nolint: gosec + return err + } + + return nil +} + +// VerifySignedEntryTimestamp verifies the entry's SET against the provided +// public key. +// nolint +func VerifySignedEntryTimestamp(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error { + if e.Verification == nil { + return errors.New("missing verification") + } + if e.Verification.SignedEntryTimestamp == nil { + return errors.New("signature missing") + } + + type bundle struct { + Body interface{} `json:"body"` + IntegratedTime int64 `json:"integratedTime"` + // Note that this is the virtual index. + LogIndex int64 `json:"logIndex"` + LogID string `json:"logID"` + } + bundlePayload := bundle{ + Body: e.Body, + IntegratedTime: *e.IntegratedTime, + LogIndex: *e.LogIndex, + LogID: *e.LogID, + } + contents, err := json.Marshal(bundlePayload) + if err != nil { + return fmt.Errorf("marshaling bundle: %w", err) + } + canonicalized, err := jsoncanonicalizer.Transform(contents) + if err != nil { + return fmt.Errorf("canonicalizing bundle: %w", err) + } + + // verify the SET against the public key + if err := verifier.VerifySignature(bytes.NewReader(e.Verification.SignedEntryTimestamp), + bytes.NewReader(canonicalized), options.WithContext(ctx)); err != nil { + return fmt.Errorf("unable to verify bundle: %w", err) + } + return nil +} + +// VerifyLogEntry performs verification of a LogEntry given a Rekor verifier. +// Performs inclusion proof verification up to a verified root hash, +// SignedEntryTimestamp verification, and checkpoint verification. 
+// nolint +func VerifyLogEntry(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error { + // Verify the inclusion proof using the body's leaf hash. + if err := VerifyInclusion(ctx, e); err != nil { + return err + } + + // Verify checkpoint, which includes a signed root hash. + if err := VerifyCheckpointSignature(e, verifier); err != nil { + return err + } + + // Verify the Signed Entry Timestamp. + if err := VerifySignedEntryTimestamp(ctx, e, verifier); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt new file mode 100644 index 000000000000..406ee260604f --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt @@ -0,0 +1,13 @@ +Copyright 2023 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore-go/LICENSE b/vendor/github.com/sigstore/sigstore-go/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go new file mode 100644 index 000000000000..5919230e8a33 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go @@ -0,0 +1,408 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bundle + +import ( + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "os" + "strings" + + "github.com/secure-systems-lab/go-securesystemslib/dsse" + protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + protodsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse" + "golang.org/x/mod/semver" + "google.golang.org/protobuf/encoding/protojson" + + "github.com/sigstore/sigstore-go/pkg/tlog" + "github.com/sigstore/sigstore-go/pkg/verify" +) + +var ErrValidation = errors.New("validation error") +var ErrUnsupportedMediaType = fmt.Errorf("%w: unsupported media type", ErrValidation) +var ErrEmptyBundle = fmt.Errorf("%w: empty protobuf bundle", ErrValidation) +var ErrMissingVerificationMaterial = fmt.Errorf("%w: missing verification material", ErrValidation) +var ErrMissingBundleContent = fmt.Errorf("%w: missing bundle content", ErrValidation) +var ErrUnimplemented = errors.New("unimplemented") +var ErrInvalidAttestation = fmt.Errorf("%w: invalid attestation", ErrValidation) +var ErrMissingEnvelope = fmt.Errorf("%w: missing valid envelope", ErrInvalidAttestation) +var ErrDecodingJSON = fmt.Errorf("%w: decoding json", ErrInvalidAttestation) +var ErrDecodingB64 = fmt.Errorf("%w: decoding base64", ErrInvalidAttestation) + +const mediaTypeBase = "application/vnd.dev.sigstore.bundle" + +func ErrValidationError(err error) error { + return fmt.Errorf("%w: %w", ErrValidation, err) +} + +type Bundle struct { + *protobundle.Bundle + hasInclusionPromise bool + hasInclusionProof bool +} + +func NewBundle(pbundle *protobundle.Bundle) (*Bundle, error) { + bundle := &Bundle{ + Bundle: pbundle, + hasInclusionPromise: false, + hasInclusionProof: false, + } + + err := bundle.validate() + if err != nil { + return nil, err + } + + return bundle, nil +} + +// Deprecated: use Bundle instead +type ProtobufBundle = Bundle + +// Deprecated: use NewBundle instead +func NewProtobufBundle(b *protobundle.Bundle) (*ProtobufBundle, error) { + return NewBundle(b) +} + +func (b *Bundle) validate() error { + bundleVersion, err := b.Version() + if err != nil { + return fmt.Errorf("error getting bundle version: %w", err) + } + + // if bundle version is < 0.1, return error + if semver.Compare(bundleVersion, "v0.1") < 0 { + return fmt.Errorf("%w: bundle version %s is not supported", ErrUnsupportedMediaType, bundleVersion) + } + + // fetch tlog entries, as next check needs to check them for inclusion proof/promise + entries, err := b.TlogEntries() + if err != nil { + return err + } + + // if bundle version == v0.1, require inclusion promise + if semver.Compare(bundleVersion, "v0.1") == 0 { + if len(entries) > 0 && !b.hasInclusionPromise { + return errors.New("inclusion promises missing in bundle (required for bundle v0.1)") + } + } else { + // if bundle version >= v0.2, require inclusion proof + if len(entries) > 0 && !b.hasInclusionProof { + return errors.New("inclusion proof missing in bundle (required for bundle v0.2)") + } + } + + // if bundle version >= v0.3, require verification material to not be X.509 certificate chain (only single certificate is allowed) + if semver.Compare(bundleVersion, "v0.3") >= 0 { + certs := b.VerificationMaterial.GetX509CertificateChain() + + if certs != nil { + return errors.New("verification material cannot be X.509 certificate chain (for bundle v0.3)") + } + } + + // if bundle version is >= v0.4, return error as this version is not supported + if semver.Compare(bundleVersion, "v0.4") >= 0 { + 
return fmt.Errorf("%w: bundle version %s is not yet supported", ErrUnsupportedMediaType, bundleVersion) + } + + err = validateBundle(b.Bundle) + if err != nil { + return fmt.Errorf("invalid bundle: %w", err) + } + return nil +} + +// MediaTypeString returns a mediatype string for the specified bundle version. +// The function returns an error if the resulting string does validate. +func MediaTypeString(version string) (string, error) { + if version == "" { + return "", fmt.Errorf("unable to build media type string, no version defined") + } + + var mtString string + + version = strings.TrimPrefix(version, "v") + mtString = fmt.Sprintf("%s.v%s+json", mediaTypeBase, strings.TrimPrefix(version, "v")) + + if version == "0.1" || version == "0.2" { + mtString = fmt.Sprintf("%s+json;version=%s", mediaTypeBase, strings.TrimPrefix(version, "v")) + } + + if _, err := getBundleVersion(mtString); err != nil { + return "", fmt.Errorf("unable to build mediatype: %w", err) + } + + return mtString, nil +} + +func (b *Bundle) Version() (string, error) { + return getBundleVersion(b.MediaType) +} + +func getBundleVersion(mediaType string) (string, error) { + switch mediaType { + case mediaTypeBase + "+json;version=0.1": + return "v0.1", nil + case mediaTypeBase + "+json;version=0.2": + return "v0.2", nil + case mediaTypeBase + "+json;version=0.3": + return "v0.3", nil + } + if strings.HasPrefix(mediaType, mediaTypeBase+".v") && strings.HasSuffix(mediaType, "+json") { + version := strings.TrimPrefix(mediaType, mediaTypeBase+".") + version = strings.TrimSuffix(version, "+json") + if semver.IsValid(version) { + return version, nil + } + return "", fmt.Errorf("%w: invalid bundle version: %s", ErrUnsupportedMediaType, version) + } + return "", fmt.Errorf("%w: %s", ErrUnsupportedMediaType, mediaType) +} + +func validateBundle(b *protobundle.Bundle) error { + if b == nil { + return ErrEmptyBundle + } + + if b.Content == nil { + return ErrMissingBundleContent + } + + switch b.Content.(type) { + case *protobundle.Bundle_DsseEnvelope, *protobundle.Bundle_MessageSignature: + default: + return fmt.Errorf("invalid bundle content: bundle content must be either a message signature or dsse envelope") + } + + if b.VerificationMaterial == nil || b.VerificationMaterial.Content == nil { + return ErrMissingVerificationMaterial + } + + switch b.VerificationMaterial.Content.(type) { + case *protobundle.VerificationMaterial_PublicKey, *protobundle.VerificationMaterial_Certificate, *protobundle.VerificationMaterial_X509CertificateChain: + default: + return fmt.Errorf("invalid verification material content: verification material must be one of public key, x509 certificate and x509 certificate chain") + } + + return nil +} + +func LoadJSONFromPath(path string) (*Bundle, error) { + var bundle Bundle + bundle.Bundle = new(protobundle.Bundle) + + contents, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + err = bundle.UnmarshalJSON(contents) + if err != nil { + return nil, err + } + + return &bundle, nil +} + +func (b *Bundle) MarshalJSON() ([]byte, error) { + return protojson.Marshal(b.Bundle) +} + +func (b *Bundle) UnmarshalJSON(data []byte) error { + b.Bundle = new(protobundle.Bundle) + err := protojson.Unmarshal(data, b.Bundle) + if err != nil { + return err + } + + err = b.validate() + if err != nil { + return err + } + + return nil +} + +func (b *Bundle) VerificationContent() (verify.VerificationContent, error) { + if b.VerificationMaterial == nil { + return nil, ErrMissingVerificationMaterial + } + + switch 
content := b.VerificationMaterial.GetContent().(type) { + case *protobundle.VerificationMaterial_X509CertificateChain: + if content.X509CertificateChain == nil { + return nil, ErrMissingVerificationMaterial + } + certs := content.X509CertificateChain.GetCertificates() + if len(certs) == 0 || certs[0].RawBytes == nil { + return nil, ErrMissingVerificationMaterial + } + parsedCert, err := x509.ParseCertificate(certs[0].RawBytes) + if err != nil { + return nil, ErrValidationError(err) + } + cert := &Certificate{ + certificate: parsedCert, + } + return cert, nil + case *protobundle.VerificationMaterial_Certificate: + if content.Certificate == nil || content.Certificate.RawBytes == nil { + return nil, ErrMissingVerificationMaterial + } + parsedCert, err := x509.ParseCertificate(content.Certificate.RawBytes) + if err != nil { + return nil, ErrValidationError(err) + } + cert := &Certificate{ + certificate: parsedCert, + } + return cert, nil + case *protobundle.VerificationMaterial_PublicKey: + if content.PublicKey == nil { + return nil, ErrMissingVerificationMaterial + } + pk := &PublicKey{ + hint: content.PublicKey.Hint, + } + return pk, nil + + default: + return nil, ErrMissingVerificationMaterial + } +} + +func (b *Bundle) HasInclusionPromise() bool { + return b.hasInclusionPromise +} + +func (b *Bundle) HasInclusionProof() bool { + return b.hasInclusionProof +} + +func (b *Bundle) TlogEntries() ([]*tlog.Entry, error) { + if b.VerificationMaterial == nil { + return nil, nil + } + + tlogEntries := make([]*tlog.Entry, len(b.VerificationMaterial.TlogEntries)) + var err error + for i, entry := range b.VerificationMaterial.TlogEntries { + tlogEntries[i], err = tlog.ParseTransparencyLogEntry(entry) + if err != nil { + return nil, ErrValidationError(err) + } + + if tlogEntries[i].HasInclusionPromise() { + b.hasInclusionPromise = true + } + if tlogEntries[i].HasInclusionProof() { + b.hasInclusionProof = true + } + } + + return tlogEntries, nil +} + +func (b *Bundle) SignatureContent() (verify.SignatureContent, error) { + switch content := b.Content.(type) { //nolint:gocritic + case *protobundle.Bundle_DsseEnvelope: + envelope, err := parseEnvelope(content.DsseEnvelope) + if err != nil { + return nil, err + } + return envelope, nil + case *protobundle.Bundle_MessageSignature: + if content.MessageSignature == nil || content.MessageSignature.MessageDigest == nil { + return nil, ErrMissingVerificationMaterial + } + return NewMessageSignature( + content.MessageSignature.MessageDigest.Digest, + protocommon.HashAlgorithm_name[int32(content.MessageSignature.MessageDigest.Algorithm)], + content.MessageSignature.Signature, + ), nil + } + return nil, ErrMissingVerificationMaterial +} + +func (b *Bundle) Envelope() (*Envelope, error) { + switch content := b.Content.(type) { //nolint:gocritic + case *protobundle.Bundle_DsseEnvelope: + envelope, err := parseEnvelope(content.DsseEnvelope) + if err != nil { + return nil, err + } + return envelope, nil + } + return nil, ErrMissingVerificationMaterial +} + +func (b *Bundle) Timestamps() ([][]byte, error) { + if b.VerificationMaterial == nil { + return nil, ErrMissingVerificationMaterial + } + + signedTimestamps := make([][]byte, 0) + + if b.VerificationMaterial.TimestampVerificationData == nil { + return signedTimestamps, nil + } + + for _, timestamp := range b.VerificationMaterial.TimestampVerificationData.Rfc3161Timestamps { + signedTimestamps = append(signedTimestamps, timestamp.SignedTimestamp) + } + + return signedTimestamps, nil +} + +// MinVersion returns true 
if the bundle version is greater than or equal to the expected version. +func (b *Bundle) MinVersion(expectVersion string) bool { + version, err := b.Version() + if err != nil { + return false + } + + if !strings.HasPrefix(expectVersion, "v") { + expectVersion = "v" + expectVersion + } + + return semver.Compare(version, expectVersion) >= 0 +} + +func parseEnvelope(input *protodsse.Envelope) (*Envelope, error) { + if input == nil { + return nil, ErrMissingEnvelope + } + output := &dsse.Envelope{} + payload := input.GetPayload() + if payload == nil { + return nil, ErrMissingEnvelope + } + output.Payload = base64.StdEncoding.EncodeToString([]byte(payload)) + output.PayloadType = string(input.GetPayloadType()) + output.Signatures = make([]dsse.Signature, len(input.GetSignatures())) + for i, sig := range input.GetSignatures() { + if sig == nil { + return nil, ErrMissingEnvelope + } + output.Signatures[i].KeyID = sig.GetKeyid() + output.Signatures[i].Sig = base64.StdEncoding.EncodeToString(sig.GetSig()) + } + return &Envelope{Envelope: output}, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go new file mode 100644 index 000000000000..b3e6dd69e6ca --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go @@ -0,0 +1,106 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bundle + +import ( + "encoding/base64" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore-go/pkg/verify" + "google.golang.org/protobuf/encoding/protojson" +) + +const IntotoMediaType = "application/vnd.in-toto+json" + +type MessageSignature struct { + digest []byte + digestAlgorithm string + signature []byte +} + +func (m *MessageSignature) Digest() []byte { + return m.digest +} + +func (m *MessageSignature) DigestAlgorithm() string { + return m.digestAlgorithm +} + +func NewMessageSignature(digest []byte, digestAlgorithm string, signature []byte) *MessageSignature { + return &MessageSignature{ + digest: digest, + digestAlgorithm: digestAlgorithm, + signature: signature, + } +} + +type Envelope struct { + *dsse.Envelope +} + +func (e *Envelope) Statement() (*in_toto.Statement, error) { + if e.PayloadType != IntotoMediaType { + return nil, ErrUnsupportedMediaType + } + + var statement in_toto.Statement + raw, err := e.DecodeB64Payload() + if err != nil { + return nil, ErrDecodingB64 + } + err = protojson.Unmarshal(raw, &statement) + if err != nil { + return nil, ErrDecodingJSON + } + return &statement, nil +} + +func (e *Envelope) EnvelopeContent() verify.EnvelopeContent { + return e +} + +func (e *Envelope) RawEnvelope() *dsse.Envelope { + return e.Envelope +} + +func (m *MessageSignature) EnvelopeContent() verify.EnvelopeContent { + return nil +} + +func (e *Envelope) MessageSignatureContent() verify.MessageSignatureContent { + return nil +} + +func (m *MessageSignature) MessageSignatureContent() verify.MessageSignatureContent { + return m +} + +func (m *MessageSignature) Signature() []byte { + return m.signature +} + +func (e *Envelope) Signature() []byte { + if len(e.Signatures) == 0 { + return []byte{} + } + + sigBytes, err := base64.StdEncoding.DecodeString(e.Signatures[0].Sig) + if err != nil { + return []byte{} + } + + return sigBytes +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go new file mode 100644 index 000000000000..86e1e0bbc0cb --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go @@ -0,0 +1,92 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bundle + +import ( + "crypto" + "crypto/x509" + "time" + + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/verify" +) + +type Certificate struct { + certificate *x509.Certificate +} + +func NewCertificate(cert *x509.Certificate) *Certificate { + return &Certificate{certificate: cert} +} + +type PublicKey struct { + hint string +} + +func (pk PublicKey) Hint() string { + return pk.hint +} + +func (c *Certificate) CompareKey(key any, _ root.TrustedMaterial) bool { + x509Key, ok := key.(*x509.Certificate) + if !ok { + return false + } + + return c.certificate.Equal(x509Key) +} + +func (c *Certificate) ValidAtTime(t time.Time, _ root.TrustedMaterial) bool { + return !c.certificate.NotAfter.Before(t) && !c.certificate.NotBefore.After(t) +} + +func (c *Certificate) Certificate() *x509.Certificate { + return c.certificate +} + +func (c *Certificate) PublicKey() verify.PublicKeyProvider { + return nil +} + +func (pk *PublicKey) CompareKey(key any, tm root.TrustedMaterial) bool { + verifier, err := tm.PublicKeyVerifier(pk.hint) + if err != nil { + return false + } + pubKey, err := verifier.PublicKey() + if err != nil { + return false + } + if equaler, ok := key.(interface{ Equal(x crypto.PublicKey) bool }); ok { + return equaler.Equal(pubKey) + } + return false +} + +func (pk *PublicKey) ValidAtTime(t time.Time, tm root.TrustedMaterial) bool { + verifier, err := tm.PublicKeyVerifier(pk.hint) + if err != nil { + return false + } + return verifier.ValidAtTime(t) +} + +func (pk *PublicKey) Certificate() *x509.Certificate { + return nil +} + +func (pk *PublicKey) PublicKey() verify.PublicKeyProvider { + return pk +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go new file mode 100644 index 000000000000..e9a0680d1620 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go @@ -0,0 +1,239 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// This file is a verbatim copy of https://github.com/sigstore/fulcio/blob/3707d80bb25330bc7ffbd9702fb401cd643e36fa/pkg/certificate/extensions.go , +// EXCEPT: +// - the parseExtensions func has been renamed ParseExtensions + +package certificate + +import ( + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +var ( + // Deprecated: Use OIDIssuerV2 + OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1} + // Deprecated: Use OIDBuildTrigger + OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2} + // Deprecated: Use OIDSourceRepositoryDigest + OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3} + // Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest + OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4} + // Deprecated: Use SourceRepositoryURI + OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5} + // Deprecated: Use OIDSourceRepositoryRef + OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6} + + OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7} + OIDIssuerV2 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8} + + // CI extensions + OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9} + OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10} + OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11} + OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12} + OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13} + OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14} + OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15} + OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16} + OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17} + OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18} + OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19} + OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20} + OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21} + OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22} +) + +// Extensions contains all custom x509 extensions defined by Fulcio +type Extensions struct { + // NB: New extensions must be added here and documented + // at docs/oidc-info.md + + // The OIDC issuer. Should match `iss` claim of ID token or, in the case of + // a federated login like Dex it should match the issuer URL of the + // upstream issuer. The issuer is not set the extensions are invalid and + // will fail to render. + Issuer string `json:"issuer,omitempty"` // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated) + + // Deprecated + // Triggering event of the Github Workflow. Matches the `event_name` claim of ID + // tokens from Github Actions + GithubWorkflowTrigger string `json:"githubWorkflowTrigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2 + + // Deprecated + // SHA of git commit being built in Github Actions. Matches the `sha` claim of ID + // tokens from Github Actions + GithubWorkflowSHA string `json:"githubWorkflowSHA,omitempty"` //nolint:tagliatelle // OID 1.3.6.1.4.1.57264.1.3 + + // Deprecated + // Name of Github Actions Workflow. 
Matches the `workflow` claim of the ID + // tokens from Github Actions + GithubWorkflowName string `json:"githubWorkflowName,omitempty"` // OID 1.3.6.1.4.1.57264.1.4 + + // Deprecated + // Repository of the Github Actions Workflow. Matches the `repository` claim of the ID + // tokens from Github Actions + GithubWorkflowRepository string `json:"githubWorkflowRepository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5 + + // Deprecated + // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens + // from Github Actions + GithubWorkflowRef string `json:"githubWorkflowRef,omitempty"` // 1.3.6.1.4.1.57264.1.6 + + // Reference to specific build instructions that are responsible for signing. + BuildSignerURI string `json:"buildSignerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.9 + + // Immutable reference to the specific version of the build instructions that is responsible for signing. + BuildSignerDigest string `json:"buildSignerDigest,omitempty"` // 1.3.6.1.4.1.57264.1.10 + + // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. + RunnerEnvironment string `json:"runnerEnvironment,omitempty"` // 1.3.6.1.4.1.57264.1.11 + + // Source repository URL that the build was based on. + SourceRepositoryURI string `json:"sourceRepositoryURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.12 + + // Immutable reference to a specific version of the source code that the build was based upon. + SourceRepositoryDigest string `json:"sourceRepositoryDigest,omitempty"` // 1.3.6.1.4.1.57264.1.13 + + // Source Repository Ref that the build run was based upon. + SourceRepositoryRef string `json:"sourceRepositoryRef,omitempty"` // 1.3.6.1.4.1.57264.1.14 + + // Immutable identifier for the source repository the workflow was based upon. + SourceRepositoryIdentifier string `json:"sourceRepositoryIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.15 + + // Source repository owner URL of the owner of the source repository that the build was based on. + SourceRepositoryOwnerURI string `json:"sourceRepositoryOwnerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.16 + + // Immutable identifier for the owner of the source repository that the workflow was based upon. + SourceRepositoryOwnerIdentifier string `json:"sourceRepositoryOwnerIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.17 + + // Build Config URL to the top-level/initiating build instructions. + BuildConfigURI string `json:"buildConfigURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.18 + + // Immutable reference to the specific version of the top-level/initiating build instructions. + BuildConfigDigest string `json:"buildConfigDigest,omitempty"` // 1.3.6.1.4.1.57264.1.19 + + // Event or action that initiated the build. + BuildTrigger string `json:"buildTrigger,omitempty"` // 1.3.6.1.4.1.57264.1.20 + + // Run Invocation URL to uniquely identify the build execution. + RunInvocationURI string `json:"runInvocationURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.21 + + // Source repository visibility at the time of signing the certificate. 
+ SourceRepositoryVisibilityAtSigning string `json:"sourceRepositoryVisibilityAtSigning,omitempty"` // 1.3.6.1.4.1.57264.1.22 +} + +func ParseExtensions(ext []pkix.Extension) (Extensions, error) { + out := Extensions{} + + for _, e := range ext { + switch { + // BEGIN: Deprecated + case e.Id.Equal(OIDIssuer): + out.Issuer = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowTrigger): + out.GithubWorkflowTrigger = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowSHA): + out.GithubWorkflowSHA = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowName): + out.GithubWorkflowName = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRepository): + out.GithubWorkflowRepository = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRef): + out.GithubWorkflowRef = string(e.Value) + // END: Deprecated + case e.Id.Equal(OIDIssuerV2): + if err := ParseDERString(e.Value, &out.Issuer); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerURI): + if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerDigest): + if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunnerEnvironment): + if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryDigest): + if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryRef): + if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigURI): + if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigDigest): + if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildTrigger): + if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunInvocationURI): + if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning): + if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil { + return Extensions{}, err + } + } + } + + // We only ever return nil, but leaving error in place so that we can add + // more complex parsing of fields in a backwards compatible way if needed. + return out, nil +} + +// ParseDERString decodes a DER-encoded string and puts the value in parsedVal. +// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding. 
+func ParseDERString(val []byte, parsedVal *string) error { + rest, err := asn1.Unmarshal(val, parsedVal) + if err != nil { + return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %w", err) + } + if len(rest) != 0 { + return errors.New("unexpected trailing bytes in DER-encoded string") + } + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go new file mode 100644 index 000000000000..da9a6661a90e --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go @@ -0,0 +1,90 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package certificate + +import ( + "crypto/x509" + "errors" + "fmt" + "reflect" + + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +type Summary struct { + CertificateIssuer string `json:"certificateIssuer"` + SubjectAlternativeName string `json:"subjectAlternativeName"` + Extensions +} + +type ErrCompareExtensions struct { + field string + expected string + actual string +} + +func (e *ErrCompareExtensions) Error() string { + return fmt.Sprintf("expected %s to be \"%s\", got \"%s\"", e.field, e.expected, e.actual) +} + +func SummarizeCertificate(cert *x509.Certificate) (Summary, error) { + extensions, err := ParseExtensions(cert.Extensions) + + if err != nil { + return Summary{}, err + } + + var san string + + switch { + case len(cert.URIs) > 0: + san = cert.URIs[0].String() + case len(cert.EmailAddresses) > 0: + san = cert.EmailAddresses[0] + } + if san == "" { + san, _ = cryptoutils.UnmarshalOtherNameSAN(cert.Extensions) + } + if san == "" { + return Summary{}, errors.New("no Subject Alternative Name found") + } + + return Summary{CertificateIssuer: cert.Issuer.String(), SubjectAlternativeName: san, Extensions: extensions}, nil +} + +// CompareExtensions compares two Extensions structs and returns an error if +// any set values in the expected struct not equal. Empty fields in the +// expectedExt struct are ignored. 
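To illustrate the extension-parsing API above, a short consumer-side sketch follows. The certificate path and the expected repository URI are placeholders, not values taken from this patch.

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"

        "github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
    )

    func main() {
        // "leaf.pem" is a placeholder path to a Fulcio-issued signing certificate.
        pemBytes, err := os.ReadFile("leaf.pem")
        if err != nil {
            panic(err)
        }
        block, _ := pem.Decode(pemBytes)
        if block == nil {
            panic("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            panic(err)
        }

        // Decode the Fulcio OID extensions into the typed struct.
        ext, err := certificate.ParseExtensions(cert.Extensions)
        if err != nil {
            panic(err)
        }
        fmt.Println("issuer:", ext.Issuer)

        // Enforce an expectation on the signing identity; empty expected fields are ignored.
        expected := certificate.Extensions{SourceRepositoryURI: "https://github.com/example/repo"}
        if err := certificate.CompareExtensions(expected, ext); err != nil {
            fmt.Println("identity mismatch:", err)
        }
    }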
+func CompareExtensions(expectedExt, actualExt Extensions) error { + expExtValue := reflect.ValueOf(expectedExt) + actExtValue := reflect.ValueOf(actualExt) + + fields := reflect.VisibleFields(expExtValue.Type()) + for _, field := range fields { + expectedFieldVal := expExtValue.FieldByName(field.Name) + + // if the expected field is empty, skip it + if expectedFieldVal.IsValid() && !expectedFieldVal.IsZero() { + actualFieldVal := actExtValue.FieldByName(field.Name) + if actualFieldVal.IsValid() { + if expectedFieldVal.Interface() != actualFieldVal.Interface() { + return &ErrCompareExtensions{field.Name, fmt.Sprintf("%v", expectedFieldVal.Interface()), fmt.Sprintf("%v", actualFieldVal.Interface())} + } + } + } + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go new file mode 100644 index 000000000000..5e1cb67cca2d --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go @@ -0,0 +1,66 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "crypto/x509" + "errors" + "time" +) + +type CertificateAuthority interface { + Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error) +} + +type FulcioCertificateAuthority struct { + Root *x509.Certificate + Intermediates []*x509.Certificate + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + URI string +} + +var _ CertificateAuthority = &FulcioCertificateAuthority{} + +func (ca *FulcioCertificateAuthority) Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error) { + if !ca.ValidityPeriodStart.IsZero() && observerTimestamp.Before(ca.ValidityPeriodStart) { + return nil, errors.New("certificate is not valid yet") + } + if !ca.ValidityPeriodEnd.IsZero() && observerTimestamp.After(ca.ValidityPeriodEnd) { + return nil, errors.New("certificate is no longer valid") + } + + rootCertPool := x509.NewCertPool() + rootCertPool.AddCert(ca.Root) + intermediateCertPool := x509.NewCertPool() + for _, cert := range ca.Intermediates { + intermediateCertPool.AddCert(cert) + } + + // From spec: + // > ## Certificate + // > For a signature with a given certificate to be considered valid, it must have a timestamp while every certificate in the chain up to the root is valid (the so-called “hybrid model” of certificate verification per Braun et al. (2013)). 
+ + opts := x509.VerifyOptions{ + CurrentTime: observerTimestamp, + Roots: rootCertPool, + Intermediates: intermediateCertPool, + KeyUsages: []x509.ExtKeyUsage{ + x509.ExtKeyUsageCodeSigning, + }, + } + + return cert.Verify(opts) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go new file mode 100644 index 000000000000..a86fe0b2c36d --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go @@ -0,0 +1,457 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "fmt" + "math/rand" + "os" + "slices" + "time" + + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + "github.com/sigstore/sigstore-go/pkg/tuf" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const SigningConfigMediaType02 = "application/vnd.dev.sigstore.signingconfig.v0.2+json" + +type SigningConfig struct { + signingConfig *prototrustroot.SigningConfig +} + +type Service struct { + URL string + MajorAPIVersion uint32 + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + Operator string +} + +type ServiceConfiguration struct { + Selector prototrustroot.ServiceSelector + Count uint32 +} + +func NewService(s *prototrustroot.Service) Service { + validFor := s.GetValidFor() + + var start time.Time + if validFor.GetStart() != nil { + start = validFor.GetStart().AsTime() + } + + var end time.Time + if validFor.GetEnd() != nil { + end = validFor.GetEnd().AsTime() + } + + return Service{ + URL: s.GetUrl(), + MajorAPIVersion: s.GetMajorApiVersion(), + ValidityPeriodStart: start, + ValidityPeriodEnd: end, + Operator: s.GetOperator(), + } +} + +// SelectService returns which service endpoint should be used based on supported API versions +// and current time. It will select the first service with the highest API version that matches +// the criteria. Services should be sorted from newest to oldest validity period start time, to +// minimize how far clients need to search to find a matching service. 
+func SelectService(services []Service, supportedAPIVersions []uint32, currentTime time.Time) (Service, error) { + if len(supportedAPIVersions) == 0 { + return Service{}, fmt.Errorf("no supported API versions") + } + + // Order supported versions from highest to lowest + sortedVersions := make([]uint32, len(supportedAPIVersions)) + copy(sortedVersions, supportedAPIVersions) + slices.Sort(sortedVersions) + slices.Reverse(sortedVersions) + + // Order services from newest to oldest + sortedServices := make([]Service, len(services)) + copy(sortedServices, services) + slices.SortFunc(sortedServices, func(i, j Service) int { + return i.ValidityPeriodStart.Compare(j.ValidityPeriodStart) + }) + slices.Reverse(sortedServices) + + for _, version := range sortedVersions { + for _, s := range sortedServices { + if version == s.MajorAPIVersion && s.ValidAtTime(currentTime) { + return s, nil + } + } + } + + return Service{}, fmt.Errorf("no matching service found for API versions %v and current time %v", supportedAPIVersions, currentTime) +} + +// SelectServices returns which service endpoints should be used based on supported API versions +// and current time. It will use the configuration's selector to pick a set of services. +// ALL will return all service endpoints, ANY will return a random endpoint, and +// EXACT will return a random selection of a specified number of endpoints. +// It will select services from the highest supported API versions and will not select +// services from different API versions. It will select distinct service operators, selecting +// at most one service per operator. +func SelectServices(services []Service, config ServiceConfiguration, supportedAPIVersions []uint32, currentTime time.Time) ([]Service, error) { + if len(supportedAPIVersions) == 0 { + return nil, fmt.Errorf("no supported API versions") + } + + // Order supported versions from highest to lowest + sortedVersions := make([]uint32, len(supportedAPIVersions)) + copy(sortedVersions, supportedAPIVersions) + slices.Sort(sortedVersions) + slices.Reverse(sortedVersions) + + // Order services from newest to oldest + sortedServices := make([]Service, len(services)) + copy(sortedServices, services) + slices.SortFunc(sortedServices, func(i, j Service) int { + return i.ValidityPeriodStart.Compare(j.ValidityPeriodStart) + }) + slices.Reverse(sortedServices) + + operators := make(map[string]bool) + var selectedServices []Service + for _, version := range sortedVersions { + for _, s := range sortedServices { + if version == s.MajorAPIVersion && s.ValidAtTime(currentTime) { + // Select the newest service for a given operator + if !operators[s.Operator] { + operators[s.Operator] = true + selectedServices = append(selectedServices, s) + } + } + } + // Exit once a list of services is found + if len(selectedServices) != 0 { + break + } + } + + if len(selectedServices) == 0 { + return nil, fmt.Errorf("no matching services found for API versions %v and current time %v", supportedAPIVersions, currentTime) + } + + // Select services from the highest supported API version + switch config.Selector { + case prototrustroot.ServiceSelector_ALL: + return selectedServices, nil + case prototrustroot.ServiceSelector_ANY: + i := rand.Intn(len(selectedServices)) // #nosec G404 + return []Service{selectedServices[i]}, nil + case prototrustroot.ServiceSelector_EXACT: + matchedUrls, err := selectExact(selectedServices, config.Count) + if err != nil { + return nil, err + } + return matchedUrls, nil + default: + return nil, fmt.Errorf("invalid 
service selector") + } +} + +func selectExact[T any](slice []T, count uint32) ([]T, error) { + if count == 0 { + return nil, fmt.Errorf("service selector count must be greater than 0") + } + if int(count) > len(slice) { + return nil, fmt.Errorf("service selector count %d must be less than or equal to the slice length %d", count, len(slice)) + } + sliceCopy := make([]T, len(slice)) + copy(sliceCopy, slice) + var result []T + for range count { + i := rand.Intn(len(sliceCopy)) // #nosec G404 + result = append(result, sliceCopy[i]) + // Remove element from slice + sliceCopy[i], sliceCopy[len(sliceCopy)-1] = sliceCopy[len(sliceCopy)-1], sliceCopy[i] + sliceCopy = sliceCopy[:len(sliceCopy)-1] + } + return result, nil +} + +func mapFunc[T, V any](ts []T, fn func(T) V) []V { + result := make([]V, len(ts)) + for i, t := range ts { + result[i] = fn(t) + } + return result +} + +func (s Service) ValidAtTime(t time.Time) bool { + if !s.ValidityPeriodStart.IsZero() && t.Before(s.ValidityPeriodStart) { + return false + } + if !s.ValidityPeriodEnd.IsZero() && t.After(s.ValidityPeriodEnd) { + return false + } + return true +} + +func (s Service) ToServiceProtobuf() *prototrustroot.Service { + tr := &v1.TimeRange{ + Start: timestamppb.New(s.ValidityPeriodStart), + } + if !s.ValidityPeriodEnd.IsZero() { + tr.End = timestamppb.New(s.ValidityPeriodEnd) + } + + return &prototrustroot.Service{ + Url: s.URL, + MajorApiVersion: s.MajorAPIVersion, + ValidFor: tr, + Operator: s.Operator, + } +} + +func (sc ServiceConfiguration) ToConfigProtobuf() *prototrustroot.ServiceConfiguration { + return &prototrustroot.ServiceConfiguration{ + Selector: sc.Selector, + Count: sc.Count, + } +} + +func (sc *SigningConfig) FulcioCertificateAuthorityURLs() []Service { + var services []Service + + for _, s := range sc.signingConfig.GetCaUrls() { + services = append(services, NewService(s)) + } + return services +} + +func (sc *SigningConfig) OIDCProviderURLs() []Service { + var services []Service + for _, s := range sc.signingConfig.GetOidcUrls() { + services = append(services, NewService(s)) + } + return services +} + +func (sc *SigningConfig) RekorLogURLs() []Service { + var services []Service + for _, s := range sc.signingConfig.GetRekorTlogUrls() { + services = append(services, NewService(s)) + } + return services +} + +func (sc *SigningConfig) RekorLogURLsConfig() ServiceConfiguration { + c := sc.signingConfig.GetRekorTlogConfig() + return ServiceConfiguration{ + Selector: c.Selector, + Count: c.Count, + } +} + +func (sc *SigningConfig) TimestampAuthorityURLs() []Service { + var services []Service + for _, s := range sc.signingConfig.GetTsaUrls() { + services = append(services, NewService(s)) + } + return services +} + +func (sc *SigningConfig) TimestampAuthorityURLsConfig() ServiceConfiguration { + c := sc.signingConfig.GetTsaConfig() + return ServiceConfiguration{ + Selector: c.Selector, + Count: c.Count, + } +} + +func (sc *SigningConfig) WithFulcioCertificateAuthorityURLs(fulcioURLs ...Service) *SigningConfig { + var services []*prototrustroot.Service + for _, u := range fulcioURLs { + services = append(services, u.ToServiceProtobuf()) + } + sc.signingConfig.CaUrls = services + return sc +} + +func (sc *SigningConfig) AddFulcioCertificateAuthorityURLs(fulcioURLs ...Service) *SigningConfig { + for _, u := range fulcioURLs { + sc.signingConfig.CaUrls = append(sc.signingConfig.CaUrls, u.ToServiceProtobuf()) + } + return sc +} + +func (sc *SigningConfig) WithOIDCProviderURLs(oidcURLs ...Service) *SigningConfig { + var services 
[]*prototrustroot.Service + for _, u := range oidcURLs { + services = append(services, u.ToServiceProtobuf()) + } + sc.signingConfig.OidcUrls = services + return sc +} + +func (sc *SigningConfig) AddOIDCProviderURLs(oidcURLs ...Service) *SigningConfig { + for _, u := range oidcURLs { + sc.signingConfig.OidcUrls = append(sc.signingConfig.OidcUrls, u.ToServiceProtobuf()) + } + return sc +} + +func (sc *SigningConfig) WithRekorLogURLs(logURLs ...Service) *SigningConfig { + var services []*prototrustroot.Service + for _, u := range logURLs { + services = append(services, u.ToServiceProtobuf()) + } + sc.signingConfig.RekorTlogUrls = services + return sc +} + +func (sc *SigningConfig) AddRekorLogURLs(logURLs ...Service) *SigningConfig { + for _, u := range logURLs { + sc.signingConfig.RekorTlogUrls = append(sc.signingConfig.RekorTlogUrls, u.ToServiceProtobuf()) + } + return sc +} + +func (sc *SigningConfig) WithRekorTlogConfig(selector prototrustroot.ServiceSelector, count uint32) *SigningConfig { + sc.signingConfig.RekorTlogConfig.Selector = selector + sc.signingConfig.RekorTlogConfig.Count = count + return sc +} + +func (sc *SigningConfig) WithTimestampAuthorityURLs(tsaURLs ...Service) *SigningConfig { + var services []*prototrustroot.Service + for _, u := range tsaURLs { + services = append(services, u.ToServiceProtobuf()) + } + sc.signingConfig.TsaUrls = services + return sc +} + +func (sc *SigningConfig) AddTimestampAuthorityURLs(tsaURLs ...Service) *SigningConfig { + for _, u := range tsaURLs { + sc.signingConfig.TsaUrls = append(sc.signingConfig.TsaUrls, u.ToServiceProtobuf()) + } + return sc +} + +func (sc *SigningConfig) WithTsaConfig(selector prototrustroot.ServiceSelector, count uint32) *SigningConfig { + sc.signingConfig.TsaConfig.Selector = selector + sc.signingConfig.TsaConfig.Count = count + return sc +} + +func (sc SigningConfig) String() string { + return fmt.Sprintf("{CA: %v, OIDC: %v, RekorLogs: %v, TSAs: %v, MediaType: %s}", + sc.FulcioCertificateAuthorityURLs(), + sc.OIDCProviderURLs(), + sc.RekorLogURLs(), + sc.TimestampAuthorityURLs(), + SigningConfigMediaType02) +} + +func (sc SigningConfig) MarshalJSON() ([]byte, error) { + return protojson.Marshal(sc.signingConfig) +} + +// NewSigningConfig initializes a SigningConfig object from a mediaType string, Fulcio certificate +// authority URLs, OIDC provider URLs, Rekor transparency log URLs, timestamp authorities URLs, +// selection criteria for Rekor logs and TSAs. +func NewSigningConfig(mediaType string, + fulcioCertificateAuthorities []Service, + oidcProviders []Service, + rekorLogs []Service, + rekorLogsConfig ServiceConfiguration, + timestampAuthorities []Service, + timestampAuthoritiesConfig ServiceConfiguration) (*SigningConfig, error) { + if mediaType != SigningConfigMediaType02 { + return nil, fmt.Errorf("unsupported SigningConfig media type, must be: %s", SigningConfigMediaType02) + } + sc := &SigningConfig{ + signingConfig: &prototrustroot.SigningConfig{ + MediaType: mediaType, + CaUrls: mapFunc(fulcioCertificateAuthorities, Service.ToServiceProtobuf), + OidcUrls: mapFunc(oidcProviders, Service.ToServiceProtobuf), + RekorTlogUrls: mapFunc(rekorLogs, Service.ToServiceProtobuf), + RekorTlogConfig: rekorLogsConfig.ToConfigProtobuf(), + TsaUrls: mapFunc(timestampAuthorities, Service.ToServiceProtobuf), + TsaConfig: timestampAuthoritiesConfig.ToConfigProtobuf(), + }, + } + return sc, nil +} + +// NewSigningConfigFromProtobuf returns a Sigstore signing configuration. 
+func NewSigningConfigFromProtobuf(sc *prototrustroot.SigningConfig) (*SigningConfig, error) { + if sc.GetMediaType() != SigningConfigMediaType02 { + return nil, fmt.Errorf("unsupported SigningConfig media type: %s", sc.GetMediaType()) + } + return &SigningConfig{signingConfig: sc}, nil +} + +// NewSigningConfigFromPath returns a Sigstore signing configuration from a file. +func NewSigningConfigFromPath(path string) (*SigningConfig, error) { + scJSON, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + return NewSigningConfigFromJSON(scJSON) +} + +// NewSigningConfigFromJSON returns a Sigstore signing configuration from JSON. +func NewSigningConfigFromJSON(rootJSON []byte) (*SigningConfig, error) { + pbSC, err := NewSigningConfigProtobuf(rootJSON) + if err != nil { + return nil, err + } + + return NewSigningConfigFromProtobuf(pbSC) +} + +// NewSigningConfigProtobuf returns a Sigstore signing configuration as a protobuf. +func NewSigningConfigProtobuf(scJSON []byte) (*prototrustroot.SigningConfig, error) { + pbSC := &prototrustroot.SigningConfig{} + err := protojson.Unmarshal(scJSON, pbSC) + if err != nil { + return nil, err + } + return pbSC, nil +} + +// FetchSigningConfig fetches the public-good Sigstore signing configuration from TUF. +func FetchSigningConfig() (*SigningConfig, error) { + return FetchSigningConfigWithOptions(tuf.DefaultOptions()) +} + +// FetchSigningConfig fetches the public-good Sigstore signing configuration with the given options from TUF. +func FetchSigningConfigWithOptions(opts *tuf.Options) (*SigningConfig, error) { + client, err := tuf.New(opts) + if err != nil { + return nil, err + } + return GetSigningConfig(client) +} + +// GetSigningConfig fetches the public-good Sigstore signing configuration target from TUF. +func GetSigningConfig(c *tuf.Client) (*SigningConfig, error) { + jsonBytes, err := c.GetTarget("signing_config.v0.2.json") + if err != nil { + return nil, err + } + return NewSigningConfigFromJSON(jsonBytes) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go new file mode 100644 index 000000000000..bde952cc1167 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go @@ -0,0 +1,75 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
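A brief sketch of how SelectServices from signing_config.go above behaves for a caller. The endpoint URLs, operators, and API versions here are hypothetical.

    package main

    import (
        "fmt"
        "time"

        prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
        "github.com/sigstore/sigstore-go/pkg/root"
    )

    func main() {
        // Two hypothetical Rekor endpoints run by different operators.
        services := []root.Service{
            {URL: "https://rekor.example.dev", MajorAPIVersion: 2, ValidityPeriodStart: time.Now().Add(-24 * time.Hour), Operator: "operator-a"},
            {URL: "https://rekor.example.org", MajorAPIVersion: 1, ValidityPeriodStart: time.Now().Add(-48 * time.Hour), Operator: "operator-b"},
        }
        // ALL returns every matching service at the highest supported API version.
        cfg := root.ServiceConfiguration{Selector: prototrustroot.ServiceSelector_ALL}

        selected, err := root.SelectServices(services, cfg, []uint32{1, 2}, time.Now())
        if err != nil {
            panic(err)
        }
        for _, s := range selected {
            // Only the v2 endpoint is returned, since selection never mixes API versions.
            fmt.Println("writing to:", s.URL)
        }
    }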
+ +package root + +import ( + "bytes" + "crypto/x509" + "errors" + "time" + + tsaverification "github.com/sigstore/timestamp-authority/v2/pkg/verification" +) + +type Timestamp struct { + Time time.Time + URI string +} + +type TimestampingAuthority interface { + Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error) +} + +type SigstoreTimestampingAuthority struct { + Root *x509.Certificate + Intermediates []*x509.Certificate + Leaf *x509.Certificate + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + URI string +} + +var _ TimestampingAuthority = &SigstoreTimestampingAuthority{} + +func (tsa *SigstoreTimestampingAuthority) Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error) { + if tsa.Root == nil { + var tsaURIDisplay string + if tsa.URI != "" { + tsaURIDisplay = tsa.URI + " " + } + return nil, errors.New("timestamping authority " + tsaURIDisplay + "root certificate is nil") + } + trustedRootVerificationOptions := tsaverification.VerifyOpts{ + Roots: []*x509.Certificate{tsa.Root}, + Intermediates: tsa.Intermediates, + TSACertificate: tsa.Leaf, + } + + // Ensure timestamp responses are from trusted sources + timestamp, err := tsaverification.VerifyTimestampResponse(signedTimestamp, bytes.NewReader(signatureBytes), trustedRootVerificationOptions) + if err != nil { + return nil, err + } + + if !tsa.ValidityPeriodStart.IsZero() && timestamp.Time.Before(tsa.ValidityPeriodStart) { + return nil, errors.New("timestamp is before the validity period start") + } + if !tsa.ValidityPeriodEnd.IsZero() && timestamp.Time.After(tsa.ValidityPeriodEnd) { + return nil, errors.New("timestamp is after the validity period end") + } + + // All above verification successful, so return nil + return &Timestamp{Time: timestamp.Time, URI: tsa.URI}, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go new file mode 100644 index 000000000000..d1ec4d461891 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package root + +import ( + "fmt" + "time" + + "github.com/sigstore/sigstore/pkg/signature" +) + +type TrustedMaterial interface { + TimestampingAuthorities() []TimestampingAuthority + FulcioCertificateAuthorities() []CertificateAuthority + RekorLogs() map[string]*TransparencyLog + CTLogs() map[string]*TransparencyLog + PublicKeyVerifier(string) (TimeConstrainedVerifier, error) +} + +type BaseTrustedMaterial struct{} + +func (b *BaseTrustedMaterial) TimestampingAuthorities() []TimestampingAuthority { + return []TimestampingAuthority{} +} + +func (b *BaseTrustedMaterial) FulcioCertificateAuthorities() []CertificateAuthority { + return []CertificateAuthority{} +} + +func (b *BaseTrustedMaterial) RekorLogs() map[string]*TransparencyLog { + return map[string]*TransparencyLog{} +} + +func (b *BaseTrustedMaterial) CTLogs() map[string]*TransparencyLog { + return map[string]*TransparencyLog{} +} + +func (b *BaseTrustedMaterial) PublicKeyVerifier(_ string) (TimeConstrainedVerifier, error) { + return nil, fmt.Errorf("public key verifier not found") +} + +type TrustedMaterialCollection []TrustedMaterial + +// Ensure types implement interfaces +var _ TrustedMaterial = &BaseTrustedMaterial{} +var _ TrustedMaterial = TrustedMaterialCollection{} + +func (tmc TrustedMaterialCollection) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + for _, tm := range tmc { + verifier, err := tm.PublicKeyVerifier(keyID) + if err == nil { + return verifier, nil + } + } + return nil, fmt.Errorf("public key verifier not found for keyID: %s", keyID) +} + +func (tmc TrustedMaterialCollection) TimestampingAuthorities() []TimestampingAuthority { + var timestampingAuthorities []TimestampingAuthority + for _, tm := range tmc { + timestampingAuthorities = append(timestampingAuthorities, tm.TimestampingAuthorities()...) + } + return timestampingAuthorities +} + +func (tmc TrustedMaterialCollection) FulcioCertificateAuthorities() []CertificateAuthority { + var certAuthorities []CertificateAuthority + for _, tm := range tmc { + certAuthorities = append(certAuthorities, tm.FulcioCertificateAuthorities()...) + } + return certAuthorities +} + +func (tmc TrustedMaterialCollection) RekorLogs() map[string]*TransparencyLog { + rekorLogs := make(map[string]*TransparencyLog) + for _, tm := range tmc { + for keyID, tlogVerifier := range tm.RekorLogs() { + rekorLogs[keyID] = tlogVerifier + } + } + return rekorLogs +} + +func (tmc TrustedMaterialCollection) CTLogs() map[string]*TransparencyLog { + rekorLogs := make(map[string]*TransparencyLog) + for _, tm := range tmc { + for keyID, tlogVerifier := range tm.CTLogs() { + rekorLogs[keyID] = tlogVerifier + } + } + return rekorLogs +} + +type ValidityPeriodChecker interface { + ValidAtTime(time.Time) bool +} + +type TimeConstrainedVerifier interface { + ValidityPeriodChecker + signature.Verifier +} + +type TrustedPublicKeyMaterial struct { + BaseTrustedMaterial + publicKeyVerifier func(string) (TimeConstrainedVerifier, error) +} + +func (tr *TrustedPublicKeyMaterial) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + return tr.publicKeyVerifier(keyID) +} + +func NewTrustedPublicKeyMaterial(publicKeyVerifier func(string) (TimeConstrainedVerifier, error)) *TrustedPublicKeyMaterial { + return &TrustedPublicKeyMaterial{ + publicKeyVerifier: publicKeyVerifier, + } +} + +// ExpiringKey is a TimeConstrainedVerifier with a static validity period. 
+type ExpiringKey struct { + signature.Verifier + validityPeriodStart time.Time + validityPeriodEnd time.Time +} + +var _ TimeConstrainedVerifier = &ExpiringKey{} + +// ValidAtTime returns true if the key is valid at the given time. If the +// validity period start time is not set, the key is considered valid for all +// times before the end time. Likewise, if the validity period end time is not +// set, the key is considered valid for all times after the start time. +func (k *ExpiringKey) ValidAtTime(t time.Time) bool { + if !k.validityPeriodStart.IsZero() && t.Before(k.validityPeriodStart) { + return false + } + if !k.validityPeriodEnd.IsZero() && t.After(k.validityPeriodEnd) { + return false + } + return true +} + +// NewExpiringKey returns a new ExpiringKey with the given validity period +func NewExpiringKey(verifier signature.Verifier, validityPeriodStart, validityPeriodEnd time.Time) *ExpiringKey { + return &ExpiringKey{ + Verifier: verifier, + validityPeriodStart: validityPeriodStart, + validityPeriodEnd: validityPeriodEnd, + } +} + +// NewTrustedPublicKeyMaterialFromMapping returns a TrustedPublicKeyMaterial from a map of key IDs to +// ExpiringKeys. +func NewTrustedPublicKeyMaterialFromMapping(trustedPublicKeys map[string]*ExpiringKey) *TrustedPublicKeyMaterial { + return NewTrustedPublicKeyMaterial(func(keyID string) (TimeConstrainedVerifier, error) { + expiringKey, ok := trustedPublicKeys[keyID] + if !ok { + return nil, fmt.Errorf("public key not found for keyID: %s", keyID) + } + return expiringKey, nil + }) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go new file mode 100644 index 000000000000..0bd69d6f1482 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go @@ -0,0 +1,556 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
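To show how the key-based trusted material above is assembled, a small sketch. It assumes the signature.LoadVerifier helper from sigstore/sigstore and uses a throwaway ECDSA key; a real deployment would load the trusted verification key instead.

    package main

    import (
        "crypto"
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"
        "time"

        "github.com/sigstore/sigstore-go/pkg/root"
        "github.com/sigstore/sigstore/pkg/signature"
    )

    func main() {
        // Throwaway key for illustration; in practice this is the trusted public key.
        priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }
        verifier, err := signature.LoadVerifier(priv.Public(), crypto.SHA256)
        if err != nil {
            panic(err)
        }

        // Trust the key for one year starting now.
        key := root.NewExpiringKey(verifier, time.Now(), time.Now().AddDate(1, 0, 0))
        tm := root.NewTrustedPublicKeyMaterialFromMapping(map[string]*root.ExpiringKey{
            "my-key-id": key,
        })

        // Look the key up by ID, as the verification code paths do.
        v, err := tm.PublicKeyVerifier("my-key-id")
        if err != nil {
            panic(err)
        }
        fmt.Println("valid now:", v.ValidAtTime(time.Now()))
    }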
+ +package root + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "fmt" + "log" + "os" + "sync" + "time" + + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + "github.com/sigstore/sigstore-go/pkg/tuf" + "google.golang.org/protobuf/encoding/protojson" +) + +const TrustedRootMediaType01 = "application/vnd.dev.sigstore.trustedroot+json;version=0.1" + +type TrustedRoot struct { + BaseTrustedMaterial + trustedRoot *prototrustroot.TrustedRoot + rekorLogs map[string]*TransparencyLog + certificateAuthorities []CertificateAuthority + ctLogs map[string]*TransparencyLog + timestampingAuthorities []TimestampingAuthority +} + +type TransparencyLog struct { + BaseURL string + ID []byte + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + // This is the hash algorithm used by the Merkle tree + HashFunc crypto.Hash + PublicKey crypto.PublicKey + // The hash algorithm used during signature creation + SignatureHashFunc crypto.Hash +} + +const ( + defaultTrustedRoot = "trusted_root.json" +) + +func (tr *TrustedRoot) TimestampingAuthorities() []TimestampingAuthority { + return tr.timestampingAuthorities +} + +func (tr *TrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority { + return tr.certificateAuthorities +} + +func (tr *TrustedRoot) RekorLogs() map[string]*TransparencyLog { + return tr.rekorLogs +} + +func (tr *TrustedRoot) CTLogs() map[string]*TransparencyLog { + return tr.ctLogs +} + +func (tr *TrustedRoot) MarshalJSON() ([]byte, error) { + err := tr.constructProtoTrustRoot() + if err != nil { + return nil, fmt.Errorf("failed constructing protobuf TrustRoot representation: %w", err) + } + + return protojson.Marshal(tr.trustedRoot) +} + +func NewTrustedRootFromProtobuf(protobufTrustedRoot *prototrustroot.TrustedRoot) (trustedRoot *TrustedRoot, err error) { + if protobufTrustedRoot.GetMediaType() != TrustedRootMediaType01 { + return nil, fmt.Errorf("unsupported TrustedRoot media type: %s", protobufTrustedRoot.GetMediaType()) + } + + trustedRoot = &TrustedRoot{trustedRoot: protobufTrustedRoot} + trustedRoot.rekorLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetTlogs()) + if err != nil { + return nil, err + } + + trustedRoot.certificateAuthorities, err = ParseCertificateAuthorities(protobufTrustedRoot.GetCertificateAuthorities()) + if err != nil { + return nil, err + } + + trustedRoot.timestampingAuthorities, err = ParseTimestampingAuthorities(protobufTrustedRoot.GetTimestampAuthorities()) + if err != nil { + return nil, err + } + + trustedRoot.ctLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetCtlogs()) + if err != nil { + return nil, err + } + + return trustedRoot, nil +} + +func ParseTransparencyLogs(tlogs []*prototrustroot.TransparencyLogInstance) (transparencyLogs map[string]*TransparencyLog, err error) { + transparencyLogs = make(map[string]*TransparencyLog) + for _, tlog := range tlogs { + if tlog.GetHashAlgorithm() != protocommon.HashAlgorithm_SHA2_256 { + return nil, fmt.Errorf("unsupported tlog hash algorithm: %s", tlog.GetHashAlgorithm()) + } + if tlog.GetLogId() == nil { + return nil, fmt.Errorf("tlog missing log ID") + } + if tlog.GetLogId().GetKeyId() == nil { + return nil, fmt.Errorf("tlog missing log ID key ID") + } + encodedKeyID := hex.EncodeToString(tlog.GetLogId().GetKeyId()) + + if tlog.GetPublicKey() == nil { + return nil, fmt.Errorf("tlog missing public key") + } + if 
tlog.GetPublicKey().GetRawBytes() == nil { + return nil, fmt.Errorf("tlog missing public key raw bytes") + } + + var hashFunc crypto.Hash + switch tlog.GetHashAlgorithm() { + case protocommon.HashAlgorithm_SHA2_256: + hashFunc = crypto.SHA256 + default: + return nil, fmt.Errorf("unsupported hash function for the tlog") + } + + tlogEntry := &TransparencyLog{ + BaseURL: tlog.GetBaseUrl(), + ID: tlog.GetLogId().GetKeyId(), + HashFunc: hashFunc, + SignatureHashFunc: crypto.SHA256, + } + + switch tlog.GetPublicKey().GetKeyDetails() { + case protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, + protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, + protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512: + key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes()) + if err != nil { + return nil, fmt.Errorf("failed to parse public key for tlog: %s %w", + tlog.GetBaseUrl(), + err, + ) + } + var ecKey *ecdsa.PublicKey + var ok bool + if ecKey, ok = key.(*ecdsa.PublicKey); !ok { + return nil, fmt.Errorf("tlog public key is not ECDSA: %s", tlog.GetPublicKey().GetKeyDetails()) + } + tlogEntry.PublicKey = ecKey + // This key format has public key in PKIX RSA format and PKCS1#1v1.5 or RSASSA-PSS signature + case protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, + protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, + protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256: + key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes()) + if err != nil { + return nil, fmt.Errorf("failed to parse public key for tlog: %s %w", + tlog.GetBaseUrl(), + err, + ) + } + var rsaKey *rsa.PublicKey + var ok bool + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails()) + } + tlogEntry.PublicKey = rsaKey + case protocommon.PublicKeyDetails_PKIX_ED25519: + key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes()) + if err != nil { + return nil, fmt.Errorf("failed to parse public key for tlog: %s %w", + tlog.GetBaseUrl(), + err, + ) + } + var edKey ed25519.PublicKey + var ok bool + if edKey, ok = key.(ed25519.PublicKey); !ok { + return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails()) + } + tlogEntry.PublicKey = edKey + // This key format is deprecated, but currently in use for Sigstore staging instance + case protocommon.PublicKeyDetails_PKCS1_RSA_PKCS1V5: //nolint:staticcheck + key, err := x509.ParsePKCS1PublicKey(tlog.GetPublicKey().GetRawBytes()) + if err != nil { + return nil, fmt.Errorf("failed to parse public key for tlog: %s %w", + tlog.GetBaseUrl(), + err, + ) + } + tlogEntry.PublicKey = key + default: + return nil, fmt.Errorf("unsupported tlog public key type: %s", tlog.GetPublicKey().GetKeyDetails()) + } + + tlogEntry.SignatureHashFunc = getSignatureHashAlgo(tlogEntry.PublicKey) + transparencyLogs[encodedKeyID] = tlogEntry + + if validFor := tlog.GetPublicKey().GetValidFor(); validFor != nil { + if validFor.GetStart() != nil { + transparencyLogs[encodedKeyID].ValidityPeriodStart = validFor.GetStart().AsTime() + } else { + return nil, fmt.Errorf("tlog missing public key validity period start time") + } + if validFor.GetEnd() != nil { + transparencyLogs[encodedKeyID].ValidityPeriodEnd = validFor.GetEnd().AsTime() + } + } else { + return nil, fmt.Errorf("tlog missing public key validity period") + } + } + return transparencyLogs, nil +} + +func ParseCertificateAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) 
(certificateAuthorities []CertificateAuthority, err error) { + certificateAuthorities = make([]CertificateAuthority, len(certAuthorities)) + for i, certAuthority := range certAuthorities { + certificateAuthority, err := ParseCertificateAuthority(certAuthority) + if err != nil { + return nil, err + } + certificateAuthorities[i] = certificateAuthority + } + return certificateAuthorities, nil +} + +func ParseCertificateAuthority(certAuthority *prototrustroot.CertificateAuthority) (*FulcioCertificateAuthority, error) { + if certAuthority == nil { + return nil, fmt.Errorf("CertificateAuthority is nil") + } + certChain := certAuthority.GetCertChain() + if certChain == nil { + return nil, fmt.Errorf("CertificateAuthority missing cert chain") + } + chainLen := len(certChain.GetCertificates()) + if chainLen < 1 { + return nil, fmt.Errorf("CertificateAuthority cert chain is empty") + } + + certificateAuthority := &FulcioCertificateAuthority{ + URI: certAuthority.Uri, + } + for i, cert := range certChain.GetCertificates() { + parsedCert, err := x509.ParseCertificate(cert.RawBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate for %s %w", + certAuthority.Uri, + err, + ) + } + if i < chainLen-1 { + certificateAuthority.Intermediates = append(certificateAuthority.Intermediates, parsedCert) + } else { + certificateAuthority.Root = parsedCert + } + } + validFor := certAuthority.GetValidFor() + if validFor != nil { + start := validFor.GetStart() + if start != nil { + certificateAuthority.ValidityPeriodStart = start.AsTime() + } + end := validFor.GetEnd() + if end != nil { + certificateAuthority.ValidityPeriodEnd = end.AsTime() + } + } + + certificateAuthority.URI = certAuthority.Uri + + return certificateAuthority, nil +} + +func ParseTimestampingAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (timestampingAuthorities []TimestampingAuthority, err error) { + timestampingAuthorities = make([]TimestampingAuthority, len(certAuthorities)) + for i, certAuthority := range certAuthorities { + timestampingAuthority, err := ParseTimestampingAuthority(certAuthority) + if err != nil { + return nil, err + } + timestampingAuthorities[i] = timestampingAuthority + } + return timestampingAuthorities, nil +} + +func ParseTimestampingAuthority(certAuthority *prototrustroot.CertificateAuthority) (TimestampingAuthority, error) { + if certAuthority == nil { + return nil, fmt.Errorf("CertificateAuthority is nil") + } + certChain := certAuthority.GetCertChain() + if certChain == nil { + return nil, fmt.Errorf("CertificateAuthority missing cert chain") + } + chainLen := len(certChain.GetCertificates()) + if chainLen < 1 { + return nil, fmt.Errorf("CertificateAuthority cert chain is empty") + } + + timestampingAuthority := &SigstoreTimestampingAuthority{ + URI: certAuthority.Uri, + } + for i, cert := range certChain.GetCertificates() { + parsedCert, err := x509.ParseCertificate(cert.RawBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate for %s %w", + certAuthority.Uri, + err, + ) + } + switch { + case i == 0 && !parsedCert.IsCA: + timestampingAuthority.Leaf = parsedCert + case i < chainLen-1: + timestampingAuthority.Intermediates = append(timestampingAuthority.Intermediates, parsedCert) + case i == chainLen-1: + timestampingAuthority.Root = parsedCert + } + } + validFor := certAuthority.GetValidFor() + if validFor != nil { + start := validFor.GetStart() + if start != nil { + timestampingAuthority.ValidityPeriodStart = start.AsTime() + } + end := 
validFor.GetEnd() + if end != nil { + timestampingAuthority.ValidityPeriodEnd = end.AsTime() + } + } + + timestampingAuthority.URI = certAuthority.Uri + + return timestampingAuthority, nil +} + +func NewTrustedRootFromPath(path string) (*TrustedRoot, error) { + trustedrootJSON, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read trusted root %w", + err, + ) + } + + return NewTrustedRootFromJSON(trustedrootJSON) +} + +// NewTrustedRootFromJSON returns the Sigstore trusted root. +func NewTrustedRootFromJSON(rootJSON []byte) (*TrustedRoot, error) { + pbTrustedRoot, err := NewTrustedRootProtobuf(rootJSON) + if err != nil { + return nil, err + } + + return NewTrustedRootFromProtobuf(pbTrustedRoot) +} + +// NewTrustedRootProtobuf returns the Sigstore trusted root as a protobuf. +func NewTrustedRootProtobuf(rootJSON []byte) (*prototrustroot.TrustedRoot, error) { + pbTrustedRoot := &prototrustroot.TrustedRoot{} + err := protojson.Unmarshal(rootJSON, pbTrustedRoot) + if err != nil { + return nil, fmt.Errorf("failed to proto-json unmarshal trusted root: %w", err) + } + return pbTrustedRoot, nil +} + +// NewTrustedRoot initializes a TrustedRoot object from a mediaType string, list of Fulcio +// certificate authorities, list of timestamp authorities and maps of ctlogs and rekor +// transparency log instances. +// mediaType must be TrustedRootMediaType01 ("application/vnd.dev.sigstore.trustedroot+json;version=0.1"). +func NewTrustedRoot(mediaType string, + certificateAuthorities []CertificateAuthority, + certificateTransparencyLogs map[string]*TransparencyLog, + timestampAuthorities []TimestampingAuthority, + transparencyLogs map[string]*TransparencyLog) (*TrustedRoot, error) { + // document that we assume 1 cert chain per target and with certs already ordered from leaf to root + if mediaType != TrustedRootMediaType01 { + return nil, fmt.Errorf("unsupported TrustedRoot media type: %s, must be %s", mediaType, TrustedRootMediaType01) + } + tr := &TrustedRoot{ + certificateAuthorities: certificateAuthorities, + ctLogs: certificateTransparencyLogs, + timestampingAuthorities: timestampAuthorities, + rekorLogs: transparencyLogs, + } + return tr, nil +} + +// FetchTrustedRoot fetches the Sigstore trusted root from TUF and returns it. +func FetchTrustedRoot() (*TrustedRoot, error) { + return FetchTrustedRootWithOptions(tuf.DefaultOptions()) +} + +// FetchTrustedRootWithOptions fetches the trusted root from TUF with the given options and returns it. 
+func FetchTrustedRootWithOptions(opts *tuf.Options) (*TrustedRoot, error) { + client, err := tuf.New(opts) + if err != nil { + return nil, fmt.Errorf("failed to create TUF client %w", err) + } + return GetTrustedRoot(client) +} + +// GetTrustedRoot returns the trusted root +func GetTrustedRoot(c *tuf.Client) (*TrustedRoot, error) { + jsonBytes, err := c.GetTarget(defaultTrustedRoot) + if err != nil { + return nil, fmt.Errorf("failed to get trusted root from TUF client %w", + err, + ) + } + return NewTrustedRootFromJSON(jsonBytes) +} + +func getSignatureHashAlgo(pubKey crypto.PublicKey) crypto.Hash { + var h crypto.Hash + switch pk := pubKey.(type) { + case *rsa.PublicKey: + h = crypto.SHA256 + case *ecdsa.PublicKey: + switch pk.Curve { + case elliptic.P256(): + h = crypto.SHA256 + case elliptic.P384(): + h = crypto.SHA384 + case elliptic.P521(): + h = crypto.SHA512 + default: + h = crypto.SHA256 + } + case ed25519.PublicKey: + h = crypto.SHA512 + default: + h = crypto.SHA256 + } + return h +} + +// LiveTrustedRoot is a wrapper around TrustedRoot that periodically +// refreshes the trusted root from TUF. This is needed for long-running +// processes to ensure that the trusted root does not expire. +type LiveTrustedRoot struct { + *TrustedRoot + mu sync.RWMutex +} + +// NewLiveTrustedRoot returns a LiveTrustedRoot that will periodically +// refresh the trusted root from TUF. +func NewLiveTrustedRoot(opts *tuf.Options) (*LiveTrustedRoot, error) { + return NewLiveTrustedRootFromTarget(opts, defaultTrustedRoot) +} + +// NewLiveTrustedRootFromTarget returns a LiveTrustedRoot that will +// periodically refresh the trusted root from TUF using the provided target. +func NewLiveTrustedRootFromTarget(opts *tuf.Options, target string) (*LiveTrustedRoot, error) { + return NewLiveTrustedRootFromTargetWithPeriod(opts, target, 24*time.Hour) +} + +// NewLiveTrustedRootFromTargetWithPeriod returns a LiveTrustedRoot that +// performs a TUF refresh with the provided period, accesssing the provided +// target. 
+func NewLiveTrustedRootFromTargetWithPeriod(opts *tuf.Options, target string, rfPeriod time.Duration) (*LiveTrustedRoot, error) { + client, err := tuf.New(opts) + if err != nil { + return nil, fmt.Errorf("failed to create TUF client %w", err) + } + + b, err := client.GetTarget(target) + if err != nil { + return nil, fmt.Errorf("failed to get target from TUF client %w", err) + } + + tr, err := NewTrustedRootFromJSON(b) + if err != nil { + return nil, err + } + ltr := &LiveTrustedRoot{ + TrustedRoot: tr, + mu: sync.RWMutex{}, + } + + ticker := time.NewTicker(rfPeriod) + go func() { + for range ticker.C { + client, err = tuf.New(opts) + if err != nil { + log.Printf("error creating TUF client: %v", err) + } + + b, err := client.GetTarget(target) + if err != nil { + log.Printf("error fetching trusted root: %v", err) + } + + newTr, err := NewTrustedRootFromJSON(b) + if err != nil { + log.Printf("error fetching trusted root: %v", err) + continue + } + ltr.mu.Lock() + ltr.TrustedRoot = newTr + ltr.mu.Unlock() + log.Printf("successfully refreshed the TUF root") + } + }() + return ltr, nil +} + +func (l *LiveTrustedRoot) TimestampingAuthorities() []TimestampingAuthority { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.TimestampingAuthorities() +} + +func (l *LiveTrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.FulcioCertificateAuthorities() +} + +func (l *LiveTrustedRoot) RekorLogs() map[string]*TransparencyLog { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.RekorLogs() +} + +func (l *LiveTrustedRoot) CTLogs() map[string]*TransparencyLog { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.CTLogs() +} + +func (l *LiveTrustedRoot) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.PublicKeyVerifier(keyID) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go new file mode 100644 index 000000000000..78ec96e3a8c5 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go @@ -0,0 +1,270 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package root + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "fmt" + "sort" + "time" + + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +func (tr *TrustedRoot) constructProtoTrustRoot() error { + tr.trustedRoot = &prototrustroot.TrustedRoot{} + tr.trustedRoot.MediaType = TrustedRootMediaType01 + + for logID, transparencyLog := range tr.rekorLogs { + tlProto, err := transparencyLogToProtobufTL(transparencyLog) + if err != nil { + return fmt.Errorf("failed converting rekor log %s to protobuf: %w", logID, err) + } + tr.trustedRoot.Tlogs = append(tr.trustedRoot.Tlogs, tlProto) + } + // ensure stable sorting of the slice + sortTlogSlice(tr.trustedRoot.Tlogs) + + for logID, ctLog := range tr.ctLogs { + ctProto, err := transparencyLogToProtobufTL(ctLog) + if err != nil { + return fmt.Errorf("failed converting ctlog %s to protobuf: %w", logID, err) + } + tr.trustedRoot.Ctlogs = append(tr.trustedRoot.Ctlogs, ctProto) + } + // ensure stable sorting of the slice + sortTlogSlice(tr.trustedRoot.Ctlogs) + + for _, ca := range tr.certificateAuthorities { + caProto, err := certificateAuthorityToProtobufCA(ca.(*FulcioCertificateAuthority)) + if err != nil { + return fmt.Errorf("failed converting fulcio cert chain to protobuf: %w", err) + } + tr.trustedRoot.CertificateAuthorities = append(tr.trustedRoot.CertificateAuthorities, caProto) + } + // ensure stable sorting of the slice + sortCASlice(tr.trustedRoot.CertificateAuthorities) + + for _, ca := range tr.timestampingAuthorities { + caProto, err := timestampingAuthorityToProtobufCA(ca.(*SigstoreTimestampingAuthority)) + if err != nil { + return fmt.Errorf("failed converting TSA cert chain to protobuf: %w", err) + } + tr.trustedRoot.TimestampAuthorities = append(tr.trustedRoot.TimestampAuthorities, caProto) + } + // ensure stable sorting of the slice + sortCASlice(tr.trustedRoot.TimestampAuthorities) + + return nil +} + +func sortCASlice(slc []*prototrustroot.CertificateAuthority) { + sort.Slice(slc, func(i, j int) bool { + iTime := time.Unix(0, 0) + jTime := time.Unix(0, 0) + + if slc[i].ValidFor.Start != nil { + iTime = slc[i].ValidFor.Start.AsTime() + } + if slc[j].ValidFor.Start != nil { + jTime = slc[j].ValidFor.Start.AsTime() + } + + return iTime.Before(jTime) + }) +} + +func sortTlogSlice(slc []*prototrustroot.TransparencyLogInstance) { + sort.Slice(slc, func(i, j int) bool { + iTime := time.Unix(0, 0) + jTime := time.Unix(0, 0) + + if slc[i].PublicKey.ValidFor.Start != nil { + iTime = slc[i].PublicKey.ValidFor.Start.AsTime() + } + if slc[j].PublicKey.ValidFor.Start != nil { + jTime = slc[j].PublicKey.ValidFor.Start.AsTime() + } + + return iTime.Before(jTime) + }) +} + +func certificateAuthorityToProtobufCA(ca *FulcioCertificateAuthority) (*prototrustroot.CertificateAuthority, error) { + org := "" + if len(ca.Root.Subject.Organization) > 0 { + org = ca.Root.Subject.Organization[0] + } + var allCerts []*protocommon.X509Certificate + for _, intermed := range ca.Intermediates { + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw}) + } + if ca.Root == nil { + return nil, fmt.Errorf("root certificate is nil") + } + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw}) + + caProto := prototrustroot.CertificateAuthority{ + Uri: ca.URI, + Subject: 
&protocommon.DistinguishedName{ + Organization: org, + CommonName: ca.Root.Subject.CommonName, + }, + ValidFor: &protocommon.TimeRange{ + Start: timestamppb.New(ca.ValidityPeriodStart), + }, + CertChain: &protocommon.X509CertificateChain{ + Certificates: allCerts, + }, + } + + if !ca.ValidityPeriodEnd.IsZero() { + caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd) + } + + return &caProto, nil +} + +func timestampingAuthorityToProtobufCA(ca *SigstoreTimestampingAuthority) (*prototrustroot.CertificateAuthority, error) { + org := "" + if len(ca.Root.Subject.Organization) > 0 { + org = ca.Root.Subject.Organization[0] + } + var allCerts []*protocommon.X509Certificate + if ca.Leaf != nil { + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Leaf.Raw}) + } + for _, intermed := range ca.Intermediates { + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw}) + } + if ca.Root == nil { + return nil, fmt.Errorf("root certificate is nil") + } + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw}) + + caProto := prototrustroot.CertificateAuthority{ + Uri: ca.URI, + Subject: &protocommon.DistinguishedName{ + Organization: org, + CommonName: ca.Root.Subject.CommonName, + }, + ValidFor: &protocommon.TimeRange{ + Start: timestamppb.New(ca.ValidityPeriodStart), + }, + CertChain: &protocommon.X509CertificateChain{ + Certificates: allCerts, + }, + } + + if !ca.ValidityPeriodEnd.IsZero() { + caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd) + } + + return &caProto, nil +} + +func transparencyLogToProtobufTL(tl *TransparencyLog) (*prototrustroot.TransparencyLogInstance, error) { + hashAlgo, err := hashAlgorithmToProtobufHashAlgorithm(tl.HashFunc) + if err != nil { + return nil, fmt.Errorf("failed converting hash algorithm to protobuf: %w", err) + } + publicKey, err := publicKeyToProtobufPublicKey(tl.PublicKey, tl.ValidityPeriodStart, tl.ValidityPeriodEnd) + if err != nil { + return nil, fmt.Errorf("failed converting public key to protobuf: %w", err) + } + trProto := prototrustroot.TransparencyLogInstance{ + BaseUrl: tl.BaseURL, + HashAlgorithm: hashAlgo, + PublicKey: publicKey, + LogId: &protocommon.LogId{ + KeyId: tl.ID, + }, + } + + return &trProto, nil +} + +func hashAlgorithmToProtobufHashAlgorithm(hashAlgorithm crypto.Hash) (protocommon.HashAlgorithm, error) { + switch hashAlgorithm { + case crypto.SHA256: + return protocommon.HashAlgorithm_SHA2_256, nil + case crypto.SHA384: + return protocommon.HashAlgorithm_SHA2_384, nil + case crypto.SHA512: + return protocommon.HashAlgorithm_SHA2_512, nil + case crypto.SHA3_256: + return protocommon.HashAlgorithm_SHA3_256, nil + case crypto.SHA3_384: + return protocommon.HashAlgorithm_SHA3_384, nil + default: + return 0, fmt.Errorf("unsupported hash algorithm for Merkle tree: %v", hashAlgorithm) + } +} + +func publicKeyToProtobufPublicKey(publicKey crypto.PublicKey, start time.Time, end time.Time) (*protocommon.PublicKey, error) { + pkd := protocommon.PublicKey{ + ValidFor: &protocommon.TimeRange{ + Start: timestamppb.New(start), + }, + } + + if !end.IsZero() { + pkd.ValidFor.End = timestamppb.New(end) + } + + rawBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return nil, fmt.Errorf("failed marshalling public key: %w", err) + } + pkd.RawBytes = rawBytes + + switch p := publicKey.(type) { + case *ecdsa.PublicKey: + switch p.Curve { + case elliptic.P256(): + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 + case elliptic.P384(): + 
pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 + case elliptic.P521(): + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 + default: + return nil, fmt.Errorf("unsupported curve for ecdsa key: %T", p.Curve) + } + case *rsa.PublicKey: + switch p.Size() * 8 { + case 2048: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 + case 3072: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 + case 4096: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 + default: + return nil, fmt.Errorf("unsupported public modulus for RSA key: %d", p.Size()) + } + case ed25519.PublicKey: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ED25519 + default: + return nil, fmt.Errorf("unknown public key type: %T", p) + } + + return &pkd, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go new file mode 100644 index 000000000000..ff8bec4469af --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go @@ -0,0 +1,529 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlog + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "time" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + rekortilespb "github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf" + "github.com/sigstore/rekor-tiles/v2/pkg/note" + typesverifier "github.com/sigstore/rekor-tiles/v2/pkg/types/verifier" + "github.com/sigstore/rekor-tiles/v2/pkg/verify" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" + dsse_v001 "github.com/sigstore/rekor/pkg/types/dsse/v0.0.1" + hashedrekord_v001 "github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1" + intoto_v002 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2" + rekorVerify "github.com/sigstore/rekor/pkg/verify" + "github.com/sigstore/sigstore/pkg/signature" + "google.golang.org/protobuf/encoding/protojson" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +type Entry struct { + kind string + version string + rekorV1Entry types.EntryImpl + rekorV2Entry *rekortilespb.Entry + signedEntryTimestamp []byte + tle *v1.TransparencyLogEntry +} + +type RekorPayload struct { + Body interface{} `json:"body"` + IntegratedTime int64 `json:"integratedTime"` + LogIndex int64 `json:"logIndex"` + LogID string `json:"logID"` //nolint:tagliatelle +} + +var ErrNilValue = errors.New("validation error: nil value in transaction log entry") +var ErrInvalidRekorV2Entry = errors.New("type error: object is not a Rekor v2 
type, try parsing as Rekor v1") + +// Deprecated: use NewTlogEntry. NewEntry only parses a Rekor v1 entry. +func NewEntry(body []byte, integratedTime int64, logIndex int64, logID []byte, signedEntryTimestamp []byte, inclusionProof *models.InclusionProof) (*Entry, error) { + pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer()) + if err != nil { + return nil, err + } + rekorEntry, err := types.UnmarshalEntry(pe) + if err != nil { + return nil, err + } + + entry := &Entry{ + rekorV1Entry: rekorEntry, + tle: &v1.TransparencyLogEntry{ + LogIndex: logIndex, + LogId: &protocommon.LogId{ + KeyId: logID, + }, + IntegratedTime: integratedTime, + CanonicalizedBody: body, + }, + kind: pe.Kind(), + version: rekorEntry.APIVersion(), + } + + if len(signedEntryTimestamp) > 0 { + entry.signedEntryTimestamp = signedEntryTimestamp + } + + if inclusionProof != nil { + hashes := make([][]byte, len(inclusionProof.Hashes)) + for i, s := range inclusionProof.Hashes { + hashes[i], err = hex.DecodeString(s) + if err != nil { + return nil, err + } + } + rootHashDec, err := hex.DecodeString(*inclusionProof.RootHash) + if err != nil { + return nil, err + } + entry.tle.InclusionProof = &v1.InclusionProof{ + LogIndex: logIndex, + RootHash: rootHashDec, + TreeSize: *inclusionProof.TreeSize, + Hashes: hashes, + Checkpoint: &v1.Checkpoint{ + Envelope: *inclusionProof.Checkpoint, + }, + } + } + + return entry, nil +} + +func NewTlogEntry(tle *v1.TransparencyLogEntry) (*Entry, error) { + var rekorV2Entry *rekortilespb.Entry + var rekorV1Entry types.EntryImpl + var err error + + body := tle.CanonicalizedBody + rekorV2Entry, err = unmarshalRekorV2Entry(body) + if err != nil { + rekorV1Entry, err = unmarshalRekorV1Entry(body) + if err != nil { + return nil, fmt.Errorf("entry body is not a recognizable Rekor v1 or Rekor v2 type: %w", err) + } + } + + entry := &Entry{ + rekorV1Entry: rekorV1Entry, + rekorV2Entry: rekorV2Entry, + kind: tle.KindVersion.Kind, + version: tle.KindVersion.Version, + } + + signedEntryTimestamp := []byte{} + if tle.InclusionPromise != nil && tle.InclusionPromise.SignedEntryTimestamp != nil { + signedEntryTimestamp = tle.InclusionPromise.SignedEntryTimestamp + } + if len(signedEntryTimestamp) > 0 { + entry.signedEntryTimestamp = signedEntryTimestamp + } + entry.tle = tle + + return entry, nil +} + +func ParseTransparencyLogEntry(tle *v1.TransparencyLogEntry) (*Entry, error) { + if tle == nil { + return nil, ErrNilValue + } + if tle.CanonicalizedBody == nil || + tle.LogIndex < 0 || + tle.LogId == nil || + tle.LogId.KeyId == nil || + tle.KindVersion == nil { + return nil, ErrNilValue + } + + if tle.InclusionProof != nil { + if tle.InclusionProof.Checkpoint == nil { + return nil, fmt.Errorf("inclusion proof missing required checkpoint") + } + if tle.InclusionProof.Checkpoint.Envelope == "" { + return nil, fmt.Errorf("inclusion proof checkpoint empty") + } + } + + entry, err := NewTlogEntry(tle) + if err != nil { + return nil, err + } + if entry.kind != tle.KindVersion.Kind || entry.version != tle.KindVersion.Version { + return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, tle.KindVersion.Kind, tle.KindVersion.Version) + } + return entry, nil +} + +// Deprecated: use ParseTransparencyLogEntry. ParseEntry only parses Rekor v1 type entries. +// ParseEntry decodes the entry bytes to a specific entry type (types.EntryImpl). 
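+//
+// A minimal sketch of the preferred path (assuming tle is a
+// *v1.TransparencyLogEntry obtained from a verification bundle):
+//
+//	entry, err := tlog.ParseTransparencyLogEntry(tle)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = entry.LogIndex()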
+func ParseEntry(protoEntry *v1.TransparencyLogEntry) (entry *Entry, err error) { + if protoEntry == nil || + protoEntry.CanonicalizedBody == nil || + protoEntry.IntegratedTime == 0 || + protoEntry.LogIndex < 0 || + protoEntry.LogId == nil || + protoEntry.LogId.KeyId == nil || + protoEntry.KindVersion == nil { + return nil, ErrNilValue + } + + signedEntryTimestamp := []byte{} + if protoEntry.InclusionPromise != nil && protoEntry.InclusionPromise.SignedEntryTimestamp != nil { + signedEntryTimestamp = protoEntry.InclusionPromise.SignedEntryTimestamp + } + + var inclusionProof *models.InclusionProof + + if protoEntry.InclusionProof != nil { + var hashes []string + + for _, v := range protoEntry.InclusionProof.Hashes { + hashes = append(hashes, hex.EncodeToString(v)) + } + + rootHash := hex.EncodeToString(protoEntry.InclusionProof.RootHash) + + if protoEntry.InclusionProof.Checkpoint == nil { + return nil, fmt.Errorf("inclusion proof missing required checkpoint") + } + if protoEntry.InclusionProof.Checkpoint.Envelope == "" { + return nil, fmt.Errorf("inclusion proof checkpoint empty") + } + + inclusionProof = &models.InclusionProof{ + LogIndex: conv.Pointer(protoEntry.InclusionProof.LogIndex), + RootHash: &rootHash, + TreeSize: conv.Pointer(protoEntry.InclusionProof.TreeSize), + Hashes: hashes, + Checkpoint: conv.Pointer(protoEntry.InclusionProof.Checkpoint.Envelope), + } + } + + entry, err = NewEntry(protoEntry.CanonicalizedBody, protoEntry.IntegratedTime, protoEntry.LogIndex, protoEntry.LogId.KeyId, signedEntryTimestamp, inclusionProof) + if err != nil { + return nil, err + } + + if entry.kind != protoEntry.KindVersion.Kind || entry.version != protoEntry.KindVersion.Version { + return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, protoEntry.KindVersion.Kind, protoEntry.KindVersion.Version) + } + entry.tle = protoEntry + + return entry, nil +} + +func ValidateEntry(entry *Entry) error { + if entry.rekorV1Entry != nil { + switch e := entry.rekorV1Entry.(type) { + case *dsse_v001.V001Entry: + err := e.DSSEObj.Validate(strfmt.Default) + if err != nil { + return err + } + case *hashedrekord_v001.V001Entry: + err := e.HashedRekordObj.Validate(strfmt.Default) + if err != nil { + return err + } + case *intoto_v002.V002Entry: + err := e.IntotoObj.Validate(strfmt.Default) + if err != nil { + return err + } + default: + return fmt.Errorf("unsupported entry type: %T", e) + } + } + if entry.rekorV2Entry != nil { + switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) { + case *rekortilespb.Spec_HashedRekordV002: + err := validateHashedRekordV002Entry(e.HashedRekordV002) + if err != nil { + return err + } + case *rekortilespb.Spec_DsseV002: + err := validateDSSEV002Entry(e.DsseV002) + if err != nil { + return err + } + } + } + return nil +} + +func validateHashedRekordV002Entry(hr *rekortilespb.HashedRekordLogEntryV002) error { + if hr.GetSignature() == nil || len(hr.GetSignature().GetContent()) == 0 { + return fmt.Errorf("missing signature") + } + if hr.GetSignature().GetVerifier() == nil { + return fmt.Errorf("missing verifier") + } + if hr.GetData() == nil { + return fmt.Errorf("missing digest") + } + return typesverifier.Validate(hr.GetSignature().GetVerifier()) +} + +func validateDSSEV002Entry(d *rekortilespb.DSSELogEntryV002) error { + if d.GetPayloadHash() == nil { + return fmt.Errorf("missing payload") + } + if len(d.GetSignatures()) == 0 { + return fmt.Errorf("missing signatures") + } + return typesverifier.Validate(d.GetSignatures()[0].GetVerifier()) 
+} + +func (entry *Entry) IntegratedTime() time.Time { + if entry.tle.IntegratedTime == 0 { + return time.Time{} + } + return time.Unix(entry.tle.IntegratedTime, 0) +} + +func (entry *Entry) Signature() []byte { + if entry.rekorV1Entry != nil { + switch e := entry.rekorV1Entry.(type) { + case *dsse_v001.V001Entry: + sigBytes, err := base64.StdEncoding.DecodeString(*e.DSSEObj.Signatures[0].Signature) + if err != nil { + return []byte{} + } + return sigBytes + case *hashedrekord_v001.V001Entry: + return e.HashedRekordObj.Signature.Content + case *intoto_v002.V002Entry: + sigBytes, err := base64.StdEncoding.DecodeString(string(*e.IntotoObj.Content.Envelope.Signatures[0].Sig)) + if err != nil { + return []byte{} + } + return sigBytes + } + } + if entry.rekorV2Entry != nil { + switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) { + case *rekortilespb.Spec_HashedRekordV002: + return e.HashedRekordV002.GetSignature().GetContent() + case *rekortilespb.Spec_DsseV002: + return e.DsseV002.GetSignatures()[0].GetContent() + } + } + + return []byte{} +} + +func (entry *Entry) PublicKey() any { + var pk any + var certBytes []byte + + if entry.rekorV1Entry != nil { + var pemString []byte + switch e := entry.rekorV1Entry.(type) { + case *dsse_v001.V001Entry: + pemString = []byte(*e.DSSEObj.Signatures[0].Verifier) + case *hashedrekord_v001.V001Entry: + pemString = []byte(e.HashedRekordObj.Signature.PublicKey.Content) + case *intoto_v002.V002Entry: + pemString = []byte(*e.IntotoObj.Content.Envelope.Signatures[0].PublicKey) + } + certBlock, _ := pem.Decode(pemString) + certBytes = certBlock.Bytes + } else if entry.rekorV2Entry != nil { + var verifier *rekortilespb.Verifier + switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) { + case *rekortilespb.Spec_HashedRekordV002: + verifier = e.HashedRekordV002.GetSignature().GetVerifier() + case *rekortilespb.Spec_DsseV002: + verifier = e.DsseV002.GetSignatures()[0].GetVerifier() + } + switch verifier.Verifier.(type) { + case *rekortilespb.Verifier_PublicKey: + certBytes = verifier.GetPublicKey().GetRawBytes() + case *rekortilespb.Verifier_X509Certificate: + certBytes = verifier.GetX509Certificate().GetRawBytes() + } + } + + var err error + + pk, err = x509.ParseCertificate(certBytes) + if err != nil { + pk, err = x509.ParsePKIXPublicKey(certBytes) + if err != nil { + return nil + } + } + + return pk +} + +func (entry *Entry) LogKeyID() string { + return string(entry.tle.GetLogId().GetKeyId()) +} + +func (entry *Entry) LogIndex() int64 { + return entry.tle.GetLogIndex() +} + +func (entry *Entry) Body() any { + return base64.StdEncoding.EncodeToString(entry.tle.CanonicalizedBody) +} + +func (entry *Entry) HasInclusionPromise() bool { + return entry.signedEntryTimestamp != nil +} + +func (entry *Entry) HasInclusionProof() bool { + return entry.tle.InclusionProof != nil +} + +func (entry *Entry) TransparencyLogEntry() *v1.TransparencyLogEntry { + return entry.tle +} + +// VerifyInclusion verifies a Rekor v1-style checkpoint and the entry's inclusion in the Rekor v1 log. 
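+//
+// A minimal usage sketch (assuming entry was parsed from a bundle and
+// verifier wraps the public key of the Rekor log that issued the proof):
+//
+//	if err := tlog.VerifyInclusion(entry, verifier); err != nil {
+//		// the inclusion proof or checkpoint signature did not verify
+//	}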
+func VerifyInclusion(entry *Entry, verifier signature.Verifier) error { + hashes := make([]string, len(entry.tle.InclusionProof.Hashes)) + for i, b := range entry.tle.InclusionProof.Hashes { + hashes[i] = hex.EncodeToString(b) + } + rootHash := hex.EncodeToString(entry.tle.GetInclusionProof().GetRootHash()) + logEntry := models.LogEntryAnon{ + IntegratedTime: &entry.tle.IntegratedTime, + LogID: conv.Pointer(string(entry.tle.GetLogId().KeyId)), + LogIndex: conv.Pointer(entry.tle.GetInclusionProof().GetLogIndex()), + Body: base64.StdEncoding.EncodeToString(entry.tle.GetCanonicalizedBody()), + Verification: &models.LogEntryAnonVerification{ + InclusionProof: &models.InclusionProof{ + Checkpoint: conv.Pointer(entry.tle.GetInclusionProof().GetCheckpoint().GetEnvelope()), + Hashes: hashes, + LogIndex: conv.Pointer(entry.tle.GetInclusionProof().GetLogIndex()), + RootHash: &rootHash, + TreeSize: conv.Pointer(entry.tle.GetInclusionProof().GetTreeSize()), + }, + SignedEntryTimestamp: strfmt.Base64(entry.signedEntryTimestamp), + }, + } + err := rekorVerify.VerifyInclusion(context.Background(), &logEntry) + if err != nil { + return err + } + + err = rekorVerify.VerifyCheckpointSignature(&logEntry, verifier) + if err != nil { + return err + } + + return nil +} + +// VerifyCheckpointAndInclusion verifies a checkpoint and the entry's inclusion in the transparency log. +// This function is compatible with Rekor v1 and Rekor v2. +func VerifyCheckpointAndInclusion(entry *Entry, verifier signature.Verifier, origin string) error { + noteVerifier, err := note.NewNoteVerifier(origin, verifier) + if err != nil { + return fmt.Errorf("loading note verifier: %w", err) + } + err = verify.VerifyLogEntry(entry.TransparencyLogEntry(), noteVerifier) + if err != nil { + return fmt.Errorf("verifying log entry: %w", err) + } + + return nil +} + +func VerifySET(entry *Entry, verifiers map[string]*root.TransparencyLog) error { + if entry.rekorV1Entry == nil { + return fmt.Errorf("can only verify SET for Rekor v1 entry") + } + rekorPayload := RekorPayload{ + Body: entry.Body(), + IntegratedTime: entry.tle.IntegratedTime, + LogIndex: entry.LogIndex(), + LogID: hex.EncodeToString([]byte(entry.LogKeyID())), + } + + verifier, ok := verifiers[hex.EncodeToString([]byte(entry.LogKeyID()))] + if !ok { + return errors.New("rekor log public key not found for payload") + } + if verifier.ValidityPeriodStart.IsZero() { + return errors.New("rekor validity period start time not set") + } + if (verifier.ValidityPeriodStart.After(entry.IntegratedTime())) || + (!verifier.ValidityPeriodEnd.IsZero() && verifier.ValidityPeriodEnd.Before(entry.IntegratedTime())) { + return errors.New("rekor log public key not valid at payload integrated time") + } + + contents, err := json.Marshal(rekorPayload) + if err != nil { + return fmt.Errorf("marshaling: %w", err) + } + canonicalized, err := jsoncanonicalizer.Transform(contents) + if err != nil { + return fmt.Errorf("canonicalizing: %w", err) + } + + hash := sha256.Sum256(canonicalized) + if ecdsaPublicKey, ok := verifier.PublicKey.(*ecdsa.PublicKey); !ok { + return fmt.Errorf("unsupported public key type: %T", verifier.PublicKey) + } else if !ecdsa.VerifyASN1(ecdsaPublicKey, hash[:], entry.signedEntryTimestamp) { + return errors.New("unable to verify SET") + } + return nil +} + +func unmarshalRekorV1Entry(body []byte) (types.EntryImpl, error) { + pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer()) + if err != nil { + return nil, err + } + rekorEntry, err := 
types.UnmarshalEntry(pe) + if err != nil { + return nil, err + } + return rekorEntry, nil +} + +func unmarshalRekorV2Entry(body []byte) (*rekortilespb.Entry, error) { + logEntryBody := rekortilespb.Entry{} + err := protojson.Unmarshal(body, &logEntryBody) + if err != nil { + return nil, ErrInvalidRekorV2Entry + } + return &logEntryBody, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go new file mode 100644 index 000000000000..e2e76d69074b --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go @@ -0,0 +1,211 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tuf + +import ( + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/theupdateframework/go-tuf/v2/metadata/config" + "github.com/theupdateframework/go-tuf/v2/metadata/fetcher" + "github.com/theupdateframework/go-tuf/v2/metadata/updater" + + "github.com/sigstore/sigstore-go/pkg/util" +) + +// Client is a Sigstore TUF client +type Client struct { + cfg *config.UpdaterConfig + up *updater.Updater + opts *Options +} + +// New returns a new client with custom options +func New(opts *Options) (*Client, error) { + var c = Client{ + opts: opts, + } + dir := filepath.Join(opts.CachePath, URLToPath(opts.RepositoryBaseURL)) + var err error + + if c.cfg, err = config.New(opts.RepositoryBaseURL, opts.Root); err != nil { + return nil, fmt.Errorf("failed to create TUF client: %w", err) + } + + c.cfg.LocalMetadataDir = dir + c.cfg.LocalTargetsDir = filepath.Join(dir, "targets") + c.cfg.DisableLocalCache = c.opts.DisableLocalCache + c.cfg.PrefixTargetsWithHash = !c.opts.DisableConsistentSnapshot + + if c.cfg.DisableLocalCache { + c.opts.CachePath = "" + c.opts.CacheValidity = 0 + c.opts.ForceCache = false + } + + if opts.Fetcher != nil { + c.cfg.Fetcher = opts.Fetcher + } else { + fetcher := fetcher.NewDefaultFetcher() + fetcher.SetHTTPUserAgent(util.ConstructUserAgent()) + c.cfg.Fetcher = fetcher + } + + // Upon client creation, we may not perform a full TUF update, + // based on the cache control configuration. Start with a local + // client (only reads content on disk) and then decide if we + // must perform a full TUF update. + tmpCfg := *c.cfg + // Create a temporary config for the first use where UnsafeLocalMode + // is true. This means that when we first initialize the client, + // we are guaranteed to only read the metadata on disk. + // Based on that metadata we take a decision if a full TUF + // refresh should be done or not. As so, the tmpCfg is only needed + // here and not in future invocations. 
+	tmpCfg.UnsafeLocalMode = true
+	c.up, err = updater.New(&tmpCfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create initial TUF updater: %w", err)
+	}
+	if err = c.loadMetadata(); err != nil {
+		return nil, fmt.Errorf("failed to load metadata: %w", err)
+	}
+
+	return &c, nil
+}
+
+// DefaultClient returns a Sigstore TUF client for the public good instance
+func DefaultClient() (*Client, error) {
+	opts := DefaultOptions()
+
+	return New(opts)
+}
+
+// loadMetadata controls if the client actually should perform a TUF refresh.
+// The TUF specification mandates so, but for certain Sigstore clients, it
+// may be beneficial to rely on the cache, or in air-gapped deployments
+// it may not even be possible.
+func (c *Client) loadMetadata() error {
+	// Load the metadata into memory and verify it
+	if err := c.up.Refresh(); err != nil {
+		// this is most likely due to the lack of metadata files
+		// on disk. Perform a full update and return.
+		return c.Refresh()
+	}
+
+	if c.opts.ForceCache {
+		return nil
+	} else if c.opts.CacheValidity > 0 {
+		cfg, err := LoadConfig(c.configPath())
+		if err != nil {
+			// Config may not exist, don't error
+			// create a new empty config
+			cfg = &Config{}
+		}
+
+		cacheValidUntil := cfg.LastTimestamp.AddDate(0, 0, c.opts.CacheValidity)
+		if time.Now().Before(cacheValidUntil) {
+			// No need to update
+			return nil
+		}
+	}
+
+	return c.Refresh()
+}
+
+func (c *Client) configPath() string {
+	var p = filepath.Join(
+		c.opts.CachePath,
+		fmt.Sprintf("%s.json", URLToPath(c.opts.RepositoryBaseURL)),
+	)
+
+	return p
+}
+
+// Refresh forces a refresh of the underlying TUF client.
+// As the tuf client updater does not support multiple refreshes during
+// its life-time, this will replace the TUF client updater with a new one.
+func (c *Client) Refresh() error {
+	var err error
+
+	c.up, err = updater.New(c.cfg)
+	if err != nil {
+		return fmt.Errorf("failed to create tuf updater: %w", err)
+	}
+	err = c.up.Refresh()
+	if err != nil {
+		return fmt.Errorf("tuf refresh failed: %w", err)
+	}
+	// If cache is disabled, we don't need to persist the last timestamp
+	if c.cfg.DisableLocalCache {
+		return nil
+	}
+	// Update config with last update
+	cfg, err := LoadConfig(c.configPath())
+	if err != nil {
+		// Likely config file did not exist, create it
+		cfg = &Config{}
+	}
+	cfg.LastTimestamp = time.Now()
+	// ignore error writing update config file
+	_ = cfg.Persist(c.configPath())
+
+	return nil
+}
+
+// GetTarget returns a target file from the TUF repository
+func (c *Client) GetTarget(target string) ([]byte, error) {
+	// Set filepath to the empty string. When we get targets,
+	// we rely on the target info struct instead.
+	const filePath = ""
+	ti, err := c.up.GetTargetInfo(target)
+	if err != nil {
+		return nil, fmt.Errorf("getting info for target \"%s\": %w", target, err)
+	}
+
+	path, tb, err := c.up.FindCachedTarget(ti, filePath)
+	if err != nil {
+		return nil, fmt.Errorf("getting target cache: %w", err)
+	}
+	if path != "" {
+		// Cached version found
+		return tb, nil
+	}
+
+	// Download of target is needed
+	// Ignore targetsBaseURL, set to empty string
+	const targetsBaseURL = ""
+	_, tb, err = c.up.DownloadTarget(ti, filePath, targetsBaseURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to download target file %s - %w", target, err)
+	}
+
+	return tb, nil
+}
+
+// URLToPath converts a URL to a filename-compatible string
+func URLToPath(url string) string {
+	// Strip scheme, replace slashes with dashes
+	// e.g.
https://github.github.com/prod-tuf-root -> github.github.com-prod-tuf-root + fn := url + fn, _ = strings.CutPrefix(fn, "https://") + fn, _ = strings.CutPrefix(fn, "http://") + fn = strings.ReplaceAll(fn, "/", "-") + fn = strings.ReplaceAll(fn, ":", "-") + + return strings.ToLower(fn) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go new file mode 100644 index 000000000000..3f5a81f1e5e3 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go @@ -0,0 +1,54 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tuf + +import ( + "encoding/json" + "fmt" + "os" + "time" +) + +type Config struct { + LastTimestamp time.Time `json:"lastTimestamp"` +} + +func LoadConfig(p string) (*Config, error) { + var c Config + + b, err := os.ReadFile(p) + if err != nil { + return nil, fmt.Errorf("failed to read config: %w", err) + } + err = json.Unmarshal(b, &c) + if err != nil { + return nil, fmt.Errorf("malformed config file: %w", err) + } + + return &c, nil +} + +func (c *Config) Persist(p string) error { + b, err := json.Marshal(c) + if err != nil { + return fmt.Errorf("failed to JSON marshal config: %w", err) + } + err = os.WriteFile(p, b, 0600) + if err != nil { + return fmt.Errorf("failed to write config: %w", err) + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go new file mode 100644 index 000000000000..e3df77a55d55 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go @@ -0,0 +1,177 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tuf + +import ( + "embed" + "math" + "os" + "path/filepath" + + "github.com/sigstore/sigstore-go/pkg/util" + "github.com/theupdateframework/go-tuf/v2/metadata/fetcher" +) + +//go:embed repository +var embeddedRepo embed.FS + +const ( + DefaultMirror = "https://tuf-repo-cdn.sigstore.dev" + StagingMirror = "https://tuf-repo-cdn.sigstage.dev" + + // The following caching values can be used for the CacheValidity option + NoCache = 0 + MaxCache = math.MaxInt +) + +// Options represent the various options for a Sigstore TUF Client +type Options struct { + // CacheValidity period in days (default 0). The client will persist a + // timestamp with the cache after refresh. 
Note that the client will + // always refresh the cache if the metadata is expired or if the client is + // unable to find a persisted timestamp, so this is not an optimal control + // for air-gapped environments. Use const MaxCache to update the cache when + // the metadata is expired, though the first initialization will still + // refresh the cache. + CacheValidity int + // ForceCache controls if the cache should be used without update + // as long as the metadata is valid. Use ForceCache over CacheValidity + // if you want to always use the cache up until its expiration. Note that + // the client will refresh the cache once the metadata has expired, so this + // is not an optimal control for air-gapped environments. Clients instead + // should provide a trust root file directly to the client to bypass TUF. + ForceCache bool + // Root is the TUF trust anchor + Root []byte + // CachePath is the location on disk for TUF cache + // (default $HOME/.sigstore/tuf) + CachePath string + // RepositoryBaseURL is the TUF repository location URL + // (default https://tuf-repo-cdn.sigstore.dev) + RepositoryBaseURL string + // DisableLocalCache mode allows a client to work on a read-only + // files system if this is set, cache path is ignored. + DisableLocalCache bool + // DisableConsistentSnapshot + DisableConsistentSnapshot bool + // Fetcher is the metadata fetcher + Fetcher fetcher.Fetcher +} + +// WithCacheValidity sets the cache validity period in days +func (o *Options) WithCacheValidity(days int) *Options { + o.CacheValidity = days + return o +} + +// WithForceCache forces the client to use the cache without updating +func (o *Options) WithForceCache() *Options { + o.ForceCache = true + return o +} + +// WithRoot sets the TUF trust anchor +func (o *Options) WithRoot(root []byte) *Options { + o.Root = root + return o +} + +// WithCachePath sets the location on disk for TUF cache +func (o *Options) WithCachePath(path string) *Options { + o.CachePath = path + return o +} + +// WithRepositoryBaseURL sets the TUF repository location URL +func (o *Options) WithRepositoryBaseURL(url string) *Options { + o.RepositoryBaseURL = url + return o +} + +// WithDisableLocalCache sets the client to work on a read-only file system +func (o *Options) WithDisableLocalCache() *Options { + o.DisableLocalCache = true + return o +} + +// WithDisableConsistentSnapshot sets the client to disable consistent snapshot +func (o *Options) WithDisableConsistentSnapshot() *Options { + o.DisableConsistentSnapshot = true + return o +} + +// WithFetcher sets the metadata fetcher +func (o *Options) WithFetcher(f fetcher.Fetcher) *Options { + o.Fetcher = f + return o +} + +// DefaultOptions returns an options struct for the public good instance +func DefaultOptions() *Options { + var opts Options + var err error + + opts.Root = DefaultRoot() + home, err := os.UserHomeDir() + if err != nil { + // Fall back to using a TUF repository in the temp location + home = os.TempDir() + } + opts.CachePath = filepath.Join(home, ".sigstore", "root") + opts.RepositoryBaseURL = DefaultMirror + fetcher := fetcher.NewDefaultFetcher() + fetcher.SetHTTPUserAgent(util.ConstructUserAgent()) + opts.Fetcher = fetcher + + return &opts +} + +// DefaultRoot returns the root.json for the public good instance +func DefaultRoot() []byte { + // The embed file system always uses forward slashes as path separators, + // even on Windows + p := "repository/root.json" + + b, err := embeddedRepo.ReadFile(p) + if err != nil { + // This should never happen. 
+ // ReadFile from an embedded FS will never fail as long as + // the path is correct. If it fails, it would mean + // that the binary is not assembled as it should, and there + // is no way to recover from that. + panic(err) + } + + return b +} + +// StagingRoot returns the root.json for the staging instance +func StagingRoot() []byte { + // The embed file system always uses forward slashes as path separators, + // even on Windows + p := "repository/staging_root.json" + + b, err := embeddedRepo.ReadFile(p) + if err != nil { + // This should never happen. + // ReadFile from an embedded FS will never fail as long as + // the path is correct. If it fails, it would mean + // that the binary is not assembled as it should, and there + // is no way to recover from that. + panic(err) + } + + return b +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json new file mode 100644 index 000000000000..fe4f3ba2189d --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json @@ -0,0 +1,145 @@ +{ + "signatures": [ + { + "keyid": "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", + "sig": "" + }, + { + "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "sig": "3045022100bbddd464f8066ceb88ba787375c12cd6330680e08c2910703e6538c71cc79ad202205190b06e4537fe961b3ef81fe68edcd0089c19f919afed423b9aafd700641153" + }, + { + "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "sig": "3044022069306cd5257f732a740c1afe60a8e433c5de58eafeadbe99c336c9c71d198cf802200d773953ae7dbc48d3e5bad9a6f64bafff196b7e2ad4a52a19519367d47dc042" + }, + { + "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "sig": "304402204d21a2ec80df66e61f6fe2912951dc47df836036f8c0ab10816d375e71dbf79e0220547adce1afdf04e6794efa203dd5264c6f7e0ef78e57fe934b0d26cb994eec76" + }, + { + "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "sig": "3045022060826496557144eb1649893ed5f6f4ea54536feb0ca82f8b89ae641be39743e5022100ad7118b5e9d4837326206e412fc6da2999925d110328a7c166b06c624336c93f" + }, + { + "keyid": "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632", + "sig": "3046022100d8179439c2e73eb0c1733abee7faf832dcaea7263edcb4919891c3a247f05923022100e1a437e0797e803f9b72dc9d2d92155b0a2270c24efdd5f4b3a5d8f0b0f431a7" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2026-01-22T13:05:59Z", + "keys": { + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "gcpkms:projects/sigstore-root-signing/locations/global/keyRings/root/cryptoKeys/timestamp/cryptoKeyVersions/1" + }, + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMxpPOJCIZ5otG4106fGJseEQi3V9\npkMYQ4uyV9Tj1M7WHXIyLG+jkfvuG0glQ1JZbRZZBV3gAR4sojdGHISeow==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@lance" + }, + 
"22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@santiagotorres" + }, + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@bobcallaway" + }, + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@joshuagl" + }, + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@mnm678" + } + }, + "roles": { + "root": { + "keyids": [ + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632" + ], + "threshold": 3 + }, + "snapshot": { + "keyids": [ + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 365 + }, + "targets": { + "keyids": [ + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632" + ], + "threshold": 3 + }, + "timestamp": { + "keyids": [ + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 7, + "x-tuf-on-ci-signing-period": 6 + } + }, + "spec_version": "1.0", + "version": 13, + "x-tuf-on-ci-expiry-period": 197, + "x-tuf-on-ci-signing-period": 46 + } +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json new file mode 100644 index 000000000000..2a06edad0884 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json @@ -0,0 +1,107 @@ +{ + "signatures": [ + { + "keyid": 
"aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "sig": "3046022100fe72afdbab1bef70c6f461f39f5e75cf543e5277648bfab798a108a0f76f0ca002210098e1e1804b7a13bab42c063691864d85fc4bf6f5a875346b388be00f139c6118" + }, + { + "keyid": "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", + "sig": "304502210094423ead9a7d546d703f649b408441688eb30f3279fb065b28eea05d2b36843102206f21fa2888836485964c7cb7468a16ddb6297784c50cdba03888578d7b46e0c7" + }, + { + "keyid": "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "sig": "" + }, + { + "keyid": "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5", + "sig": "" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2025-12-26T13:27:03Z", + "keys": { + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEoxkvDOmtGEknB3M+ZkPts8joDM0X\nIH5JZwPlgC2CXs/eqOuNF8AcEWwGYRiDhV/IMlQw5bg8PLICQcgsbrDiKg==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@mnm678" + }, + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE++Wv+DcLRk+mfkmlpCwl1GUi9EMh\npBUTz8K0fH7bE4mQuViGSyWA/eyMc0HvzZi6Xr0diHw0/lUPBvok214YQw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@kommendorkapten" + }, + "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFHDb85JH+JYR1LQmxiz4UMokVMnP\nxKoWpaEnFCKXH8W4Fc/DfIxMnkpjCuvWUBdJXkO0aDIxwsij8TOFh2R7dw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@joshuagl" + }, + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEohqIdE+yTl4OxpX8ZxNUPrg3SL9H\nBDnhZuceKkxy2oMhUOxhWweZeG3bfM1T4ZLnJimC6CAYVU5+F5jZCoftRw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@jku" + }, + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExxmEtmhF5U+i+v/6he4BcSLzCgMx\n/0qSrvDg6bUWwUrkSKS2vDpcJrhGy5fmmhRrGawjPp1ALpC3y1kqFTpXDg==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "gcpkms:projects/projectsigstore-staging/locations/global/keyRings/tuf-keyring/cryptoKeys/tuf-key/cryptoKeyVersions/2" + } + }, + "roles": { + "root": { + "keyids": [ + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", + "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5" + ], + "threshold": 2 + }, + "snapshot": { + "keyids": [ + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 365 + }, + "targets": { + "keyids": [ + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", 
+ "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5" + ], + "threshold": 1 + }, + "timestamp": { + "keyids": [ + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 7, + "x-tuf-on-ci-signing-period": 6 + } + }, + "spec_version": "1.0", + "version": 12, + "x-tuf-on-ci-expiry-period": 182, + "x-tuf-on-ci-signing-period": 35 + } +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go new file mode 100644 index 000000000000..e5a1b088c189 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util //nolint:revive + +import ( + "runtime/debug" +) + +func ConstructUserAgent() string { + userAgent := "sigstore-go" + + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return userAgent + } + + for _, eachDep := range buildInfo.Deps { + if eachDep.Path == "github.com/sigstore/sigstore-go" { + userAgent += "/" + userAgent += eachDep.Version + } + } + + return userAgent +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go new file mode 100644 index 000000000000..e33d915a8d8b --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go @@ -0,0 +1,34 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package verify + +import ( + "crypto/x509" + "errors" + "time" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +func VerifyLeafCertificate(observerTimestamp time.Time, leafCert *x509.Certificate, trustedMaterial root.TrustedMaterial) ([][]*x509.Certificate, error) { // nolint: revive + for _, ca := range trustedMaterial.FulcioCertificateAuthorities() { + chains, err := ca.Verify(leafCert, observerTimestamp) + if err == nil { + return chains, nil + } + } + + return nil, errors.New("leaf certificate verification failed") +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go new file mode 100644 index 000000000000..1d3bad1a7144 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go @@ -0,0 +1,217 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" +) + +type SubjectAlternativeNameMatcher struct { + SubjectAlternativeName string `json:"subjectAlternativeName"` + Regexp regexp.Regexp `json:"regexp,omitempty"` +} + +type IssuerMatcher struct { + Issuer string `json:"issuer"` + Regexp regexp.Regexp `json:"regexp,omitempty"` +} + +type CertificateIdentity struct { + SubjectAlternativeName SubjectAlternativeNameMatcher `json:"subjectAlternativeName"` + Issuer IssuerMatcher `json:"issuer"` + certificate.Extensions +} + +type CertificateIdentities []CertificateIdentity + +type ErrValueMismatch struct { + object string + expected string + actual string +} + +func (e *ErrValueMismatch) Error() string { + return fmt.Sprintf("expected %s value \"%s\", got \"%s\"", e.object, e.expected, e.actual) +} + +type ErrValueRegexMismatch struct { + object string + regex string + value string +} + +func (e *ErrValueRegexMismatch) Error() string { + return fmt.Sprintf("expected %s value to match regex \"%s\", got \"%s\"", e.object, e.regex, e.value) +} + +type ErrNoMatchingCertificateIdentity struct { + errors []error +} + +func (e *ErrNoMatchingCertificateIdentity) Error() string { + if len(e.errors) > 0 { + return fmt.Sprintf("no matching CertificateIdentity found, last error: %v", e.errors[len(e.errors)-1]) + } + return "no matching CertificateIdentity found" +} + +func (e *ErrNoMatchingCertificateIdentity) Unwrap() []error { + return e.errors +} + +// NewSANMatcher provides an easier way to create a SubjectAlternativeNameMatcher. +// If the regexpStr fails to compile into a Regexp, an error is returned. 
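+//
+// A small illustrative sketch (the SAN value below is a placeholder; an
+// exact value, a regular expression, or both may be supplied):
+//
+//	m, err := verify.NewSANMatcher("user@example.com", "")
+//	if err != nil {
+//		// the regular expression did not compile
+//	}
+//	_ = m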
+func NewSANMatcher(sanValue string, regexpStr string) (SubjectAlternativeNameMatcher, error) { + r, err := regexp.Compile(regexpStr) + if err != nil { + return SubjectAlternativeNameMatcher{}, err + } + + return SubjectAlternativeNameMatcher{ + SubjectAlternativeName: sanValue, + Regexp: *r}, nil +} + +// The default Regexp json marshal is quite ugly, so we override it here. +func (s *SubjectAlternativeNameMatcher) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + SubjectAlternativeName string `json:"subjectAlternativeName"` + Regexp string `json:"regexp,omitempty"` + }{ + SubjectAlternativeName: s.SubjectAlternativeName, + Regexp: s.Regexp.String(), + }) +} + +// Verify checks if the actualCert matches the SANMatcher's Value and +// Regexp – if those values have been provided. +func (s SubjectAlternativeNameMatcher) Verify(actualCert certificate.Summary) error { + if s.SubjectAlternativeName != "" && + actualCert.SubjectAlternativeName != s.SubjectAlternativeName { + return &ErrValueMismatch{"SAN", string(s.SubjectAlternativeName), string(actualCert.SubjectAlternativeName)} + } + + if s.Regexp.String() != "" && + !s.Regexp.MatchString(actualCert.SubjectAlternativeName) { + return &ErrValueRegexMismatch{"SAN", string(s.Regexp.String()), string(actualCert.SubjectAlternativeName)} + } + return nil +} + +func NewIssuerMatcher(issuerValue, regexpStr string) (IssuerMatcher, error) { + r, err := regexp.Compile(regexpStr) + if err != nil { + return IssuerMatcher{}, err + } + + return IssuerMatcher{Issuer: issuerValue, Regexp: *r}, nil +} + +func (i *IssuerMatcher) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Issuer string `json:"issuer"` + Regexp string `json:"regexp,omitempty"` + }{ + Issuer: i.Issuer, + Regexp: i.Regexp.String(), + }) +} + +func (i IssuerMatcher) Verify(actualCert certificate.Summary) error { + if i.Issuer != "" && + actualCert.Issuer != i.Issuer { + return &ErrValueMismatch{"issuer", string(i.Issuer), string(actualCert.Issuer)} + } + + if i.Regexp.String() != "" && + !i.Regexp.MatchString(actualCert.Issuer) { + return &ErrValueRegexMismatch{"issuer", string(i.Regexp.String()), string(actualCert.Issuer)} + } + return nil +} + +func NewCertificateIdentity(sanMatcher SubjectAlternativeNameMatcher, issuerMatcher IssuerMatcher, extensions certificate.Extensions) (CertificateIdentity, error) { + if sanMatcher.SubjectAlternativeName == "" && sanMatcher.Regexp.String() == "" { + return CertificateIdentity{}, errors.New("when verifying a certificate identity, there must be subject alternative name criteria") + } + + if issuerMatcher.Issuer == "" && issuerMatcher.Regexp.String() == "" { + return CertificateIdentity{}, errors.New("when verifying a certificate identity, must specify Issuer criteria") + } + + if extensions.Issuer != "" { + return CertificateIdentity{}, errors.New("please specify issuer in IssuerMatcher, not Extensions") + } + + certID := CertificateIdentity{ + SubjectAlternativeName: sanMatcher, + Issuer: issuerMatcher, + Extensions: extensions, + } + + return certID, nil +} + +// NewShortCertificateIdentity provides a more convenient way of initializing +// a CertificiateIdentity with a SAN and the Issuer OID extension. If you need +// to check more OID extensions, use NewCertificateIdentity instead. 
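+//
+// For example, an identity for a particular CI workflow could be expressed
+// as follows (the issuer and SAN pattern are illustrative values):
+//
+//	certID, err := NewShortCertificateIdentity(
+//		"https://token.actions.githubusercontent.com", "",
+//		"", `^https://github\.com/example/repo/\.github/workflows/.*$`)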
+func NewShortCertificateIdentity(issuer, issuerRegex, sanValue, sanRegex string) (CertificateIdentity, error) { + sanMatcher, err := NewSANMatcher(sanValue, sanRegex) + if err != nil { + return CertificateIdentity{}, err + } + + issuerMatcher, err := NewIssuerMatcher(issuer, issuerRegex) + if err != nil { + return CertificateIdentity{}, err + } + + return NewCertificateIdentity(sanMatcher, issuerMatcher, certificate.Extensions{}) +} + +// Verify verifies the CertificateIdentities, and if ANY of them match the cert, +// it returns the CertificateIdentity that matched. If none match, it returns an +// error. +func (i CertificateIdentities) Verify(cert certificate.Summary) (*CertificateIdentity, error) { + multierr := &ErrNoMatchingCertificateIdentity{} + var err error + for _, ci := range i { + if err = ci.Verify(cert); err == nil { + return &ci, nil + } + multierr.errors = append(multierr.errors, err) + } + return nil, multierr +} + +// Verify checks if the actualCert matches the CertificateIdentity's SAN and +// any of the provided OID extension values. Any empty values are ignored. +func (c CertificateIdentity) Verify(actualCert certificate.Summary) error { + var err error + if err = c.SubjectAlternativeName.Verify(actualCert); err != nil { + return err + } + + if err = c.Issuer.Verify(actualCert); err != nil { + return err + } + + return certificate.CompareExtensions(c.Extensions, actualCert.Extensions) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go new file mode 100644 index 000000000000..18263c098338 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go @@ -0,0 +1,39 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "fmt" +) + +type ErrVerification struct { + err error +} + +func NewVerificationError(e error) ErrVerification { + return ErrVerification{e} +} + +func (e ErrVerification) Unwrap() error { + return e.err +} + +func (e ErrVerification) String() string { + return fmt.Sprintf("verification error: %s", e.err.Error()) +} + +func (e ErrVerification) Error() string { + return e.String() +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go new file mode 100644 index 000000000000..85bcb50284df --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go @@ -0,0 +1,130 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "crypto/x509" + "errors" + "time" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tlog" +) + +var errNotImplemented = errors.New("not implemented") + +type HasInclusionPromise interface { + HasInclusionPromise() bool +} + +type HasInclusionProof interface { + HasInclusionProof() bool +} + +type SignatureProvider interface { + SignatureContent() (SignatureContent, error) +} + +type SignedTimestampProvider interface { + Timestamps() ([][]byte, error) +} + +type TlogEntryProvider interface { + TlogEntries() ([]*tlog.Entry, error) +} + +type VerificationProvider interface { + VerificationContent() (VerificationContent, error) +} + +type VersionProvider interface { + Version() (string, error) +} + +type SignedEntity interface { + HasInclusionPromise + HasInclusionProof + SignatureProvider + SignedTimestampProvider + TlogEntryProvider + VerificationProvider + VersionProvider +} + +type VerificationContent interface { + CompareKey(any, root.TrustedMaterial) bool + ValidAtTime(time.Time, root.TrustedMaterial) bool + Certificate() *x509.Certificate + PublicKey() PublicKeyProvider +} + +type SignatureContent interface { + Signature() []byte + EnvelopeContent() EnvelopeContent + MessageSignatureContent() MessageSignatureContent +} + +type PublicKeyProvider interface { + Hint() string +} + +type MessageSignatureContent interface { + Digest() []byte + DigestAlgorithm() string + Signature() []byte +} + +type EnvelopeContent interface { + RawEnvelope() *dsse.Envelope + Statement() (*in_toto.Statement, error) +} + +// BaseSignedEntity is a helper struct that implements all the interfaces +// of SignedEntity. It can be embedded in a struct to implement the SignedEntity +// interface. This may be useful for testing, or for implementing a SignedEntity +// that only implements a subset of the interfaces. +type BaseSignedEntity struct{} + +var _ SignedEntity = &BaseSignedEntity{} + +func (b *BaseSignedEntity) HasInclusionPromise() bool { + return false +} + +func (b *BaseSignedEntity) HasInclusionProof() bool { + return false +} + +func (b *BaseSignedEntity) VerificationContent() (VerificationContent, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) SignatureContent() (SignatureContent, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) Timestamps() ([][]byte, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) TlogEntries() ([]*tlog.Entry, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) Version() (string, error) { + return "", errNotImplemented +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go new file mode 100644 index 000000000000..bf447c28c39e --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go @@ -0,0 +1,100 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "crypto/x509" + "encoding/hex" + "errors" + "fmt" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/ctutil" + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509util" + "github.com/sigstore/sigstore-go/pkg/root" +) + +// VerifySignedCertificateTimestamp, given a threshold, TrustedMaterial, and a +// leaf certificate, will extract SCTs from the leaf certificate and verify the +// timestamps using the TrustedMaterial's FulcioCertificateAuthorities() and +// CTLogs() +func VerifySignedCertificateTimestamp(chains [][]*x509.Certificate, threshold int, trustedMaterial root.TrustedMaterial) error { // nolint: revive + if len(chains) == 0 || len(chains[0]) == 0 || chains[0][0] == nil { + return errors.New("no chains provided") + } + // The first certificate in the chain is always the leaf certificate + leaf := chains[0][0] + + ctlogs := trustedMaterial.CTLogs() + + scts, err := x509util.ParseSCTsFromCertificate(leaf.Raw) + if err != nil { + return err + } + + leafCTCert, err := ctx509.ParseCertificates(leaf.Raw) + if err != nil { + return err + } + + verified := 0 + for _, sct := range scts { + encodedKeyID := hex.EncodeToString(sct.LogID.KeyID[:]) + key, ok := ctlogs[encodedKeyID] + if !ok { + // skip entries the trust root cannot verify + continue + } + + // Ensure sct is within ctlog validity window + sctTime := ct.TimestampToTime(sct.Timestamp) + if !key.ValidityPeriodStart.IsZero() && sctTime.Before(key.ValidityPeriodStart) { + // skip entries that were before ctlog key start time + continue + } + if !key.ValidityPeriodEnd.IsZero() && sctTime.After(key.ValidityPeriodEnd) { + // skip entries that were after ctlog key end time + continue + } + + for _, chain := range chains { + fulcioChain := make([]*ctx509.Certificate, len(leafCTCert)) + copy(fulcioChain, leafCTCert) + + if len(chain) < 2 { + continue + } + parentCert := chain[1].Raw + + fulcioIssuer, err := ctx509.ParseCertificates(parentCert) + if err != nil { + continue + } + fulcioChain = append(fulcioChain, fulcioIssuer...) + + err = ctutil.VerifySCT(key.PublicKey, fulcioChain, sct, true) + if err == nil { + verified++ + } + } + } + + if verified < threshold { + return fmt.Errorf("only able to verify %d SCT entries; unable to meet threshold of %d", verified, threshold) + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go new file mode 100644 index 000000000000..0386a98e18cf --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go @@ -0,0 +1,519 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/x509" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "os" + "slices" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore/pkg/signature" + sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +const maxAllowedSubjects = 1024 +const maxAllowedSubjectDigests = 32 + +var ErrDSSEInvalidSignatureCount = errors.New("exactly one signature is required") + +func VerifySignature(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive + verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false) + if err != nil { + return fmt.Errorf("could not load signature verifier: %w", err) + } + + return verifySignatureWithVerifier(verifier, sigContent, verificationContent, trustedMaterial) +} + +func verifySignatureWithVerifier(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive + if envelope := sigContent.EnvelopeContent(); envelope != nil { + return verifyEnvelope(verifier, envelope) + } else if msg := sigContent.MessageSignatureContent(); msg != nil { + return errors.New("artifact must be provided to verify message signature") + } + + // handle an invalid signature content message + return fmt.Errorf("signature content has neither an envelope or a message") +} + +func VerifySignatureWithArtifacts(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifacts []io.Reader) error { // nolint: revive + verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false) + if err != nil { + return fmt.Errorf("could not load signature verifier: %w", err) + } + return verifySignatureWithVerifierAndArtifacts(verifier, sigContent, verificationContent, trustedMaterial, artifacts) +} + +func verifySignatureWithVerifierAndArtifacts(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifacts []io.Reader) error { // nolint: revive + envelope := sigContent.EnvelopeContent() + msg := sigContent.MessageSignatureContent() + if envelope == nil && msg == nil { + return fmt.Errorf("signature content has neither an envelope or a message") + } + // If there is only one artifact and no envelope, + // attempt to verify the message signature with the artifact. 
+ if envelope == nil { + if len(artifacts) != 1 { + return fmt.Errorf("only one artifact can be verified with a message signature") + } + return verifyMessageSignature(verifier, msg, artifacts[0]) + } + + // Otherwise, verify the envelope with the provided artifacts + return verifyEnvelopeWithArtifacts(verifier, envelope, artifacts) +} + +func VerifySignatureWithArtifactDigests(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, digests []ArtifactDigest) error { // nolint: revive + verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false) + if err != nil { + return fmt.Errorf("could not load signature verifier: %w", err) + } + return verifySignatureWithVerifierAndArtifactDigests(verifier, sigContent, verificationContent, trustedMaterial, digests) +} + +func verifySignatureWithVerifierAndArtifactDigests(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, digests []ArtifactDigest) error { // nolint: revive + envelope := sigContent.EnvelopeContent() + msg := sigContent.MessageSignatureContent() + if envelope == nil && msg == nil { + return fmt.Errorf("signature content has neither an envelope or a message") + } + // If there is only one artifact and no envelope, + // attempt to verify the message signature with the artifact. + if envelope == nil { + if len(digests) != 1 { + return fmt.Errorf("only one artifact can be verified with a message signature") + } + return verifyMessageSignatureWithArtifactDigest(verifier, msg, digests[0].Digest) + } + + return verifyEnvelopeWithArtifactDigests(verifier, envelope, digests) +} + +// compatVerifier is a signature.Verifier that tries multiple verifiers +// and returns nil if any of them verify the signature. This is used to +// verify signatures that were generated with old clients that used SHA256 +// for ECDSA P384/P521 keys. +type compatVerifier struct { + verifiers []signature.Verifier +} + +func (v *compatVerifier) VerifySignature(signature, message io.Reader, opts ...signature.VerifyOption) error { + // Create a buffer to store the signature bytes + sigBuf := &bytes.Buffer{} + sigTee := io.TeeReader(signature, sigBuf) + sigBytes, err := io.ReadAll(sigTee) + if err != nil { + return fmt.Errorf("failed to read signature: %w", err) + } + + // Create a buffer to store the message bytes + msgBuf := &bytes.Buffer{} + msgTee := io.TeeReader(message, msgBuf) + msgBytes, err := io.ReadAll(msgTee) + if err != nil { + return fmt.Errorf("failed to read message: %w", err) + } + + for idx, verifier := range v.verifiers { + if idx != 0 { + fmt.Fprint(os.Stderr, "Failed to verify signature with default verifier, trying compatibility verifier\n") + } + err := verifier.VerifySignature(bytes.NewReader(sigBytes), bytes.NewReader(msgBytes), opts...) + if err == nil { + return nil + } + } + return fmt.Errorf("no compatible verifier found") +} + +func (v *compatVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return v.verifiers[0].PublicKey(opts...) +} + +func compatSignatureVerifier(leafCert *x509.Certificate, enableCompat bool, isDSSE bool) (signature.Verifier, error) { + // LoadDefaultSigner/Verifier functions accept a few options to select + // the default signer/verifier when there are ambiguities, like for + // ED25519 keys, which could be used with PureEd25519 or Ed25519ph. 
+ // + // When dealing with DSSE, use ED25519, but when we are working with + // hashedrekord entries, use ED25519ph by default, because this is the + // only option. + var defaultOpts []signature.LoadOption + if !isDSSE { + defaultOpts = []signature.LoadOption{options.WithED25519ph()} + } + + verifiers := make([]signature.Verifier, 0) + verifier, err := signature.LoadDefaultVerifier(leafCert.PublicKey, defaultOpts...) + if err != nil { + return nil, err + } + // If compatibility is not enabled, return only the default verifier + if !enableCompat { + return verifier, nil + } + verifiers = append(verifiers, verifier) + + // Add a compatibility verifier for ECDSA P384/P521, because we still want + // to verify signatures generated with old clients that used SHA256 + var algorithmDetails signature.AlgorithmDetails + if pubKey, ok := leafCert.PublicKey.(*ecdsa.PublicKey); ok { + switch pubKey.Curve { + case elliptic.P384(): + //nolint:staticcheck // Need to use deprecated field for backwards compatibility + algorithmDetails, err = signature.GetAlgorithmDetails(v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256) + case elliptic.P521(): + //nolint:staticcheck // Need to use deprecated field for backwards compatibility + algorithmDetails, err = signature.GetAlgorithmDetails(v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_256) + default: + return verifier, nil + } + if err != nil { + return nil, err + } + verifier, err = signature.LoadVerifierFromAlgorithmDetails(leafCert.PublicKey, algorithmDetails, defaultOpts...) + } + if err != nil { + return nil, err + } + verifiers = append(verifiers, verifier) + return &compatVerifier{verifiers: verifiers}, nil +} + +func getSignatureVerifier(sigContent SignatureContent, verificationContent VerificationContent, tm root.TrustedMaterial, enableCompat bool) (signature.Verifier, error) { + if leafCert := verificationContent.Certificate(); leafCert != nil { + isDSSE := sigContent.EnvelopeContent() != nil + return compatSignatureVerifier(leafCert, enableCompat, isDSSE) + } else if pk := verificationContent.PublicKey(); pk != nil { + return tm.PublicKeyVerifier(pk.Hint()) + } + + return nil, fmt.Errorf("no public key or certificate found") +} + +func verifyEnvelope(verifier signature.Verifier, envelope EnvelopeContent) error { + dsseEnv := envelope.RawEnvelope() + + // A DSSE envelope in a Sigstore bundle MUST only contain one + // signature, even though DSSE is more permissive. 
+ if len(dsseEnv.Signatures) != 1 { + return ErrDSSEInvalidSignatureCount + } + pub, err := verifier.PublicKey() + if err != nil { + return fmt.Errorf("could not fetch verifier public key: %w", err) + } + envVerifier, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{ + SignatureVerifier: verifier, + Pub: pub, + }) + + if err != nil { + return fmt.Errorf("could not load envelope verifier: %w", err) + } + + _, err = envVerifier.Verify(context.Background(), dsseEnv) + if err != nil { + return fmt.Errorf("could not verify envelope: %w", err) + } + + return nil +} + +func verifyEnvelopeWithArtifacts(verifier signature.Verifier, envelope EnvelopeContent, artifacts []io.Reader) error { + if err := verifyEnvelope(verifier, envelope); err != nil { + return err + } + statement, err := envelope.Statement() + if err != nil { + return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err) + } + if err = limitSubjects(statement); err != nil { + return err + } + // Sanity check (no subjects) + if len(statement.Subject) == 0 { + return errors.New("no subjects found in statement") + } + + // determine which hash functions to use + hashFuncs, err := getHashFunctions(statement) + if err != nil { + return fmt.Errorf("unable to determine hash functions: %w", err) + } + + hashedArtifacts := make([]map[crypto.Hash][]byte, len(artifacts)) + for i, artifact := range artifacts { + // Compute digest of the artifact. + hasher, err := newMultihasher(hashFuncs) + if err != nil { + return fmt.Errorf("could not verify artifact: unable to create hasher: %w", err) + } + if _, err = io.Copy(hasher, artifact); err != nil { + return fmt.Errorf("could not verify artifact: unable to calculate digest: %w", err) + } + hashedArtifacts[i] = hasher.Sum(nil) + } + + // create a map based on the digests present in the statement + // the map key is the hash algorithm and the field is a slice of digests + // created using that hash algorithm + subjectDigests := make(map[crypto.Hash][][]byte) + for _, subject := range statement.Subject { + for alg, hexdigest := range subject.Digest { + hf, err := algStringToHashFunc(alg) + if err != nil { + continue + } + if _, ok := subjectDigests[hf]; !ok { + subjectDigests[hf] = make([][]byte, 0) + } + digest, err := hex.DecodeString(hexdigest) + if err != nil { + continue + } + subjectDigests[hf] = append(subjectDigests[hf], digest) + } + } + + // now loop over the provided artifact digests and try to compare them + // to the mapped subject digests + // if we cannot find a match, exit with an error + for _, ha := range hashedArtifacts { + matchFound := false + for key, value := range ha { + statementDigests, ok := subjectDigests[key] + if !ok { + return fmt.Errorf("no matching artifact hash algorithm found in subject digests") + } + if ok := isDigestInSlice(value, statementDigests); ok { + matchFound = true + break + } + } + if !matchFound { + return fmt.Errorf("provided artifact digests do not match digests in statement") + } + } + + return nil +} + +func verifyEnvelopeWithArtifactDigests(verifier signature.Verifier, envelope EnvelopeContent, digests []ArtifactDigest) error { + if err := verifyEnvelope(verifier, envelope); err != nil { + return err + } + statement, err := envelope.Statement() + if err != nil { + return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err) + } + if err = limitSubjects(statement); err != nil { + return err + } + + // create a map based on the digests present in the statement + // the 
map key is the hash algorithm and the field is a slice of digests + // created using that hash algorithm + subjectDigests := make(map[string][][]byte) + for _, subject := range statement.Subject { + for alg, digest := range subject.Digest { + if _, ok := subjectDigests[alg]; !ok { + subjectDigests[alg] = make([][]byte, 0) + } + hexdigest, err := hex.DecodeString(digest) + if err != nil { + return fmt.Errorf("could not verify artifact: unable to decode subject digest: %w", err) + } + subjectDigests[alg] = append(subjectDigests[alg], hexdigest) + } + } + + // now loop over the provided artifact digests and compare them to the mapped subject digests + // if we cannot find a match, exit with an error + for _, artifactDigest := range digests { + statementDigests, ok := subjectDigests[artifactDigest.Algorithm] + if !ok { + return fmt.Errorf("provided artifact digests does not match digests in statement") + } + if ok := isDigestInSlice(artifactDigest.Digest, statementDigests); !ok { + return fmt.Errorf("provided artifact digest does not match any digest in statement") + } + } + + return nil +} + +func isDigestInSlice(digest []byte, digestSlice [][]byte) bool { + for _, el := range digestSlice { + if bytes.Equal(digest, el) { + return true + } + } + return false +} + +func verifyMessageSignature(verifier signature.Verifier, msg MessageSignatureContent, artifact io.Reader) error { + err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), artifact) + if err != nil { + return fmt.Errorf("could not verify message: %w", err) + } + + return nil +} + +func verifyMessageSignatureWithArtifactDigest(verifier signature.Verifier, msg MessageSignatureContent, artifactDigest []byte) error { + if !bytes.Equal(artifactDigest, msg.Digest()) { + return errors.New("artifact does not match digest") + } + if _, ok := verifier.(*signature.ED25519Verifier); ok { + return errors.New("message signatures with ed25519 signatures can only be verified with artifacts, and not just their digest") + } + err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), bytes.NewReader([]byte{}), options.WithDigest(artifactDigest)) + + if err != nil { + return fmt.Errorf("could not verify message: %w", err) + } + + return nil +} + +// limitSubjects limits the number of subjects and digests in a statement to prevent DoS. 
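+// The caps are the package-level constants maxAllowedSubjects (1024 subjects
+// per statement) and maxAllowedSubjectDigests (32 digests per subject)
+// declared at the top of this file.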
+func limitSubjects(statement *in_toto.Statement) error { + if len(statement.Subject) > maxAllowedSubjects { + return fmt.Errorf("too many subjects: %d > %d", len(statement.Subject), maxAllowedSubjects) + } + for _, subject := range statement.Subject { + // limit the number of digests too + if len(subject.Digest) > maxAllowedSubjectDigests { + return fmt.Errorf("too many digests: %d > %d", len(subject.Digest), maxAllowedSubjectDigests) + } + } + return nil +} + +type multihasher struct { + io.Writer + hashfuncs []crypto.Hash + hashes []io.Writer +} + +func newMultihasher(hashfuncs []crypto.Hash) (*multihasher, error) { + if len(hashfuncs) == 0 { + return nil, errors.New("no hash functions specified") + } + hashes := make([]io.Writer, len(hashfuncs)) + for i := range hashfuncs { + hashes[i] = hashfuncs[i].New() + } + return &multihasher{ + Writer: io.MultiWriter(hashes...), + hashfuncs: hashfuncs, + hashes: hashes, + }, nil +} + +func (m *multihasher) Sum(b []byte) map[crypto.Hash][]byte { + sums := make(map[crypto.Hash][]byte, len(m.hashes)) + for i := range m.hashes { + sums[m.hashfuncs[i]] = m.hashes[i].(hash.Hash).Sum(b) + } + return sums +} + +func algStringToHashFunc(alg string) (crypto.Hash, error) { + switch alg { + case "sha256": + return crypto.SHA256, nil + case "sha384": + return crypto.SHA384, nil + case "sha512": + return crypto.SHA512, nil + default: + return 0, errors.New("unsupported digest algorithm") + } +} + +// getHashFunctions returns the smallest subset of supported hash functions +// that are needed to verify all subjects in a statement. +func getHashFunctions(statement *in_toto.Statement) ([]crypto.Hash, error) { + if len(statement.Subject) == 0 { + return nil, errors.New("no subjects found in statement") + } + + supportedHashFuncs := []crypto.Hash{crypto.SHA512, crypto.SHA384, crypto.SHA256} + chosenHashFuncs := make([]crypto.Hash, 0, len(supportedHashFuncs)) + subjectHashFuncs := make([][]crypto.Hash, len(statement.Subject)) + + // go through the statement and make a simple data structure to hold the + // list of hash funcs for each subject (subjectHashFuncs) + for i, subject := range statement.Subject { + for alg := range subject.Digest { + hf, err := algStringToHashFunc(alg) + if err != nil { + continue + } + subjectHashFuncs[i] = append(subjectHashFuncs[i], hf) + } + } + + // for each subject, see if we have chosen a compatible hash func, and if + // not, add the first one that is supported + for _, hfs := range subjectHashFuncs { + // if any of the hash funcs are already in chosenHashFuncs, skip + if len(intersection(hfs, chosenHashFuncs)) > 0 { + continue + } + + // check each supported hash func and add it if the subject + // has a digest for it + for _, hf := range supportedHashFuncs { + if slices.Contains(hfs, hf) { + chosenHashFuncs = append(chosenHashFuncs, hf) + break + } + } + } + + if len(chosenHashFuncs) == 0 { + return nil, errors.New("no supported digest algorithms found") + } + + return chosenHashFuncs, nil +} + +func intersection(a, b []crypto.Hash) []crypto.Hash { + var result []crypto.Hash + for _, x := range a { + if slices.Contains(b, x) { + result = append(result, x) + } + } + return result +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go new file mode 100644 index 000000000000..5751ec852cb1 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go @@ -0,0 +1,882 @@ +// Copyright 2023 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "crypto/x509" + "encoding/asn1" + "encoding/json" + "errors" + "fmt" + "io" + "time" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "google.golang.org/protobuf/encoding/protojson" +) + +const ( + VerificationResultMediaType01 = "application/vnd.dev.sigstore.verificationresult+json;version=0.1" +) + +type Verifier struct { + trustedMaterial root.TrustedMaterial + config VerifierConfig +} + +type VerifierConfig struct { // nolint: revive + // requireSignedTimestamps requires RFC3161 timestamps to verify + // short-lived certificates + requireSignedTimestamps bool + // signedTimestampThreshold is the minimum number of verified + // RFC3161 timestamps in a bundle + signedTimestampThreshold int + // requireIntegratedTimestamps requires log entry integrated timestamps to + // verify short-lived certificates + requireIntegratedTimestamps bool + // integratedTimeThreshold is the minimum number of log entry + // integrated timestamps in a bundle + integratedTimeThreshold int + // requireObserverTimestamps requires RFC3161 timestamps and/or log + // integrated timestamps to verify short-lived certificates + requireObserverTimestamps bool + // observerTimestampThreshold is the minimum number of verified + // RFC3161 timestamps and/or log integrated timestamps in a bundle + observerTimestampThreshold int + // requireTlogEntries requires log inclusion proofs in a bundle + requireTlogEntries bool + // tlogEntriesThreshold is the minimum number of verified inclusion + // proofs in a bundle + tlogEntriesThreshold int + // requireSCTs requires SCTs in Fulcio certificates + requireSCTs bool + // ctlogEntriesThreshold is the minimum number of verified SCTs in + // a Fulcio certificate + ctlogEntriesThreshold int + // useCurrentTime uses the current time rather than a provided signed + // or log timestamp. Most workflows will not use this option + useCurrentTime bool + // allowNoTimestamp can be used to skip timestamp checks when a key + // is used rather than a certificate. + allowNoTimestamp bool +} + +type VerifierOption func(*VerifierConfig) error + +// NewVerifier creates a new Verifier. It takes a +// root.TrustedMaterial, which contains a set of trusted public keys and +// certificates, and a set of VerifierConfigurators, which set the config +// that determines the behaviour of the Verify function. +// +// VerifierConfig's set of options should match the properties of a given +// Sigstore deployment, i.e. whether to expect SCTs, Tlog entries, or signed +// timestamps. 
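+//
+// For a deployment that issues Fulcio certificates, embeds SCTs, and logs
+// entries to Rekor, a caller might configure a verifier roughly as follows
+// (trustedMaterial is assumed to have been loaded separately):
+//
+//	v, err := NewVerifier(trustedMaterial,
+//		WithSignedCertificateTimestamps(1),
+//		WithTransparencyLog(1),
+//		WithObserverTimestamps(1))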
+func NewVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*Verifier, error) { + var err error + c := VerifierConfig{} + + for _, opt := range options { + err = opt(&c) + if err != nil { + return nil, fmt.Errorf("failed to configure verifier: %w", err) + } + } + + err = c.Validate() + if err != nil { + return nil, err + } + + v := &Verifier{ + trustedMaterial: trustedMaterial, + config: c, + } + + return v, nil +} + +// TODO: Remove the following deprecated functions in a future release before sigstore-go 2.0. + +// Deprecated: Use Verifier instead +type SignedEntityVerifier = Verifier + +// Deprecated: Use NewVerifier instead +func NewSignedEntityVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*Verifier, error) { + return NewVerifier(trustedMaterial, options...) +} + +// WithSignedTimestamps configures the Verifier to expect RFC 3161 +// timestamps from a Timestamp Authority, verify them using the TrustedMaterial's +// TimestampingAuthorities(), and, if it exists, use the resulting timestamp(s) +// to verify the Fulcio certificate. +func WithSignedTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("signed timestamp threshold must be at least 1") + } + c.requireSignedTimestamps = true + c.signedTimestampThreshold = threshold + return nil + } +} + +// WithObserverTimestamps configures the Verifier to expect +// timestamps from either an RFC3161 timestamp authority or a log's +// SignedEntryTimestamp. These are verified using the TrustedMaterial's +// TimestampingAuthorities() or RekorLogs(), and used to verify +// the Fulcio certificate. +func WithObserverTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("observer timestamp threshold must be at least 1") + } + c.requireObserverTimestamps = true + c.observerTimestampThreshold = threshold + return nil + } +} + +// WithTransparencyLog configures the Verifier to expect +// Transparency Log inclusion proofs or SignedEntryTimestamps, verifying them +// using the TrustedMaterial's RekorLogs(). +func WithTransparencyLog(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("transparency log entry threshold must be at least 1") + } + c.requireTlogEntries = true + c.tlogEntriesThreshold = threshold + return nil + } +} + +// WithIntegratedTimestamps configures the Verifier to +// expect log entry integrated timestamps from either SignedEntryTimestamps +// or live log lookups. +func WithIntegratedTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + c.requireIntegratedTimestamps = true + c.integratedTimeThreshold = threshold + return nil + } +} + +// WithSignedCertificateTimestamps configures the Verifier to +// expect the Fulcio certificate to have a SignedCertificateTimestamp, and +// verify it using the TrustedMaterial's CTLogAuthorities(). +func WithSignedCertificateTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("ctlog entry threshold must be at least 1") + } + c.requireSCTs = true + c.ctlogEntriesThreshold = threshold + return nil + } +} + +// WithCurrentTime configures the Verifier to not expect +// any timestamps from either a Timestamp Authority or a Transparency Log. 
+// This option should not be enabled when verifying short-lived certificates, +// as an observer timestamp is needed. This option is useful primarily for +// private deployments with long-lived code signing certificates. +func WithCurrentTime() VerifierOption { + return func(c *VerifierConfig) error { + c.useCurrentTime = true + return nil + } +} + +// WithNoObserverTimestamps configures the Verifier to not expect +// any timestamps from either a Timestamp Authority or a Transparency Log +// and to not use the current time to verify a certificate. This may only +// be used when verifying with keys rather than certificates. +func WithNoObserverTimestamps() VerifierOption { + return func(c *VerifierConfig) error { + c.allowNoTimestamp = true + return nil + } +} + +func (c *VerifierConfig) Validate() error { + if c.allowNoTimestamp && (c.requireObserverTimestamps || c.requireSignedTimestamps || c.requireIntegratedTimestamps || c.useCurrentTime) { + return errors.New("specify WithNoObserverTimestamps() without any other verifier options") + } + if !c.requireObserverTimestamps && !c.requireSignedTimestamps && !c.requireIntegratedTimestamps && !c.useCurrentTime && !c.allowNoTimestamp { + return errors.New("when initializing a new Verifier, you must specify at least one of " + + "WithObserverTimestamps(), WithSignedTimestamps(), WithIntegratedTimestamps() or WithCurrentTime(), " + + "or exclusively specify WithNoObserverTimestamps()") + } + + return nil +} + +type VerificationResult struct { + MediaType string `json:"mediaType"` + Statement *in_toto.Statement `json:"statement,omitempty"` + Signature *SignatureVerificationResult `json:"signature,omitempty"` + VerifiedTimestamps []TimestampVerificationResult `json:"verifiedTimestamps"` + VerifiedIdentity *CertificateIdentity `json:"verifiedIdentity,omitempty"` +} + +type SignatureVerificationResult struct { + PublicKeyID *[]byte `json:"publicKeyId,omitempty"` + Certificate *certificate.Summary `json:"certificate,omitempty"` +} + +type TimestampVerificationResult struct { + Type string `json:"type"` + URI string `json:"uri"` + Timestamp time.Time `json:"timestamp"` +} + +func NewVerificationResult() *VerificationResult { + return &VerificationResult{ + MediaType: VerificationResultMediaType01, + } +} + +// MarshalJSON deals with protojson needed for the Statement. +// Can be removed when https://github.com/in-toto/attestation/pull/403 is merged. +func (b *VerificationResult) MarshalJSON() ([]byte, error) { + statement, err := protojson.Marshal(b.Statement) + if err != nil { + return nil, err + } + // creating a type alias to avoid infinite recursion, as MarshalJSON is + // not copied into the alias. 
+ type Alias VerificationResult + return json.Marshal(struct { + Alias + Statement json.RawMessage `json:"statement,omitempty"` + }{ + Alias: Alias(*b), + Statement: statement, + }) +} + +func (b *VerificationResult) UnmarshalJSON(data []byte) error { + b.Statement = &in_toto.Statement{} + type Alias VerificationResult + aux := &struct { + Alias + Statement json.RawMessage `json:"statement,omitempty"` + }{ + Alias: Alias(*b), + } + if err := json.Unmarshal(data, aux); err != nil { + return err + } + return protojson.Unmarshal(aux.Statement, b.Statement) +} + +type PolicyOption func(*PolicyConfig) error +type ArtifactPolicyOption func(*PolicyConfig) error + +// PolicyBuilder is responsible for building & validating a PolicyConfig +type PolicyBuilder struct { + artifactPolicy ArtifactPolicyOption + policyOptions []PolicyOption +} + +func (pc PolicyBuilder) options() []PolicyOption { + arr := []PolicyOption{PolicyOption(pc.artifactPolicy)} + return append(arr, pc.policyOptions...) +} + +func (pc PolicyBuilder) BuildConfig() (*PolicyConfig, error) { + var err error + + policy := &PolicyConfig{} + for _, applyOption := range pc.options() { + err = applyOption(policy) + if err != nil { + return nil, err + } + } + + if err := policy.validate(); err != nil { + return nil, err + } + + return policy, nil +} + +type ArtifactDigest struct { + Algorithm string + Digest []byte +} + +type PolicyConfig struct { + ignoreArtifact bool + ignoreIdentities bool + requireSigningKey bool + certificateIdentities CertificateIdentities + verifyArtifacts bool + artifacts []io.Reader + verifyArtifactDigests bool + artifactDigests []ArtifactDigest +} + +func (p *PolicyConfig) withVerifyAlreadyConfigured() error { + if p.verifyArtifacts || p.verifyArtifactDigests { + return errors.New("only one invocation of WithArtifact/WithArtifacts/WithArtifactDigest/WithArtifactDigests is allowed") + } + + return nil +} + +func (p *PolicyConfig) validate() error { + if p.RequireIdentities() && len(p.certificateIdentities) == 0 { + return errors.New("can't verify identities without providing at least one identity") + } + + return nil +} + +// RequireArtifact returns true if the Verify algorithm should perform +// signature verification with an an artifact provided by either the +// WithArtifact or the WithArtifactDigest functions. +// +// By default, unless explicitly turned off, we should always expect to verify +// a SignedEntity's signature using an artifact. Bools are initialized to false, +// so this behaviour is therefore controlled by the ignoreArtifact field. +// +// Double negatives are confusing, though. To aid with comprehension of the +// main Verify loop, this function therefore just wraps the double negative. +func (p *PolicyConfig) RequireArtifact() bool { + return !p.ignoreArtifact +} + +// RequireIdentities returns true if the Verify algorithm should check +// whether the SignedEntity's certificate was created by one of the identities +// provided by the WithCertificateIdentity function. +// +// By default, unless explicitly turned off, we should always expect to enforce +// that a SignedEntity's certificate was created by an Identity we trust. Bools +// are initialized to false, so this behaviour is therefore controlled by the +// ignoreIdentities field. +// +// Double negatives are confusing, though. To aid with comprehension of the +// main Verify loop, this function therefore just wraps the double negative. 
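+//
+// For example, a policy built with WithoutIdentitiesUnsafe() sets
+// ignoreIdentities to true, so this method reports false and the certificate
+// identity check in Verify is skipped.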
+func (p *PolicyConfig) RequireIdentities() bool { + return !p.ignoreIdentities +} + +// RequireSigningKey returns true if we expect the SignedEntity to be signed +// with a key and not a certificate. +func (p *PolicyConfig) RequireSigningKey() bool { + return p.requireSigningKey +} + +func NewPolicy(artifactOpt ArtifactPolicyOption, options ...PolicyOption) PolicyBuilder { + return PolicyBuilder{artifactPolicy: artifactOpt, policyOptions: options} +} + +// WithoutIdentitiesUnsafe allows the caller of Verify to skip enforcing any +// checks on the identity that created the SignedEntity being verified. +// +// Do not use this option unless you know what you are doing! +// +// As the name implies, using WithoutIdentitiesUnsafe is not safe: outside of +// exceptional circumstances, we should always enforce that the SignedEntity +// being verified was signed by a trusted CertificateIdentity. +// +// For more information, consult WithCertificateIdentity. +func WithoutIdentitiesUnsafe() PolicyOption { + return func(p *PolicyConfig) error { + if len(p.certificateIdentities) > 0 { + return errors.New("can't use WithoutIdentitiesUnsafe while specifying CertificateIdentities") + } + + p.ignoreIdentities = true + return nil + } +} + +// WithCertificateIdentity allows the caller of Verify to enforce that the +// SignedEntity being verified was signed by the given identity, as defined by +// the Fulcio certificate embedded in the entity. If this policy is enabled, +// but the SignedEntity does not have a certificate, verification will fail. +// +// Providing this function multiple times will concatenate the provided +// CertificateIdentity to the list of identities being checked. +// +// If all of the provided CertificateIdentities fail to match the Fulcio +// certificate, then verification will fail. If *any* CertificateIdentity +// matches, then verification will succeed. Therefore, each CertificateIdentity +// provided to this function must define a "sufficient" identity to trust. +// +// The CertificateIdentity struct allows callers to specify: +// - The exact value, or Regexp, of the SubjectAlternativeName +// - The exact value of any Fulcio OID X.509 extension, i.e. Issuer +// +// For convenience, consult the NewShortCertificateIdentity function. +func WithCertificateIdentity(identity CertificateIdentity) PolicyOption { + return func(p *PolicyConfig) error { + if p.ignoreIdentities { + return errors.New("can't use WithCertificateIdentity while using WithoutIdentitiesUnsafe") + } + if p.requireSigningKey { + return errors.New("can't use WithCertificateIdentity while using WithKey") + } + + p.certificateIdentities = append(p.certificateIdentities, identity) + return nil + } +} + +// WithKey allows the caller of Verify to require the SignedEntity being +// verified was signed with a key and not a certificate. +func WithKey() PolicyOption { + return func(p *PolicyConfig) error { + if len(p.certificateIdentities) > 0 { + return errors.New("can't use WithKey while using WithCertificateIdentity") + } + + p.requireSigningKey = true + p.ignoreIdentities = true + return nil + } +} + +// WithoutArtifactUnsafe allows the caller of Verify to skip checking whether +// the SignedEntity was created from, or references, an artifact. +// +// WithoutArtifactUnsafe can only be used with SignedEntities that contain a +// DSSE envelope. 
If the the SignedEntity has a MessageSignature, providing +// this policy option will cause verification to always fail, since +// MessageSignatures can only be verified in the presence of an Artifact or +// artifact digest. See WithArtifact/WithArtifactDigest for more information. +// +// Do not use this function unless you know what you are doing! +// +// As the name implies, using WithoutArtifactUnsafe is not safe: outside of +// exceptional circumstances, SignedEntities should always be verified with +// an artifact. +func WithoutArtifactUnsafe() ArtifactPolicyOption { + return func(p *PolicyConfig) error { + if err := p.withVerifyAlreadyConfigured(); err != nil { + return err + } + + p.ignoreArtifact = true + return nil + } +} + +// WithArtifact allows the caller of Verify to enforce that the SignedEntity +// being verified was created from, or references, a given artifact. +// +// If the SignedEntity contains a DSSE envelope, then the artifact digest is +// calculated from the given artifact, and compared to the digest in the +// envelope's statement. +func WithArtifact(artifact io.Reader) ArtifactPolicyOption { + return func(p *PolicyConfig) error { + if err := p.withVerifyAlreadyConfigured(); err != nil { + return err + } + + if p.ignoreArtifact { + return errors.New("can't use WithArtifact while using WithoutArtifactUnsafe") + } + + p.verifyArtifacts = true + p.artifacts = []io.Reader{artifact} + return nil + } +} + +// WithArtifacts allows the caller of Verify to enforce that the SignedEntity +// being verified was created from, or references, a slice of artifacts. +// +// If the SignedEntity contains a DSSE envelope, then the artifact digest is +// calculated from the given artifact, and compared to the digest in the +// envelope's statement. +func WithArtifacts(artifacts []io.Reader) ArtifactPolicyOption { + return func(p *PolicyConfig) error { + if err := p.withVerifyAlreadyConfigured(); err != nil { + return err + } + + if p.ignoreArtifact { + return errors.New("can't use WithArtifacts while using WithoutArtifactUnsafe") + } + + p.verifyArtifacts = true + p.artifacts = artifacts + return nil + } +} + +// WithArtifactDigest allows the caller of Verify to enforce that the +// SignedEntity being verified was created for a given artifact digest. +// +// If the SignedEntity contains a MessageSignature that was signed using the +// ED25519 algorithm, then providing only an artifactDigest will fail; the +// whole artifact must be provided. Use WithArtifact instead. +// +// If the SignedEntity contains a DSSE envelope, then the artifact digest is +// compared to the digest in the envelope's statement. +func WithArtifactDigest(algorithm string, artifactDigest []byte) ArtifactPolicyOption { + return func(p *PolicyConfig) error { + if err := p.withVerifyAlreadyConfigured(); err != nil { + return err + } + + if p.ignoreArtifact { + return errors.New("can't use WithArtifactDigest while using WithoutArtifactUnsafe") + } + + p.verifyArtifactDigests = true + p.artifactDigests = []ArtifactDigest{{ + Algorithm: algorithm, + Digest: artifactDigest, + }} + return nil + } +} + +// WithArtifactDigests allows the caller of Verify to enforce that the +// SignedEntity being verified was created for a given array of artifact digests. +// +// If the SignedEntity contains a DSSE envelope, then the artifact digests +// are compared to the digests in the envelope's statement. +// +// If the SignedEntity does not contain a DSSE envelope, verification fails. 
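+//
+// A sketch of combining this option into a policy (certID, digestBytes, the
+// Verifier v, and the SignedEntity entity are assumed to be prepared by the
+// caller; Digest holds the raw, hex-decoded digest bytes):
+//
+//	digests := []ArtifactDigest{{Algorithm: "sha256", Digest: digestBytes}}
+//	policy := NewPolicy(WithArtifactDigests(digests), WithCertificateIdentity(certID))
+//	result, err := v.Verify(entity, policy)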
+func WithArtifactDigests(digests []ArtifactDigest) ArtifactPolicyOption { + return func(p *PolicyConfig) error { + if err := p.withVerifyAlreadyConfigured(); err != nil { + return err + } + + if p.ignoreArtifact { + return errors.New("can't use WithArtifactDigests while using WithoutArtifactUnsafe") + } + + p.verifyArtifactDigests = true + p.artifactDigests = digests + return nil + } +} + +// Verify checks the cryptographic integrity of a given SignedEntity according +// to the options configured in the NewVerifier. Its purpose is to +// determine whether the SignedEntity was created by a Sigstore deployment we +// trust, as defined by keys in our TrustedMaterial. +// +// If the SignedEntity contains a MessageSignature, then the artifact or its +// digest must be provided to the Verify function, as it is required to verify +// the signature. See WithArtifact and WithArtifactDigest for more details. +// +// If and only if verification is successful, Verify will return a +// VerificationResult struct whose contents' integrity have been verified. +// Verify may then verify the contents of the VerificationResults using supplied +// PolicyOptions. See WithCertificateIdentity for more details. +// +// Callers of this function SHOULD ALWAYS: +// - (if the signed entity has a certificate) verify that its Subject Alternate +// Name matches a trusted identity, and that its OID Issuer field matches an +// expected value +// - (if the signed entity has a dsse envelope) verify that the envelope's +// statement's subject matches the artifact being verified +func (v *Verifier) Verify(entity SignedEntity, pb PolicyBuilder) (*VerificationResult, error) { + policy, err := pb.BuildConfig() + if err != nil { + return nil, fmt.Errorf("failed to build policy: %w", err) + } + + // Let's go by the spec: https://docs.google.com/document/d/1kbhK2qyPPk8SLavHzYSDM8-Ueul9_oxIMVFuWMWKz0E/edit#heading=h.g11ovq2s1jxh + // > ## Transparency Log Entry + verifiedTlogTimestamps, err := v.VerifyTransparencyLogInclusion(entity) + if err != nil { + return nil, fmt.Errorf("failed to verify log inclusion: %w", err) + } + + // > ## Establishing a Time for the Signature + // > First, establish a time for the signature. This timestamp is required to validate the certificate chain, so this step comes first. 
+ verifiedTimestamps, err := v.VerifyObserverTimestamps(entity, verifiedTlogTimestamps) + if err != nil { + return nil, fmt.Errorf("failed to verify timestamps: %w", err) + } + + verificationContent, err := entity.VerificationContent() + if err != nil { + return nil, fmt.Errorf("failed to fetch verification content: %w", err) + } + + var signedWithCertificate bool + var certSummary certificate.Summary + + // If the bundle was signed with a long-lived key, and does not have a Fulcio certificate, + // then skip the certificate verification steps + if leafCert := verificationContent.Certificate(); leafCert != nil { + if policy.RequireSigningKey() { + return nil, errors.New("expected key signature, not certificate") + } + if v.config.allowNoTimestamp { + return nil, errors.New("must provide timestamp to verify certificate") + } + + signedWithCertificate = true + + // Get the summary before modifying the cert extensions + certSummary, err = certificate.SummarizeCertificate(leafCert) + if err != nil { + return nil, fmt.Errorf("failed to summarize certificate: %w", err) + } + + // From spec: + // > ## Certificate + // > … + // > The Verifier MUST perform certification path validation (RFC 5280 §6) of the certificate chain with the pre-distributed Fulcio root certificate(s) as a trust anchor, but with a fake “current time.” If a timestamp from the timestamping service is available, the Verifier MUST perform path validation using the timestamp from the Timestamping Service. If a timestamp from the Transparency Service is available, the Verifier MUST perform path validation using the timestamp from the Transparency Service. If both are available, the Verifier performs path validation twice. If either fails, verification fails. + + // Go does not support the OtherName GeneralName SAN extension. If + // Fulcio issued the certificate with an OtherName SAN, it will be + // handled by SummarizeCertificate above, and it must be removed here + // or the X.509 verification will fail. + if len(leafCert.UnhandledCriticalExtensions) > 0 { + var unhandledExts []asn1.ObjectIdentifier + for _, oid := range leafCert.UnhandledCriticalExtensions { + if !oid.Equal(cryptoutils.SANOID) { + unhandledExts = append(unhandledExts, oid) + } + } + leafCert.UnhandledCriticalExtensions = unhandledExts + } + + var chains [][]*x509.Certificate + for _, verifiedTs := range verifiedTimestamps { + // verify the leaf certificate against the root + chains, err = VerifyLeafCertificate(verifiedTs.Timestamp, leafCert, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify leaf certificate: %w", err) + } + } + + // From spec: + // > Unless performing online verification (see §Alternative Workflows), the Verifier MUST extract the SignedCertificateTimestamp embedded in the leaf certificate, and verify it as in RFC 9162 §8.1.3, using the verification key from the Certificate Transparency Log. 
+ + if v.config.requireSCTs { + err = VerifySignedCertificateTimestamp(chains, v.config.ctlogEntriesThreshold, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify signed certificate timestamp: %w", err) + } + } + } + + // If SCTs are required, ensure the bundle is certificate-signed not public key-signed + if v.config.requireSCTs { + if verificationContent.PublicKey() != nil { + return nil, errors.New("SCTs required but bundle is signed with a public key, which cannot contain SCTs") + } + } + + // From spec: + // > ## Signature Verification + // > The Verifier MUST verify the provided signature for the constructed payload against the key in the leaf of the certificate chain. + + sigContent, err := entity.SignatureContent() + if err != nil { + return nil, fmt.Errorf("failed to fetch signature content: %w", err) + } + + entityVersion, err := entity.Version() + if err != nil { + return nil, fmt.Errorf("failed to fetch entity version: %w", err) + } + + var enableCompat bool + switch entityVersion { + case "v0.1": + fallthrough + case "0.1": + fallthrough + case "v0.2": + fallthrough + case "0.2": + fallthrough + case "v0.3": + fallthrough + case "0.3": + enableCompat = true + } + verifier, err := getSignatureVerifier(sigContent, verificationContent, v.trustedMaterial, enableCompat) + if err != nil { + return nil, fmt.Errorf("failed to get signature verifier: %w", err) + } + + if policy.RequireArtifact() { + switch { + case policy.verifyArtifacts: + err = verifySignatureWithVerifierAndArtifacts(verifier, sigContent, verificationContent, v.trustedMaterial, policy.artifacts) + case policy.verifyArtifactDigests: + err = verifySignatureWithVerifierAndArtifactDigests(verifier, sigContent, verificationContent, v.trustedMaterial, policy.artifactDigests) + default: + // should never happen, but just in case: + err = errors.New("no artifact or artifact digest provided") + } + } else { + // verifying with artifact has been explicitly turned off, so just check + // the signature on the dsse envelope: + err = verifySignatureWithVerifier(verifier, sigContent, verificationContent, v.trustedMaterial) + } + + if err != nil { + return nil, fmt.Errorf("failed to verify signature: %w", err) + } + + // Hooray! We've verified all of the entity's constituent parts! 🎉 🥳 + // Now we can construct the results object accordingly. + result := NewVerificationResult() + if signedWithCertificate { + result.Signature = &SignatureVerificationResult{ + Certificate: &certSummary, + } + } else { + pubKeyID := []byte(verificationContent.PublicKey().Hint()) + result.Signature = &SignatureVerificationResult{ + PublicKeyID: &pubKeyID, + } + } + + // SignatureContent can be either an Envelope or a MessageSignature. + // If it's an Envelope, let's pop the Statement for our results: + if envelope := sigContent.EnvelopeContent(); envelope != nil { + stmt, err := envelope.Statement() + if err != nil { + return nil, fmt.Errorf("failed to fetch envelope statement: %w", err) + } + + result.Statement = stmt + } + + result.VerifiedTimestamps = verifiedTimestamps + + // Now that the signed entity's crypto material has been verified, and the + // result struct has been constructed, we can optionally enforce some + // additional policies: + // -------------------- + + // From ## Certificate section, + // >The Verifier MUST then check the certificate against the verification policy. 
Details on how to do this depend on the verification policy, but the Verifier SHOULD check the Issuer X.509 extension (OID 1.3.6.1.4.1.57264.1.1) at a minimum, and will in most cases check the SubjectAlternativeName as well. See Spec: Fulcio §TODO for example checks on the certificate. + if policy.RequireIdentities() { + if !signedWithCertificate { + // We got asked to verify identities, but the entity was not signed with + // a certificate. That's a problem! + return nil, errors.New("can't verify certificate identities: entity was not signed with a certificate") + } + + if len(policy.certificateIdentities) == 0 { + return nil, errors.New("can't verify certificate identities: no identities provided") + } + + matchingCertID, err := policy.certificateIdentities.Verify(certSummary) + if err != nil { + return nil, fmt.Errorf("failed to verify certificate identity: %w", err) + } + + result.VerifiedIdentity = matchingCertID + } + + return result, nil +} + +// VerifyTransparencyLogInclusion verifies TlogEntries if expected. Optionally returns +// a list of verified timestamps from the log integrated timestamps when verifying +// with observer timestamps. +// TODO: Return a different verification result for logs specifically (also for #48) +func (v *Verifier) VerifyTransparencyLogInclusion(entity SignedEntity) ([]TimestampVerificationResult, error) { + verifiedTimestamps := []TimestampVerificationResult{} + + if v.config.requireTlogEntries { + // log timestamps should be verified if with WithIntegratedTimestamps or WithObserverTimestamps is used + verifiedTlogTimestamps, err := VerifyTlogEntry(entity, v.trustedMaterial, v.config.tlogEntriesThreshold, + v.config.requireIntegratedTimestamps || v.config.requireObserverTimestamps) + if err != nil { + return nil, err + } + + for _, vts := range verifiedTlogTimestamps { + verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "Tlog", URI: vts.URI, Timestamp: vts.Time}) + } + } + + return verifiedTimestamps, nil +} + +// VerifyObserverTimestamps verifies RFC3161 signed timestamps, and verifies +// that timestamp thresholds are met with log entry integrated timestamps, +// signed timestamps, or a combination of both. The returned timestamps +// can be used to verify short-lived certificates. +// logTimestamps may be populated with verified log entry integrated timestamps +// In order to be verifiable, a SignedEntity must have at least one verified +// "observer timestamp". +func (v *Verifier) VerifyObserverTimestamps(entity SignedEntity, logTimestamps []TimestampVerificationResult) ([]TimestampVerificationResult, error) { + verifiedTimestamps := []TimestampVerificationResult{} + + // From spec: + // > … if verification or timestamp parsing fails, the Verifier MUST abort + if v.config.requireSignedTimestamps { + verifiedSignedTimestamps, err := VerifySignedTimestampWithThreshold(entity, v.trustedMaterial, v.config.signedTimestampThreshold) + if err != nil { + return nil, err + } + for _, vts := range verifiedSignedTimestamps { + verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time}) + } + } + + if v.config.requireIntegratedTimestamps { + if len(logTimestamps) < v.config.integratedTimeThreshold { + return nil, fmt.Errorf("threshold not met for verified log entry integrated timestamps: %d < %d", len(logTimestamps), v.config.integratedTimeThreshold) + } + verifiedTimestamps = append(verifiedTimestamps, logTimestamps...) 
+ } + + if v.config.requireObserverTimestamps { + verifiedSignedTimestamps, verificationErrors, err := VerifySignedTimestamp(entity, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify signed timestamps: %w", err) + } + + // check threshold for both RFC3161 and log timestamps + tsCount := len(verifiedSignedTimestamps) + len(logTimestamps) + if tsCount < v.config.observerTimestampThreshold { + return nil, fmt.Errorf("threshold not met for verified signed & log entry integrated timestamps: %d < %d; error: %w", + tsCount, v.config.observerTimestampThreshold, errors.Join(verificationErrors...)) + } + + // append all timestamps + verifiedTimestamps = append(verifiedTimestamps, logTimestamps...) + for _, vts := range verifiedSignedTimestamps { + verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time}) + } + } + + if v.config.useCurrentTime { + // use current time to verify certificate if no signed timestamps are provided + verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "CurrentTime", URI: "", Timestamp: time.Now()}) + } + + if len(verifiedTimestamps) == 0 && !v.config.allowNoTimestamp { + return nil, fmt.Errorf("no valid observer timestamps found") + } + + return verifiedTimestamps, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go new file mode 100644 index 000000000000..eff84be07e1d --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go @@ -0,0 +1,187 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "bytes" + "crypto" + "encoding/hex" + "errors" + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tlog" + "github.com/sigstore/sigstore/pkg/signature" +) + +const maxAllowedTlogEntries = 32 + +// VerifyTlogEntry verifies that the given entity has been logged +// in the transparency log and that the log entry is valid. +// +// The threshold parameter is the number of unique transparency log entries +// that must be verified. 
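+//
+// When trustIntegratedTime is true, the integrated timestamps of entries with
+// verified inclusion promises are also returned, so they can be counted as
+// observer timestamps when verifying short-lived certificates.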
+func VerifyTlogEntry(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive + entries, err := entity.TlogEntries() + if err != nil { + return nil, err + } + + // limit the number of tlog entries to prevent DoS + if len(entries) > maxAllowedTlogEntries { + return nil, fmt.Errorf("too many tlog entries: %d > %d", len(entries), maxAllowedTlogEntries) + } + + // disallow duplicate entries, as a malicious actor could use duplicates to bypass the threshold + for i := 0; i < len(entries); i++ { + for j := i + 1; j < len(entries); j++ { + if entries[i].LogKeyID() == entries[j].LogKeyID() && entries[i].LogIndex() == entries[j].LogIndex() { + return nil, errors.New("duplicate tlog entries found") + } + } + } + + sigContent, err := entity.SignatureContent() + if err != nil { + return nil, err + } + + entitySignature := sigContent.Signature() + + verificationContent, err := entity.VerificationContent() + if err != nil { + return nil, err + } + + verifiedTimestamps := []root.Timestamp{} + logEntriesVerified := 0 + + for _, entry := range entries { + err := tlog.ValidateEntry(entry) + if err != nil { + return nil, err + } + + rekorLogs := trustedMaterial.RekorLogs() + keyID := entry.LogKeyID() + hex64Key := hex.EncodeToString([]byte(keyID)) + tlogVerifier, ok := trustedMaterial.RekorLogs()[hex64Key] + if !ok { + // skip entries the trust root cannot verify + continue + } + + if !entry.HasInclusionPromise() && !entry.HasInclusionProof() { + return nil, fmt.Errorf("entry must contain an inclusion proof and/or promise") + } + if entry.HasInclusionPromise() { + err = tlog.VerifySET(entry, rekorLogs) + if err != nil { + // skip entries the trust root cannot verify + continue + } + if trustIntegratedTime { + verifiedTimestamps = append(verifiedTimestamps, root.Timestamp{Time: entry.IntegratedTime(), URI: tlogVerifier.BaseURL}) + } + } + if entry.HasInclusionProof() { + verifier, err := getVerifier(tlogVerifier.PublicKey, tlogVerifier.SignatureHashFunc) + if err != nil { + return nil, err + } + + if hasRekorV1STH(entry) { + err = tlog.VerifyInclusion(entry, *verifier) + } else { + if tlogVerifier.BaseURL == "" { + return nil, fmt.Errorf("cannot verify Rekor v2 entry without baseUrl in transparency log's trusted root") + } + u, err := url.Parse(tlogVerifier.BaseURL) + if err != nil { + return nil, err + } + err = tlog.VerifyCheckpointAndInclusion(entry, *verifier, u.Hostname()) + if err != nil { + return nil, err + } + } + if err != nil { + return nil, err + } + // DO NOT use timestamp with only an inclusion proof, because it is not signed metadata + } + + // Ensure entry signature matches signature from bundle + if !bytes.Equal(entry.Signature(), entitySignature) { + return nil, errors.New("transparency log signature does not match") + } + + // Ensure entry certificate matches bundle certificate + if !verificationContent.CompareKey(entry.PublicKey(), trustedMaterial) { + return nil, errors.New("transparency log certificate does not match") + } + + // TODO: if you have access to artifact, check that it matches body subject + + // Check tlog entry time against bundle certificates + if !entry.IntegratedTime().IsZero() { + if !verificationContent.ValidAtTime(entry.IntegratedTime(), trustedMaterial) { + return nil, errors.New("integrated time outside certificate validity") + } + } + + // successful log entry verification + logEntriesVerified++ + } + + if logEntriesVerified < logThreshold { + return nil, fmt.Errorf("not 
enough verified log entries from transparency log: %d < %d", logEntriesVerified, logThreshold) + } + + return verifiedTimestamps, nil +} + +func getVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (*signature.Verifier, error) { + verifier, err := signature.LoadVerifier(publicKey, hashFunc) + if err != nil { + return nil, err + } + + return &verifier, nil +} + +// TODO: remove this deprecated function before 2.0 + +// Deprecated: use VerifyTlogEntry instead +func VerifyArtifactTransparencyLog(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive + return VerifyTlogEntry(entity, trustedMaterial, logThreshold, trustIntegratedTime) +} + +var treeIDSuffixRegex = regexp.MustCompile(".* - [0-9]+$") + +// hasRekorV1STH checks if the checkpoint has a Rekor v1-style Signed Tree Head +// which contains a numeric Tree ID as part of its checkpoint origin. +func hasRekorV1STH(entry *tlog.Entry) bool { + tle := entry.TransparencyLogEntry() + checkpointBody := tle.GetInclusionProof().GetCheckpoint().GetEnvelope() + checkpointLines := strings.Split(checkpointBody, "\n") + if len(checkpointLines) < 4 { + return false + } + return treeIDSuffixRegex.MatchString(checkpointLines[0]) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go new file mode 100644 index 000000000000..1c10445dd981 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go @@ -0,0 +1,120 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "errors" + "fmt" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +const maxAllowedTimestamps = 32 + +// VerifySignedTimestamp verifies that the given entity has been timestamped +// by a trusted timestamp authority and that the timestamp is valid. 
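+//
+// It returns the timestamps that verified successfully, a list of
+// per-timestamp errors for those that did not (including duplicates from the
+// same authority), and a non-nil error if the entity's timestamps or
+// signature content could not be read, or if the number of timestamps exceeds
+// the allowed maximum.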
+func VerifySignedTimestamp(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, []error, error) { //nolint:revive + signedTimestamps, err := entity.Timestamps() + if err != nil { + return nil, nil, err + } + + // limit the number of timestamps to prevent DoS + if len(signedTimestamps) > maxAllowedTimestamps { + return nil, nil, fmt.Errorf("too many signed timestamps: %d > %d", len(signedTimestamps), maxAllowedTimestamps) + } + sigContent, err := entity.SignatureContent() + if err != nil { + return nil, nil, err + } + + signatureBytes := sigContent.Signature() + + verifiedTimestamps := []*root.Timestamp{} + var verificationErrors []error + for _, timestamp := range signedTimestamps { + verifiedSignedTimestamp, err := verifySignedTimestamp(timestamp, signatureBytes, trustedMaterial) + if err != nil { + verificationErrors = append(verificationErrors, err) + continue + } + if isDuplicateTSA(verifiedTimestamps, verifiedSignedTimestamp) { + verificationErrors = append(verificationErrors, fmt.Errorf("duplicate timestamps from the same authority, ignoring %s", verifiedSignedTimestamp.URI)) + continue + } + + verifiedTimestamps = append(verifiedTimestamps, verifiedSignedTimestamp) + } + + return verifiedTimestamps, verificationErrors, nil +} + +// isDuplicateTSA checks if the given verified signed timestamp is a duplicate +// of any of the verified timestamps. +// This is used to prevent replay attacks and ensure a single compromised TSA +// cannot meet the threshold. +func isDuplicateTSA(verifiedTimestamps []*root.Timestamp, verifiedSignedTimestamp *root.Timestamp) bool { + for _, ts := range verifiedTimestamps { + if ts.URI == verifiedSignedTimestamp.URI { + return true + } + } + return false +} + +// VerifySignedTimestamp verifies that the given entity has been timestamped +// by a trusted timestamp authority and that the timestamp is valid. +// +// The threshold parameter is the number of unique timestamps that must be +// verified. +func VerifySignedTimestampWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive + verifiedTimestamps, verificationErrors, err := VerifySignedTimestamp(entity, trustedMaterial) + if err != nil { + return nil, err + } + if len(verifiedTimestamps) < threshold { + return nil, fmt.Errorf("threshold not met for verified signed timestamps: %d < %d; error: %w", len(verifiedTimestamps), threshold, errors.Join(verificationErrors...)) + } + return verifiedTimestamps, nil +} + +func verifySignedTimestamp(signedTimestamp []byte, signatureBytes []byte, trustedMaterial root.TrustedMaterial) (*root.Timestamp, error) { + timestampAuthorities := trustedMaterial.TimestampingAuthorities() + + var errs []error + + // Iterate through TSA certificate authorities to find one that verifies + for _, tsa := range timestampAuthorities { + ts, err := tsa.Verify(signedTimestamp, signatureBytes) + if err == nil { + return ts, nil + } + errs = append(errs, err) + } + + return nil, fmt.Errorf("unable to verify signed timestamps: %w", errors.Join(errs...)) +} + +// TODO: remove below deprecated functions before 2.0 + +// Deprecated: use VerifySignedTimestamp instead. +func VerifyTimestampAuthority(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, []error, error) { //nolint:revive + return VerifySignedTimestamp(entity, trustedMaterial) +} + +// Deprecated: use VerifySignedTimestampWithThreshold instead. 
+func VerifyTimestampAuthorityWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive + return VerifySignedTimestampWithThreshold(entity, trustedMaterial, threshold) +} diff --git a/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt new file mode 100644 index 000000000000..7a01c8498642 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt @@ -0,0 +1,14 @@ + +Copyright 2021 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore/LICENSE b/vendor/github.com/sigstore/sigstore/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go new file mode 100644 index 000000000000..348f47bdc848 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go @@ -0,0 +1,173 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package cryptoutils implements support for working with encoded certificates, public keys, and private keys +package cryptoutils + +import ( + "bytes" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "time" +) + +const ( + // CertificatePEMType is the string "CERTIFICATE" to be used during PEM encoding and decoding + CertificatePEMType PEMType = "CERTIFICATE" +) + +// MarshalCertificateToPEM converts the provided X509 certificate into PEM format +func MarshalCertificateToPEM(cert *x509.Certificate) ([]byte, error) { + if cert == nil { + return nil, errors.New("nil certificate provided") + } + return PEMEncode(CertificatePEMType, cert.Raw), nil +} + +// MarshalCertificatesToPEM converts the provided X509 certificates into PEM format +func MarshalCertificatesToPEM(certs []*x509.Certificate) ([]byte, error) { + buf := bytes.Buffer{} + for _, cert := range certs { + pemBytes, err := MarshalCertificateToPEM(cert) + if err != nil { + return nil, err + } + _, _ = buf.Write(pemBytes) + } + return buf.Bytes(), nil +} + +// UnmarshalCertificatesFromPEM extracts one or more X509 certificates from the provided +// byte slice, which is assumed to be in PEM-encoded format. +func UnmarshalCertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) { + result := []*x509.Certificate{} + remaining := pemBytes + remaining = bytes.TrimSpace(remaining) + + for len(remaining) > 0 { + var certDer *pem.Block + certDer, remaining = pem.Decode(remaining) + + if certDer == nil { + return nil, errors.New("error during PEM decoding") + } + + cert, err := x509.ParseCertificate(certDer.Bytes) + if err != nil { + return nil, err + } + result = append(result, cert) + } + return result, nil +} + +// UnmarshalCertificatesFromPEMLimited extracts one or more X509 certificates from the provided +// byte slice, which is assumed to be in PEM-encoded format. Fails after a specified +// number of iterations. A reasonable limit is 10 iterations. +func UnmarshalCertificatesFromPEMLimited(pemBytes []byte, iterations int) ([]*x509.Certificate, error) { + result := []*x509.Certificate{} + remaining := pemBytes + remaining = bytes.TrimSpace(remaining) + + count := 0 + for len(remaining) > 0 { + if count == iterations { + return nil, errors.New("too many certificates specified in PEM block") + } + var certDer *pem.Block + certDer, remaining = pem.Decode(remaining) + + if certDer == nil { + return nil, errors.New("error during PEM decoding") + } + + cert, err := x509.ParseCertificate(certDer.Bytes) + if err != nil { + return nil, err + } + result = append(result, cert) + count++ + } + return result, nil +} + +// LoadCertificatesFromPEM extracts one or more X509 certificates from the provided +// io.Reader. +func LoadCertificatesFromPEM(pem io.Reader) ([]*x509.Certificate, error) { + fileBytes, err := io.ReadAll(pem) + if err != nil { + return nil, err + } + return UnmarshalCertificatesFromPEM(fileBytes) +} + +func formatTime(t time.Time) string { + return t.UTC().Format(time.RFC3339) +} + +// CheckExpiration verifies that epoch is during the validity period of +// the certificate provided. +// +// It returns nil if issueTime < epoch < expirationTime, and error otherwise. 
+func CheckExpiration(cert *x509.Certificate, epoch time.Time) error { + if cert == nil { + return errors.New("certificate is nil") + } + if cert.NotAfter.Before(epoch) { + return fmt.Errorf("certificate expiration time %s is before %s", formatTime(cert.NotAfter), formatTime(epoch)) + } + if cert.NotBefore.After(epoch) { + return fmt.Errorf("certificate issued time %s is before %s", formatTime(cert.NotBefore), formatTime(epoch)) + } + return nil +} + +// ParseCSR parses a PKCS#10 PEM-encoded CSR. +func ParseCSR(csr []byte) (*x509.CertificateRequest, error) { + derBlock, _ := pem.Decode(csr) + if derBlock == nil || derBlock.Bytes == nil { + return nil, errors.New("no CSR found while decoding") + } + correctType := false + acceptedHeaders := []string{"CERTIFICATE REQUEST", "NEW CERTIFICATE REQUEST"} + for _, v := range acceptedHeaders { + if derBlock.Type == v { + correctType = true + } + } + if !correctType { + return nil, fmt.Errorf("DER type %v is not of any type %v for CSR", derBlock.Type, acceptedHeaders) + } + + return x509.ParseCertificateRequest(derBlock.Bytes) +} + +// GenerateSerialNumber creates a compliant serial number as per RFC 5280 4.1.2.2. +// Serial numbers must be positive, and can be no longer than 20 bytes. +// The serial number is generated with 159 bits, so that the first bit will always +// be 0, resulting in a positive serial number. +func GenerateSerialNumber() (*big.Int, error) { + // Pick a random number from 0 to 2^159. + serial, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) + if err != nil { + return nil, errors.New("error generating serial number") + } + return serial, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go new file mode 100644 index 000000000000..4e7e73d551b5 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cryptoutils contains utilities related to handling cryptographic materials. +package cryptoutils diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go new file mode 100644 index 000000000000..3fa6e7ba5b5f --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go @@ -0,0 +1,31 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutils + +import ( + "encoding/pem" +) + +// PEMType is a specific type for string constants used during PEM encoding and decoding +type PEMType string + +// PEMEncode encodes the specified byte slice in PEM format using the provided type string +func PEMEncode(typeStr PEMType, bytes []byte) []byte { + return pem.EncodeToMemory(&pem.Block{ + Type: string(typeStr), + Bytes: bytes, + }) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go new file mode 100644 index 000000000000..89dd05e01702 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go @@ -0,0 +1,94 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutils + +import ( + "errors" + "fmt" + "io" + "os" + + "golang.org/x/term" +) + +// PassFunc is a type of function that takes a boolean (representing whether confirmation is desired) and returns the password as read, along with an error if one occurred +type PassFunc func(bool) ([]byte, error) + +// Read is for fuzzing +var Read = readPasswordFn + +// readPasswordFn reads the password from the following sources, in order of preference: +// +// - COSIGN_PASSWORD environment variable +// +// - user input from from terminal (if present) +// +// - provided to stdin from pipe +func readPasswordFn() func() ([]byte, error) { + if pw, ok := os.LookupEnv("COSIGN_PASSWORD"); ok { + return func() ([]byte, error) { + return []byte(pw), nil + } + } + if term.IsTerminal(0) { + return func() ([]byte, error) { + return term.ReadPassword(0) + } + } + // Handle piped in passwords. + return func() ([]byte, error) { + return io.ReadAll(os.Stdin) + } +} + +// StaticPasswordFunc returns a PassFunc which returns the provided password. +func StaticPasswordFunc(pw []byte) PassFunc { + return func(bool) ([]byte, error) { + return pw, nil + } +} + +// SkipPassword is a PassFunc that does not interact with a user, but +// simply returns nil for both the password result and error struct. +func SkipPassword(_ bool) ([]byte, error) { + return nil, nil +} + +// GetPasswordFromStdIn gathers the password from stdin with an +// optional confirmation step. 
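+// When confirm is true, the password must be entered twice and both entries
+// must match; otherwise an error is returned.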
+func GetPasswordFromStdIn(confirm bool) ([]byte, error) { + read := Read() + fmt.Fprint(os.Stderr, "Enter password for private key: ") + pw1, err := read() + fmt.Fprintln(os.Stderr) + if err != nil { + return nil, err + } + if !confirm { + return pw1, nil + } + fmt.Fprint(os.Stderr, "Enter again: ") + pw2, err := read() + fmt.Fprintln(os.Stderr) + if err != nil { + return nil, err + } + + if string(pw1) != string(pw2) { + return nil, errors.New("passwords do not match") + } + return pw1, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go new file mode 100644 index 000000000000..325813d6923c --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go @@ -0,0 +1,152 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutils + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + + "github.com/secure-systems-lab/go-securesystemslib/encrypted" +) + +const ( + // PrivateKeyPEMType is the string "PRIVATE KEY" to be used during PEM encoding and decoding + PrivateKeyPEMType PEMType = "PRIVATE KEY" + // ECPrivateKeyPEMType is the string "EC PRIVATE KEY" used to parse SEC 1 EC private keys + ECPrivateKeyPEMType PEMType = "EC PRIVATE KEY" + // PKCS1PrivateKeyPEMType is the string "RSA PRIVATE KEY" used to parse PKCS#1-encoded private keys + PKCS1PrivateKeyPEMType PEMType = "RSA PRIVATE KEY" + encryptedCosignPrivateKeyPEMType PEMType = "ENCRYPTED COSIGN PRIVATE KEY" + // EncryptedSigstorePrivateKeyPEMType is the string "ENCRYPTED SIGSTORE PRIVATE KEY" to be used during PEM encoding and decoding + EncryptedSigstorePrivateKeyPEMType PEMType = "ENCRYPTED SIGSTORE PRIVATE KEY" +) + +func pemEncodeKeyPair(priv crypto.PrivateKey, pub crypto.PublicKey, pf PassFunc) (privPEM, pubPEM []byte, err error) { + pubPEM, err = MarshalPublicKeyToPEM(pub) + if err != nil { + return nil, nil, err + } + derBytes, err := MarshalPrivateKeyToDER(priv) + if err != nil { + return nil, nil, err + } + + if pf == nil { + return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil + } + password, err := pf(true) + if err != nil { + return nil, nil, err + } + if password == nil { + return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil + } + if derBytes, err = encrypted.Encrypt(derBytes, password); err != nil { + return nil, nil, err + } + return PEMEncode(EncryptedSigstorePrivateKeyPEMType, derBytes), pubPEM, nil +} + +// GeneratePEMEncodedECDSAKeyPair generates an ECDSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded. 
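+// If pf is nil, or returns a nil password, the private key is encoded as an
+// unencrypted PKCS #8 "PRIVATE KEY" block; otherwise it is encrypted and
+// encoded as an "ENCRYPTED SIGSTORE PRIVATE KEY" block.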
+func GeneratePEMEncodedECDSAKeyPair(curve elliptic.Curve, pf PassFunc) (privPEM, pubPEM []byte, err error) { + priv, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, nil, err + } + return pemEncodeKeyPair(priv, priv.Public(), pf) +} + +// GeneratePEMEncodedRSAKeyPair generates an RSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded. +func GeneratePEMEncodedRSAKeyPair(keyLengthBits int, pf PassFunc) (privPEM, pubPEM []byte, err error) { + priv, err := rsa.GenerateKey(rand.Reader, keyLengthBits) + if err != nil { + return nil, nil, err + } + return pemEncodeKeyPair(priv, priv.Public(), pf) +} + +// MarshalPrivateKeyToEncryptedDER marshals the private key and encrypts the DER-encoded value using the specified password function +func MarshalPrivateKeyToEncryptedDER(priv crypto.PrivateKey, pf PassFunc) ([]byte, error) { + derKey, err := MarshalPrivateKeyToDER(priv) + if err != nil { + return nil, err + } + password, err := pf(true) + if err != nil { + return nil, err + } + if password == nil { + return nil, errors.New("password was nil") + } + return encrypted.Encrypt(derKey, password) +} + +// UnmarshalPEMToPrivateKey converts a PEM-encoded byte slice into a crypto.PrivateKey +func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey, error) { + derBlock, _ := pem.Decode(pemBytes) + if derBlock == nil { + return nil, errors.New("PEM decoding failed") + } + switch derBlock.Type { + case string(PrivateKeyPEMType): + return x509.ParsePKCS8PrivateKey(derBlock.Bytes) + case string(PKCS1PrivateKeyPEMType): + return x509.ParsePKCS1PrivateKey(derBlock.Bytes) + case string(ECPrivateKeyPEMType): + return x509.ParseECPrivateKey(derBlock.Bytes) + case string(EncryptedSigstorePrivateKeyPEMType), string(encryptedCosignPrivateKeyPEMType): + derBytes := derBlock.Bytes + if pf != nil { + password, err := pf(false) + if err != nil { + return nil, err + } + if password != nil { + derBytes, err = encrypted.Decrypt(derBytes, password) + if err != nil { + return nil, err + } + } + } + + return x509.ParsePKCS8PrivateKey(derBytes) + } + return nil, fmt.Errorf("unknown private key PEM file type: %v", derBlock.Type) +} + +// MarshalPrivateKeyToDER converts a crypto.PrivateKey into a PKCS8 ASN.1 DER byte slice +func MarshalPrivateKeyToDER(priv crypto.PrivateKey) ([]byte, error) { + if priv == nil { + return nil, errors.New("empty key") + } + return x509.MarshalPKCS8PrivateKey(priv) +} + +// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PKCS#8 PEM-encoded byte slice +func MarshalPrivateKeyToPEM(priv crypto.PrivateKey) ([]byte, error) { + derBytes, err := MarshalPrivateKeyToDER(priv) + if err != nil { + return nil, err + } + return PEMEncode(PrivateKeyPEMType, derBytes), nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go new file mode 100644 index 000000000000..63e789a8d2d1 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go @@ -0,0 +1,132 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutils + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha1" // nolint:gosec + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" +) + +const ( + // PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding + PublicKeyPEMType PEMType = "PUBLIC KEY" + // PKCS1PublicKeyPEMType is the string "RSA PUBLIC KEY" used to parse PKCS#1-encoded public keys + PKCS1PublicKeyPEMType PEMType = "RSA PUBLIC KEY" +) + +// subjectPublicKeyInfo is used to construct a subject key ID. +// https://tools.ietf.org/html/rfc5280#section-4.1.2.7 +type subjectPublicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + SubjectPublicKey asn1.BitString +} + +// UnmarshalPEMToPublicKey converts a PEM-encoded byte slice into a crypto.PublicKey +func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, error) { + derBytes, _ := pem.Decode(pemBytes) + if derBytes == nil { + return nil, errors.New("PEM decoding failed") + } + switch derBytes.Type { + case string(PublicKeyPEMType): + return x509.ParsePKIXPublicKey(derBytes.Bytes) + case string(PKCS1PublicKeyPEMType): + return x509.ParsePKCS1PublicKey(derBytes.Bytes) + default: + return nil, fmt.Errorf("unknown Public key PEM file type: %v. Are you passing the correct public key?", + derBytes.Type) + } +} + +// MarshalPublicKeyToDER converts a crypto.PublicKey into a PKIX, ASN.1 DER byte slice +func MarshalPublicKeyToDER(pub crypto.PublicKey) ([]byte, error) { + if pub == nil { + return nil, errors.New("empty key") + } + return x509.MarshalPKIXPublicKey(pub) +} + +// MarshalPublicKeyToPEM converts a crypto.PublicKey into a PEM-encoded byte slice +func MarshalPublicKeyToPEM(pub crypto.PublicKey) ([]byte, error) { + derBytes, err := MarshalPublicKeyToDER(pub) + if err != nil { + return nil, err + } + return PEMEncode(PublicKeyPEMType, derBytes), nil +} + +// SKID generates a 160-bit SHA-1 hash of the value of the BIT STRING +// subjectPublicKey (excluding the tag, length, and number of unused bits). +// https://tools.ietf.org/html/rfc5280#section-4.2.1.2 +func SKID(pub crypto.PublicKey) ([]byte, error) { + derPubBytes, err := x509.MarshalPKIXPublicKey(pub) + if err != nil { + return nil, err + } + var spki subjectPublicKeyInfo + if _, err := asn1.Unmarshal(derPubBytes, &spki); err != nil { + return nil, err + } + skid := sha1.Sum(spki.SubjectPublicKey.Bytes) // nolint:gosec + return skid[:], nil +} + +// EqualKeys compares two public keys. Supports RSA, ECDSA and ED25519. 
+// If not equal, the error message contains hex-encoded SHA1 hashes of the DER-encoded keys +func EqualKeys(first, second crypto.PublicKey) error { + switch pub := first.(type) { + case *rsa.PublicKey: + if !pub.Equal(second) { + return errors.New(genErrMsg(first, second, "rsa")) + } + case *ecdsa.PublicKey: + if !pub.Equal(second) { + return errors.New(genErrMsg(first, second, "ecdsa")) + } + case ed25519.PublicKey: + if !pub.Equal(second) { + return errors.New(genErrMsg(first, second, "ed25519")) + } + default: + return errors.New("unsupported key type") + } + return nil +} + +// genErrMsg generates an error message for EqualKeys +func genErrMsg(first, second crypto.PublicKey, keyType string) string { + msg := fmt.Sprintf("%s public keys are not equal", keyType) + // Calculate SKID to include in error message + firstSKID, err := SKID(first) + if err != nil { + return msg + } + secondSKID, err := SKID(second) + if err != nil { + return msg + } + return fmt.Sprintf("%s (%s, %s)", msg, hex.EncodeToString(firstSKID), hex.EncodeToString(secondSKID)) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/safestring.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/safestring.go new file mode 100644 index 000000000000..cdc69aec3778 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/safestring.go @@ -0,0 +1,34 @@ +// +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutils + +import ( + "crypto/rand" + "encoding/base64" +) + +// GenerateRandomURLSafeString generates a cryptographically secure random +// URL-safe string with the specified number of bits of entropy. +func GenerateRandomURLSafeString(entropyLength uint) string { + if entropyLength == 0 { + return "" + } + // Round up to the nearest byte to ensure minimum entropy is met + entropyBytes := (entropyLength + 7) / 8 + b := make([]byte, entropyBytes) + _, _ = rand.Read(b) + return base64.RawURLEncoding.EncodeToString(b) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go new file mode 100644 index 000000000000..abcea306a033 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go @@ -0,0 +1,152 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutils + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +var ( + // OIDOtherName is the OID for the OtherName SAN per RFC 5280 + OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7} + // SANOID is the OID for Subject Alternative Name per RFC 5280 + SANOID = asn1.ObjectIdentifier{2, 5, 29, 17} +) + +// OtherName describes a name related to a certificate which is not in one +// of the standard name formats. RFC 5280, 4.2.1.6: +// +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// value [0] EXPLICIT ANY DEFINED BY type-id } +// +// OtherName for Fulcio-issued certificates only supports UTF-8 strings as values. +type OtherName struct { + ID asn1.ObjectIdentifier + Value string `asn1:"utf8,explicit,tag:0"` +} + +// MarshalOtherNameSAN creates a Subject Alternative Name extension +// with an OtherName sequence. RFC 5280, 4.2.1.6: +// +// SubjectAltName ::= GeneralNames +// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName +// GeneralName ::= CHOICE { +// +// otherName [0] OtherName, +// ... } +func MarshalOtherNameSAN(name string, critical bool) (*pkix.Extension, error) { + o := OtherName{ + ID: OIDOtherName, + Value: name, + } + bytes, err := asn1.MarshalWithParams(o, "tag:0") + if err != nil { + return nil, err + } + + sans, err := asn1.Marshal([]asn1.RawValue{{FullBytes: bytes}}) + if err != nil { + return nil, err + } + return &pkix.Extension{ + Id: SANOID, + Critical: critical, + Value: sans, + }, nil +} + +// UnmarshalOtherNameSAN extracts a UTF-8 string from the OtherName +// field in the Subject Alternative Name extension. +func UnmarshalOtherNameSAN(exts []pkix.Extension) (string, error) { + var otherNames []string + + for _, e := range exts { + if !e.Id.Equal(SANOID) { + continue + } + + var seq asn1.RawValue + rest, err := asn1.Unmarshal(e.Value, &seq) + if err != nil { + return "", err + } else if len(rest) != 0 { + return "", fmt.Errorf("trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return "", asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return "", err + } + + // skip all GeneralName fields except OtherName + if v.Tag != 0 { + continue + } + + var other OtherName + if _, err := asn1.UnmarshalWithParams(v.FullBytes, &other, "tag:0"); err != nil { + return "", fmt.Errorf("could not parse requested OtherName SAN: %w", err) + } + if !other.ID.Equal(OIDOtherName) { + return "", fmt.Errorf("unexpected OID for OtherName, expected %v, got %v", OIDOtherName, other.ID) + } + otherNames = append(otherNames, other.Value) + } + } + + if len(otherNames) == 0 { + return "", errors.New("no OtherName found") + } + if len(otherNames) != 1 { + return "", errors.New("expected only one OtherName") + } + + return otherNames[0], nil +} + +// GetSubjectAlternateNames extracts all subject alternative names from +// the certificate, including email addresses, DNS, IP addresses, URIs, +// and OtherName SANs +func GetSubjectAlternateNames(cert *x509.Certificate) []string { + sans := []string{} + if cert == nil { + return sans + } + sans = append(sans, cert.DNSNames...) + sans = append(sans, cert.EmailAddresses...) 
+ for _, ip := range cert.IPAddresses { + sans = append(sans, ip.String()) + } + for _, uri := range cert.URIs { + sans = append(sans, uri.String()) + } + // ignore error if there's no OtherName SAN + otherName, _ := UnmarshalOtherNameSAN(cert.Extensions) + if len(otherName) > 0 { + sans = append(sans, otherName) + } + return sans +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go new file mode 100644 index 000000000000..02c032b02df6 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go @@ -0,0 +1,314 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "errors" + "fmt" + + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" +) + +// PublicKeyType represents the public key algorithm for a given signature algorithm. +type PublicKeyType uint + +const ( + // RSA public key + RSA PublicKeyType = iota + // ECDSA public key + ECDSA + // ED25519 public key + ED25519 +) + +// RSAKeySize represents the size of an RSA public key in bits. +type RSAKeySize int + +// AlgorithmDetails exposes relevant information for a given signature algorithm. +type AlgorithmDetails struct { + // knownAlgorithm is the signature algorithm that the following details refer to. + knownAlgorithm v1.PublicKeyDetails + + // keyType is the public key algorithm being used. + keyType PublicKeyType + + // hashType is the hash algorithm being used. + hashType crypto.Hash + + // protoHashType is the hash algorithm being used as a proto message, it must be the protobuf-specs + // v1.HashAlgorithm equivalent of the hashType. + protoHashType v1.HashAlgorithm + + // extraKeyParams contains any extra parameters required to check a given public key against this entry. + // + // The underlying type of these parameters is dependent on the keyType. + // For example, ECDSA algorithms will store an elliptic curve here whereas, RSA keys will store the key size. + // Algorithms that don't require any extra parameters leave this set to nil. + extraKeyParams interface{} + + // flagValue is a string representation of the signature algorithm that follows the naming conventions of CLI + // arguments that are used for Sigstore services. + flagValue string +} + +// GetSignatureAlgorithm returns the PublicKeyDetails associated with the algorithm details. +func (a AlgorithmDetails) GetSignatureAlgorithm() v1.PublicKeyDetails { + return a.knownAlgorithm +} + +// GetKeyType returns the PublicKeyType for the algorithm details. +func (a AlgorithmDetails) GetKeyType() PublicKeyType { + return a.keyType +} + +// GetHashType returns the hash algorithm that should be used with this algorithm. 
+func (a AlgorithmDetails) GetHashType() crypto.Hash { + return a.hashType +} + +// GetProtoHashType is a convenience method to get the protobuf-specs type of the hash algorithm. +func (a AlgorithmDetails) GetProtoHashType() v1.HashAlgorithm { + return a.protoHashType +} + +// GetRSAKeySize returns the RSA key size for the algorithm details, if the key type is RSA. +func (a AlgorithmDetails) GetRSAKeySize() (RSAKeySize, error) { + if a.keyType != RSA { + return 0, fmt.Errorf("unable to retrieve RSA key size for key type: %T", a.keyType) + } + rsaKeySize, ok := a.extraKeyParams.(RSAKeySize) + if !ok { + // This should be unreachable. + return 0, fmt.Errorf("unable to retrieve key size for RSA, malformed algorithm details?: %T", a.keyType) + } + return rsaKeySize, nil +} + +// GetECDSACurve returns the elliptic curve for the algorithm details, if the key type is ECDSA. +func (a AlgorithmDetails) GetECDSACurve() (*elliptic.Curve, error) { + if a.keyType != ECDSA { + return nil, fmt.Errorf("unable to retrieve ECDSA curve for key type: %T", a.keyType) + } + ecdsaCurve, ok := a.extraKeyParams.(elliptic.Curve) + if !ok { + // This should be unreachable. + return nil, fmt.Errorf("unable to retrieve curve for ECDSA, malformed algorithm details?: %T", a.keyType) + } + return &ecdsaCurve, nil +} + +func (a AlgorithmDetails) checkKey(pubKey crypto.PublicKey) (bool, error) { + switch a.keyType { + case RSA: + rsaKey, ok := pubKey.(*rsa.PublicKey) + if !ok { + return false, nil + } + keySize, err := a.GetRSAKeySize() + if err != nil { + return false, err + } + return rsaKey.Size()*8 == int(keySize), nil + case ECDSA: + ecdsaKey, ok := pubKey.(*ecdsa.PublicKey) + if !ok { + return false, nil + } + curve, err := a.GetECDSACurve() + if err != nil { + return false, err + } + return ecdsaKey.Curve == *curve, nil + case ED25519: + _, ok := pubKey.(ed25519.PublicKey) + return ok, nil + } + return false, fmt.Errorf("unrecognized key type: %T", a.keyType) +} + +func (a AlgorithmDetails) checkHash(hashType crypto.Hash) bool { + return a.hashType == hashType +} + +// Note that deprecated options in PublicKeyDetails are not included in this +// list, including PKCS1v1.5 encoded RSA. Refer to the v1.PublicKeyDetails enum +// for more details. 
+var supportedAlgorithms = []AlgorithmDetails{ + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(2048), "rsa-sign-pkcs1-2048-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(3072), "rsa-sign-pkcs1-3072-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pkcs1-4096-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(2048), "rsa-sign-pss-2048-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(3072), "rsa-sign-pss-3072-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pss-4092-sha256"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P256(), "ecdsa-sha2-256-nistp256"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, ECDSA, crypto.SHA384, v1.HashAlgorithm_SHA2_384, elliptic.P384(), "ecdsa-sha2-384-nistp384"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P384(), "ecdsa-sha2-256-nistp384"}, //nolint:staticcheck + {v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512, ECDSA, crypto.SHA512, v1.HashAlgorithm_SHA2_512, elliptic.P521(), "ecdsa-sha2-512-nistp521"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P521(), "ecdsa-sha2-256-nistp521"}, //nolint:staticcheck + {v1.PublicKeyDetails_PKIX_ED25519, ED25519, crypto.Hash(0), v1.HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED, nil, "ed25519"}, + {v1.PublicKeyDetails_PKIX_ED25519_PH, ED25519, crypto.SHA512, v1.HashAlgorithm_SHA2_512, nil, "ed25519-ph"}, +} + +// AlgorithmRegistryConfig represents a set of permitted algorithms for a given Sigstore service or component. +// +// Individual services may wish to restrict what algorithms are allowed to a subset of what is covered in the algorithm +// registry (represented by v1.PublicKeyDetails). +type AlgorithmRegistryConfig struct { + permittedAlgorithms []AlgorithmDetails +} + +// GetAlgorithmDetails retrieves a set of details for a given v1.PublicKeyDetails flag that allows users to +// introspect the public key algorithm, hash algorithm and more. +func GetAlgorithmDetails(knownSignatureAlgorithm v1.PublicKeyDetails) (AlgorithmDetails, error) { + for _, detail := range supportedAlgorithms { + if detail.knownAlgorithm == knownSignatureAlgorithm { + return detail, nil + } + } + return AlgorithmDetails{}, fmt.Errorf("could not find algorithm details for known signature algorithm: %s", knownSignatureAlgorithm) +} + +// NewAlgorithmRegistryConfig creates a new AlgorithmRegistryConfig for a set of permitted signature algorithms. +func NewAlgorithmRegistryConfig(algorithmConfig []v1.PublicKeyDetails) (*AlgorithmRegistryConfig, error) { + permittedAlgorithms := make([]AlgorithmDetails, 0, len(supportedAlgorithms)) + for _, algorithm := range algorithmConfig { + a, err := GetAlgorithmDetails(algorithm) + if err != nil { + return nil, err + } + permittedAlgorithms = append(permittedAlgorithms, a) + } + return &AlgorithmRegistryConfig{permittedAlgorithms: permittedAlgorithms}, nil +} + +// IsAlgorithmPermitted checks whether a given public key/hash algorithm combination is permitted by a registry config. 
+func (registryConfig AlgorithmRegistryConfig) IsAlgorithmPermitted(key crypto.PublicKey, hash crypto.Hash) (bool, error) { + for _, algorithm := range registryConfig.permittedAlgorithms { + keyMatch, err := algorithm.checkKey(key) + if err != nil { + return false, err + } + if keyMatch && algorithm.checkHash(hash) { + return true, nil + } + } + return false, nil +} + +// FormatSignatureAlgorithmFlag formats a v1.PublicKeyDetails to a string that conforms to the naming conventions +// of CLI arguments that are used for Sigstore services. +func FormatSignatureAlgorithmFlag(algorithm v1.PublicKeyDetails) (string, error) { + for _, a := range supportedAlgorithms { + if a.knownAlgorithm == algorithm { + return a.flagValue, nil + } + } + return "", fmt.Errorf("could not find matching flag for signature algorithm: %s", algorithm) +} + +// ParseSignatureAlgorithmFlag parses a string produced by FormatSignatureAlgorithmFlag and returns the corresponding +// v1.PublicKeyDetails value. +func ParseSignatureAlgorithmFlag(flag string) (v1.PublicKeyDetails, error) { + for _, a := range supportedAlgorithms { + if a.flagValue == flag { + return a.knownAlgorithm, nil + } + } + return v1.PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED, fmt.Errorf("could not find matching signature algorithm for flag: %s", flag) +} + +// GetDefaultPublicKeyDetails returns the default public key details for a given key. +// +// RSA 2048 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 +// RSA 3072 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 +// RSA 4096 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 +// ECDSA P256 => v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 +// ECDSA P384 => v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 +// ECDSA P521 => v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 +// ED25519 => v1.PublicKeyDetails_PKIX_ED25519_PH +// +// This function accepts LoadOptions, which are used to determine the default +// public key details when there may be ambiguities. For example, RSA keys may +// be PSS or PKCS1v1.5 encoded, and ED25519 keys may be used with PureEd25519 or +// with Ed25519ph. The Hash option is ignored if passed, because each of the +// supported algorithms already has a default hash. 
+func GetDefaultPublicKeyDetails(publicKey crypto.PublicKey, opts ...LoadOption) (v1.PublicKeyDetails, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := publicKey.(type) { + case *rsa.PublicKey: + if rsaPSSOptions != nil { + switch pk.Size() * 8 { + case 2048: + return v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, nil + case 3072: + return v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, nil + case 4096: + return v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, nil + } + } else { + switch pk.Size() * 8 { + case 2048: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, nil + case 3072: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, nil + case 4096: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, nil + } + } + case *ecdsa.PublicKey: + switch pk.Curve { + case elliptic.P256(): + return v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, nil + case elliptic.P384(): + return v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, nil + case elliptic.P521(): + return v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512, nil + } + case ed25519.PublicKey: + if useED25519ph { + return v1.PublicKeyDetails_PKIX_ED25519_PH, nil + } + return v1.PublicKeyDetails_PKIX_ED25519, nil + } + return v1.PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED, errors.New("unsupported public key type") +} + +// GetDefaultAlgorithmDetails returns the default algorithm details for a given +// key, according to GetDefaultPublicKeyDetails. +// +// This function accepts LoadOptions, which are used to determine the default +// algorithm details when there may be ambiguities. For example, RSA keys may be +// PSS or PKCS1v1.5 encoded, and ED25519 keys may be used with PureEd25519 or +// with Ed25519ph. The Hash option is ignored if passed, because each of the +// supported algorithms already has a default hash. +func GetDefaultAlgorithmDetails(publicKey crypto.PublicKey, opts ...LoadOption) (AlgorithmDetails, error) { + knownAlgorithm, err := GetDefaultPublicKeyDetails(publicKey, opts...) + if err != nil { + return AlgorithmDetails{}, err + } + return GetAlgorithmDetails(knownAlgorithm) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go new file mode 100644 index 000000000000..dbd3314f6e76 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package signature contains types and utilities related to Sigstore signatures. 
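A hedged sketch of how the algorithm registry above is meant to be consumed: look up the default details for a key, then restrict a service to an explicit allow-list and query it. The import paths mirror the vendored packages; the P-256 key is only an example.

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// P-256 maps to PKIX_ECDSA_P256_SHA_256 with SHA-256 as the default hash.
	details, err := signature.GetDefaultAlgorithmDetails(priv.Public())
	if err != nil {
		log.Fatal(err)
	}
	// Error ignored for brevity in this sketch.
	flag, _ := signature.FormatSignatureAlgorithmFlag(details.GetSignatureAlgorithm())
	fmt.Println("flag:", flag, "hash:", details.GetHashType())

	// Restrict a service to a subset of algorithms and check a key/hash pair.
	cfg, err := signature.NewAlgorithmRegistryConfig([]v1.PublicKeyDetails{
		v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256,
		v1.PublicKeyDetails_PKIX_ED25519,
	})
	if err != nil {
		log.Fatal(err)
	}
	ok, err := cfg.IsAlgorithmPermitted(priv.Public(), crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("P-256 + SHA-256 permitted:", ok)
}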
+package signature diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go new file mode 100644 index 000000000000..5f801095e3f8 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go @@ -0,0 +1,77 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dsse includes wrappers to support DSSE +package dsse + +import ( + "bytes" + "context" + "crypto" + "errors" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// SignerAdapter wraps a `sigstore/signature.Signer`, making it compatible with `go-securesystemslib/dsse.Signer`. +type SignerAdapter struct { + SignatureSigner signature.Signer + Pub crypto.PublicKey + Opts []signature.SignOption + PubKeyID string +} + +// Sign implements `go-securesystemslib/dsse.Signer` +func (a *SignerAdapter) Sign(ctx context.Context, data []byte) ([]byte, error) { + return a.SignatureSigner.SignMessage(bytes.NewReader(data), append(a.Opts, options.WithContext(ctx))...) +} + +// Verify disabled `go-securesystemslib/dsse.Verifier` +func (a *SignerAdapter) Verify(_ context.Context, _, _ []byte) error { + return errors.New("Verify disabled") +} + +// Public implements `go-securesystemslib/dsse.Verifier` +func (a *SignerAdapter) Public() crypto.PublicKey { + return a.Pub +} + +// KeyID implements `go-securesystemslib/dsse.Verifier` +func (a SignerAdapter) KeyID() (string, error) { + return a.PubKeyID, nil +} + +// VerifierAdapter wraps a `sigstore/signature.Verifier`, making it compatible with `go-securesystemslib/dsse.Verifier`. +type VerifierAdapter struct { + SignatureVerifier signature.Verifier + Pub crypto.PublicKey + PubKeyID string +} + +// Verify implements `go-securesystemslib/dsse.Verifier` +func (a *VerifierAdapter) Verify(ctx context.Context, data, sig []byte) error { + return a.SignatureVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithContext(ctx)) +} + +// Public implements `go-securesystemslib/dsse.Verifier` +func (a *VerifierAdapter) Public() crypto.PublicKey { + return a.Pub +} + +// KeyID implements `go-securesystemslib/dsse.Verifier` +func (a *VerifierAdapter) KeyID() (string, error) { + return a.PubKeyID, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go new file mode 100644 index 000000000000..f58f18068f0d --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dsse contains handlers for Dead Simple Signing Envelopes +package dsse diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go new file mode 100644 index 000000000000..628e2413c0ee --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go @@ -0,0 +1,152 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "bytes" + "context" + "crypto" + "encoding/base64" + "encoding/json" + "io" + + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore/pkg/signature" +) + +// WrapSigner returns a signature.Signer that uses the DSSE encoding format +func WrapSigner(s signature.Signer, payloadType string) signature.Signer { + return &wrappedSigner{ + s: s, + payloadType: payloadType, + } +} + +type wrappedSigner struct { + s signature.Signer + payloadType string +} + +// PublicKey returns the public key associated with the signer +func (w *wrappedSigner) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return w.s.PublicKey(opts...) +} + +// SignMessage signs the provided stream in the reader using the DSSE encoding format +func (w *wrappedSigner) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) { + p, err := io.ReadAll(r) + if err != nil { + return nil, err + } + pae := dsse.PAE(w.payloadType, p) + sig, err := w.s.SignMessage(bytes.NewReader(pae), opts...) + if err != nil { + return nil, err + } + + env := dsse.Envelope{ + PayloadType: w.payloadType, + Payload: base64.StdEncoding.EncodeToString(p), + Signatures: []dsse.Signature{ + { + Sig: base64.StdEncoding.EncodeToString(sig), + }, + }, + } + return json.Marshal(env) +} + +// WrapVerifier returns a signature.Verifier that uses the DSSE encoding format +func WrapVerifier(v signature.Verifier) signature.Verifier { + return &wrappedVerifier{ + v: v, + } +} + +type wrappedVerifier struct { + v signature.Verifier +} + +// PublicKey returns the public key associated with the verifier +func (w *wrappedVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return w.v.PublicKey(opts...) 
+} + +// VerifySignature verifies the signature specified in an DSSE envelope +func (w *wrappedVerifier) VerifySignature(s, _ io.Reader, _ ...signature.VerifyOption) error { + sig, err := io.ReadAll(s) + if err != nil { + return err + } + + env := dsse.Envelope{} + if err := json.Unmarshal(sig, &env); err != nil { + return err + } + + pub, err := w.PublicKey() + if err != nil { + return err + } + verifier, err := dsse.NewEnvelopeVerifier(&VerifierAdapter{ + SignatureVerifier: w.v, + + Pub: pub, + PubKeyID: "", // We do not want to limit verification to a specific key. + }) + if err != nil { + return err + } + + _, err = verifier.Verify(context.Background(), &env) + return err +} + +// WrapSignerVerifier returns a signature.SignerVerifier that uses the DSSE encoding format +func WrapSignerVerifier(sv signature.SignerVerifier, payloadType string) signature.SignerVerifier { + signer := &wrappedSigner{ + payloadType: payloadType, + s: sv, + } + verifier := &wrappedVerifier{ + v: sv, + } + + return &wrappedSignerVerifier{ + signer: signer, + verifier: verifier, + } +} + +type wrappedSignerVerifier struct { + signer *wrappedSigner + verifier *wrappedVerifier +} + +// PublicKey returns the public key associated with the verifier +func (w *wrappedSignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return w.signer.PublicKey(opts...) +} + +// VerifySignature verifies the signature specified in an DSSE envelope +func (w *wrappedSignerVerifier) VerifySignature(s, r io.Reader, opts ...signature.VerifyOption) error { + return w.verifier.VerifySignature(s, r, opts...) +} + +// SignMessage signs the provided stream in the reader using the DSSE encoding format +func (w *wrappedSignerVerifier) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) { + return w.signer.SignMessage(r, opts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go new file mode 100644 index 000000000000..34ebd5a41e8a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go @@ -0,0 +1,186 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
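To make the wrapper semantics concrete, a small sketch that wraps an in-memory ECDSA SignerVerifier (defined later in this patch) in the DSSE envelope format; the payload type string is an arbitrary example value, not something mandated by the library:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/dsse"
)

func main() {
	// Fresh P-256 key with SHA-256, purely for illustration.
	sv, _, err := signature.NewDefaultECDSASignerVerifier()
	if err != nil {
		log.Fatal(err)
	}

	// Wrap it so SignMessage emits a DSSE envelope and VerifySignature
	// consumes one. The payload type here is only an example value.
	wrapped := dsse.WrapSignerVerifier(sv, "application/vnd.example+json")

	env, err := wrapped.SignMessage(bytes.NewReader([]byte(`{"hello":"world"}`)))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(env)) // JSON envelope: payloadType, payload, signatures

	// The message reader is ignored by the DSSE verifier; the payload is
	// carried inside the envelope itself.
	if err := wrapped.VerifySignature(bytes.NewReader(env), nil); err != nil {
		log.Fatal(err)
	}
	fmt.Println("envelope verified")
}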
+ +package dsse + +import ( + "context" + "crypto" + "encoding/json" + "errors" + "io" + + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore/pkg/signature" +) + +type wrappedMultiSigner struct { + sLAdapters []dsse.Signer + payloadType string +} + +// WrapMultiSigner returns a signature.Signer that uses the DSSE encoding format +func WrapMultiSigner(payloadType string, sL ...signature.Signer) signature.Signer { + signerAdapterL := make([]dsse.Signer, 0, len(sL)) + for _, s := range sL { + pub, err := s.PublicKey() + if err != nil { + return nil + } + + keyID, err := dsse.SHA256KeyID(pub) + if err != nil { + keyID = "" + } + + signerAdapter := &SignerAdapter{ + SignatureSigner: s, + Pub: s.PublicKey, + PubKeyID: keyID, // We do not want to limit verification to a specific key. + } + + signerAdapterL = append(signerAdapterL, signerAdapter) + } + + return &wrappedMultiSigner{ + sLAdapters: signerAdapterL, + payloadType: payloadType, + } +} + +// PublicKey returns the public key associated with the signer +func (wL *wrappedMultiSigner) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return nil, errors.New("not supported for multi signatures") +} + +// SignMessage signs the provided stream in the reader using the DSSE encoding format +func (wL *wrappedMultiSigner) SignMessage(r io.Reader, _ ...signature.SignOption) ([]byte, error) { + p, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + envSigner, err := dsse.NewEnvelopeSigner(wL.sLAdapters...) + if err != nil { + return nil, err + } + + env, err := envSigner.SignPayload(context.Background(), wL.payloadType, p) + if err != nil { + return nil, err + } + + return json.Marshal(env) +} + +type wrappedMultiVerifier struct { + vLAdapters []dsse.Verifier + threshold int + payloadType string +} + +// WrapMultiVerifier returns a signature.Verifier that uses the DSSE encoding format +func WrapMultiVerifier(payloadType string, threshold int, vL ...signature.Verifier) signature.Verifier { + verifierAdapterL := make([]dsse.Verifier, 0, len(vL)) + for _, v := range vL { + pub, err := v.PublicKey() + if err != nil { + return nil + } + + keyID, err := dsse.SHA256KeyID(pub) + if err != nil { + keyID = "" + } + + verifierAdapter := &VerifierAdapter{ + SignatureVerifier: v, + Pub: v.PublicKey, + PubKeyID: keyID, // We do not want to limit verification to a specific key. + } + + verifierAdapterL = append(verifierAdapterL, verifierAdapter) + } + + return &wrappedMultiVerifier{ + vLAdapters: verifierAdapterL, + payloadType: payloadType, + threshold: threshold, + } +} + +// PublicKey returns the public key associated with the signer +func (wL *wrappedMultiVerifier) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return nil, errors.New("not supported for multi signatures") +} + +// VerifySignature verifies the signature specified in an DSSE envelope +func (wL *wrappedMultiVerifier) VerifySignature(s, _ io.Reader, _ ...signature.VerifyOption) error { + sig, err := io.ReadAll(s) + if err != nil { + return err + } + + env := dsse.Envelope{} + if err := json.Unmarshal(sig, &env); err != nil { + return err + } + + envVerifier, err := dsse.NewMultiEnvelopeVerifier(wL.threshold, wL.vLAdapters...) 
+ if err != nil { + return err + } + + _, err = envVerifier.Verify(context.Background(), &env) + return err +} + +// WrapMultiSignerVerifier returns a signature.SignerVerifier that uses the DSSE encoding format +func WrapMultiSignerVerifier(payloadType string, threshold int, svL ...signature.SignerVerifier) signature.SignerVerifier { + signerL := make([]signature.Signer, 0, len(svL)) + verifierL := make([]signature.Verifier, 0, len(svL)) + for _, sv := range svL { + signerL = append(signerL, sv) + verifierL = append(verifierL, sv) + } + + sL := WrapMultiSigner(payloadType, signerL...) + vL := WrapMultiVerifier(payloadType, threshold, verifierL...) + + return &wrappedMultiSignerVerifier{ + signer: sL, + verifier: vL, + } +} + +type wrappedMultiSignerVerifier struct { + signer signature.Signer + verifier signature.Verifier +} + +// PublicKey returns the public key associated with the verifier +func (w *wrappedMultiSignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return w.signer.PublicKey(opts...) +} + +// VerifySignature verifies the signature specified in an DSSE envelope +func (w *wrappedMultiSignerVerifier) VerifySignature(s, r io.Reader, opts ...signature.VerifyOption) error { + return w.verifier.VerifySignature(s, r, opts...) +} + +// SignMessage signs the provided stream in the reader using the DSSE encoding format +func (w *wrappedMultiSignerVerifier) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) { + return w.signer.SignMessage(r, opts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go new file mode 100644 index 000000000000..d333cb23f914 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go @@ -0,0 +1,270 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/asn1" + "errors" + "fmt" + "io" + "math/big" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// checked on LoadSigner, LoadVerifier and SignMessage +var ecdsaSupportedHashFuncs = []crypto.Hash{ + crypto.SHA256, + crypto.SHA512, + crypto.SHA384, + crypto.SHA224, +} + +// checked on VerifySignature. Supports SHA1 verification. +var ecdsaSupportedVerifyHashFuncs = []crypto.Hash{ + crypto.SHA256, + crypto.SHA512, + crypto.SHA384, + crypto.SHA224, + crypto.SHA1, +} + +// ECDSASigner is a signature.Signer that uses an Elliptic Curve DSA algorithm +type ECDSASigner struct { + hashFunc crypto.Hash + priv *ecdsa.PrivateKey +} + +// LoadECDSASigner calculates signatures using the specified private key and hash algorithm. +// +// hf must not be crypto.Hash(0). 
+func LoadECDSASigner(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASigner, error) { + if priv == nil { + return nil, errors.New("invalid ECDSA private key specified") + } + + if !isSupportedAlg(hf, ecdsaSupportedHashFuncs) { + return nil, errors.New("invalid hash function specified") + } + + return &ECDSASigner{ + priv: priv, + hashFunc: hf, + }, nil +} + +// SignMessage signs the provided message. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the ECDSASigner was created. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithRand() +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (e ECDSASigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) { + digest, _, err := ComputeDigestForSigning(message, e.hashFunc, ecdsaSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + rand := selectRandFromOpts(opts...) + + return ecdsa.SignASN1(rand, e.priv, digest) +} + +// Public returns the public key that can be used to verify signatures created by +// this signer. +func (e ECDSASigner) Public() crypto.PublicKey { + if e.priv == nil { + return nil + } + + return e.priv.Public() +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ECDSASigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.Public(), nil +} + +// Sign computes the signature for the specified digest. If a source of entropy is +// given in rand, it will be used instead of the default value (rand.Reader from crypto/rand). +// +// If opts are specified, the hash function in opts.Hash should be the one used to compute +// digest. If opts are not specified, the value provided when the signer was created will be used instead. +func (e ECDSASigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + ecdsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)} + if opts != nil { + ecdsaOpts = append(ecdsaOpts, options.WithCryptoSignerOpts(opts)) + } + + return e.SignMessage(nil, ecdsaOpts...) +} + +// ECDSAVerifier is a signature.Verifier that uses an Elliptic Curve DSA algorithm +type ECDSAVerifier struct { + publicKey *ecdsa.PublicKey + hashFunc crypto.Hash +} + +// LoadECDSAVerifier returns a Verifier that verifies signatures using the specified +// ECDSA public key and hash algorithm. +// +// hf must not be crypto.Hash(0). +func LoadECDSAVerifier(pub *ecdsa.PublicKey, hashFunc crypto.Hash) (*ECDSAVerifier, error) { + if pub == nil { + return nil, errors.New("invalid ECDSA public key specified") + } + + if !isSupportedAlg(hashFunc, ecdsaSupportedHashFuncs) { + return nil, errors.New("invalid hash function specified") + } + + return &ECDSAVerifier{ + publicKey: pub, + hashFunc: hashFunc, + }, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ECDSAVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} + +// VerifySignature verifies the signature for the given message. 
Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the ECDSAVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e ECDSAVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + if e.publicKey == nil { + return errors.New("no public key set for ECDSAVerifier") + } + + digest, _, err := ComputeDigestForVerifying(message, e.hashFunc, ecdsaSupportedVerifyHashFuncs, opts...) + if err != nil { + return err + } + + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + // Without this check, VerifyASN1 panics on an invalid key. + if !e.publicKey.IsOnCurve(e.publicKey.X, e.publicKey.Y) { + return fmt.Errorf("invalid ECDSA public key for %s", e.publicKey.Params().Name) + } + + asnParseTest := struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(sigBytes, &asnParseTest); err == nil { + if !ecdsa.VerifyASN1(e.publicKey, digest, sigBytes) { + return errors.New("invalid signature when validating ASN.1 encoded signature") + } + } else { + // deal with IEEE P1363 encoding of signatures + if len(sigBytes) == 0 || len(sigBytes) > 132 || len(sigBytes)%2 != 0 { + return errors.New("ecdsa: Invalid IEEE_P1363 encoded bytes") + } + r := new(big.Int).SetBytes(sigBytes[:len(sigBytes)/2]) + s := new(big.Int).SetBytes(sigBytes[len(sigBytes)/2:]) + if !ecdsa.Verify(e.publicKey, digest, r, s) { + return errors.New("invalid signature when validating IEEE_P1363 encoded signature") + } + } + + return nil +} + +// ECDSASignerVerifier is a signature.SignerVerifier that uses an Elliptic Curve DSA algorithm +type ECDSASignerVerifier struct { + *ECDSASigner + *ECDSAVerifier +} + +// LoadECDSASignerVerifier creates a combined signer and verifier. This is a convenience object +// that simply wraps an instance of ECDSASigner and ECDSAVerifier. +func LoadECDSASignerVerifier(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASignerVerifier, error) { + signer, err := LoadECDSASigner(priv, hf) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + verifier, err := LoadECDSAVerifier(&priv.PublicKey, hf) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ECDSASignerVerifier{ + ECDSASigner: signer, + ECDSAVerifier: verifier, + }, nil +} + +// NewDefaultECDSASignerVerifier creates a combined signer and verifier using ECDSA. +// +// This creates a new ECDSA key using the P-256 curve and uses the SHA256 hashing algorithm. +func NewDefaultECDSASignerVerifier() (*ECDSASignerVerifier, *ecdsa.PrivateKey, error) { + return NewECDSASignerVerifier(elliptic.P256(), rand.Reader, crypto.SHA256) +} + +// NewECDSASignerVerifier creates a combined signer and verifier using ECDSA. +// +// This creates a new ECDSA key using the specified elliptic curve, entropy source, and hashing function. 
+func NewECDSASignerVerifier(curve elliptic.Curve, rand io.Reader, hashFunc crypto.Hash) (*ECDSASignerVerifier, *ecdsa.PrivateKey, error) { + priv, err := ecdsa.GenerateKey(curve, rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadECDSASignerVerifier(priv, hashFunc) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ECDSASignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go new file mode 100644 index 000000000000..23a8638ff550 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go @@ -0,0 +1,197 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "bytes" + "crypto" + "crypto/ed25519" + "crypto/rand" + "errors" + "fmt" + "io" +) + +var ed25519SupportedHashFuncs = []crypto.Hash{ + crypto.Hash(0), +} + +// ED25519Signer is a signature.Signer that uses the Ed25519 public-key signature system +type ED25519Signer struct { + priv ed25519.PrivateKey +} + +// LoadED25519Signer calculates signatures using the specified private key. +func LoadED25519Signer(priv ed25519.PrivateKey) (*ED25519Signer, error) { + if priv == nil { + return nil, errors.New("invalid ED25519 private key specified") + } + + // check this to avoid panic and throw error gracefully + if len(priv) != ed25519.PrivateKeySize { + return nil, errors.New("invalid size for ED25519 key") + } + + return &ED25519Signer{ + priv: priv, + }, nil +} + +// SignMessage signs the provided message. Passing the WithDigest option is not +// supported as ED25519 performs a two pass hash over the message during the +// signing process. +// +// All options are ignored. +func (e ED25519Signer) SignMessage(message io.Reader, _ ...SignOption) ([]byte, error) { + messageBytes, _, err := ComputeDigestForSigning(message, crypto.Hash(0), ed25519SupportedHashFuncs) + if err != nil { + return nil, err + } + + return ed25519.Sign(e.priv, messageBytes), nil +} + +// Public returns the public key that can be used to verify signatures created by +// this signer. +func (e ED25519Signer) Public() crypto.PublicKey { + if e.priv == nil { + return nil + } + + return e.priv.Public() +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. As this value is held in memory, all options provided in arguments +// to this method are ignored. 
+func (e ED25519Signer) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.Public(), nil +} + +// Sign computes the signature for the specified message; the first and third arguments to this +// function are ignored as they are not used by the ED25519 algorithm. +func (e ED25519Signer) Sign(_ io.Reader, message []byte, _ crypto.SignerOpts) ([]byte, error) { + if message == nil { + return nil, errors.New("message must not be nil") + } + return e.SignMessage(bytes.NewReader(message)) +} + +// ED25519Verifier is a signature.Verifier that uses the Ed25519 public-key signature system +type ED25519Verifier struct { + publicKey ed25519.PublicKey +} + +// LoadED25519Verifier returns a Verifier that verifies signatures using the specified ED25519 public key. +func LoadED25519Verifier(pub ed25519.PublicKey) (*ED25519Verifier, error) { + if pub == nil { + return nil, errors.New("invalid ED25519 public key specified") + } + + return &ED25519Verifier{ + publicKey: pub, + }, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e *ED25519Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} + +// VerifySignature verifies the signature for the given message. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// All options are ignored if specified. +func (e *ED25519Verifier) VerifySignature(signature, message io.Reader, _ ...VerifyOption) error { + messageBytes, _, err := ComputeDigestForVerifying(message, crypto.Hash(0), ed25519SupportedHashFuncs) + if err != nil { + return err + } + + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + if !ed25519.Verify(e.publicKey, messageBytes, sigBytes) { + return errors.New("failed to verify signature") + } + return nil +} + +// ED25519SignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system +type ED25519SignerVerifier struct { + *ED25519Signer + *ED25519Verifier +} + +// LoadED25519SignerVerifier creates a combined signer and verifier. This is +// a convenience object that simply wraps an instance of ED25519Signer and ED25519Verifier. +func LoadED25519SignerVerifier(priv ed25519.PrivateKey) (*ED25519SignerVerifier, error) { + signer, err := LoadED25519Signer(priv) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + pub, ok := priv.Public().(ed25519.PublicKey) + if !ok { + return nil, fmt.Errorf("given key is not ed25519.PublicKey") + } + verifier, err := LoadED25519Verifier(pub) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ED25519SignerVerifier{ + ED25519Signer: signer, + ED25519Verifier: verifier, + }, nil +} + +// NewDefaultED25519SignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using crypto/rand as an entropy source. +func NewDefaultED25519SignerVerifier() (*ED25519SignerVerifier, ed25519.PrivateKey, error) { + return NewED25519SignerVerifier(rand.Reader) +} + +// NewED25519SignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using the specified entropy source. 
+func NewED25519SignerVerifier(rand io.Reader) (*ED25519SignerVerifier, ed25519.PrivateKey, error) { + _, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadED25519SignerVerifier(priv) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519SignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go new file mode 100644 index 000000000000..d1660796a8ae --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go @@ -0,0 +1,211 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ed25519" + "crypto/rand" + "errors" + "fmt" + "io" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +var ed25519phSupportedHashFuncs = []crypto.Hash{ + crypto.SHA512, +} + +// ED25519phSigner is a signature.Signer that uses the Ed25519 public-key signature system with pre-hashing +type ED25519phSigner struct { + priv ed25519.PrivateKey +} + +// LoadED25519phSigner calculates signatures using the specified private key. +func LoadED25519phSigner(priv ed25519.PrivateKey) (*ED25519phSigner, error) { + if priv == nil { + return nil, errors.New("invalid ED25519 private key specified") + } + + return &ED25519phSigner{ + priv: priv, + }, nil +} + +// ToED25519SignerVerifier creates a ED25519SignerVerifier from a ED25519phSignerVerifier +// +// Clients that use ED25519phSignerVerifier should use this method to get a +// SignerVerifier that uses the same ED25519 private key, but with the Pure +// Ed25519 algorithm. This might be necessary to interact with Fulcio, which +// only supports the Pure Ed25519 algorithm. +func (e ED25519phSignerVerifier) ToED25519SignerVerifier() (*ED25519SignerVerifier, error) { + return LoadED25519SignerVerifier(e.priv) +} + +// SignMessage signs the provided message. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the ED25519phSigner was created. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e ED25519phSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) { + digest, _, err := ComputeDigestForSigning(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + return e.priv.Sign(nil, digest, crypto.SHA512) +} + +// Public returns the public key that can be used to verify signatures created by +// this signer. 
+func (e ED25519phSigner) Public() crypto.PublicKey { + if e.priv == nil { + return nil + } + + return e.priv.Public() +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519phSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.Public(), nil +} + +// Sign computes the signature for the specified message; the first and third arguments to this +// function are ignored as they are not used by the ED25519ph algorithm. +func (e ED25519phSigner) Sign(_ io.Reader, digest []byte, _ crypto.SignerOpts) ([]byte, error) { + return e.SignMessage(nil, options.WithDigest(digest)) +} + +// ED25519phVerifier is a signature.Verifier that uses the Ed25519 public-key signature system +type ED25519phVerifier struct { + publicKey ed25519.PublicKey +} + +// LoadED25519phVerifier returns a Verifier that verifies signatures using the +// specified ED25519 public key. +func LoadED25519phVerifier(pub ed25519.PublicKey) (*ED25519phVerifier, error) { + if pub == nil { + return nil, errors.New("invalid ED25519 public key specified") + } + + return &ED25519phVerifier{ + publicKey: pub, + }, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e *ED25519phVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the ED25519phVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e *ED25519phVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + digest, _, err := ComputeDigestForVerifying(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) + if err != nil { + return err + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + if err := ed25519.VerifyWithOptions(e.publicKey, digest, sigBytes, &ed25519.Options{Hash: crypto.SHA512}); err != nil { + return fmt.Errorf("failed to verify signature: %w", err) + } + return nil +} + +// ED25519phSignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system +type ED25519phSignerVerifier struct { + *ED25519phSigner + *ED25519phVerifier +} + +// LoadED25519phSignerVerifier creates a combined signer and verifier. This is +// a convenience object that simply wraps an instance of ED25519phSigner and ED25519phVerifier. 
+func LoadED25519phSignerVerifier(priv ed25519.PrivateKey) (*ED25519phSignerVerifier, error) { + signer, err := LoadED25519phSigner(priv) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + pub, ok := priv.Public().(ed25519.PublicKey) + if !ok { + return nil, fmt.Errorf("given key is not ed25519.PublicKey") + } + verifier, err := LoadED25519phVerifier(pub) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ED25519phSignerVerifier{ + ED25519phSigner: signer, + ED25519phVerifier: verifier, + }, nil +} + +// NewDefaultED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using crypto/rand as an entropy source. +func NewDefaultED25519phSignerVerifier() (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + return NewED25519phSignerVerifier(rand.Reader) +} + +// NewED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using the specified entropy source. +func NewED25519phSignerVerifier(rand io.Reader) (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + _, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadED25519phSignerVerifier(priv) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519phSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go new file mode 100644 index 000000000000..44771ff3daac --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go @@ -0,0 +1,111 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
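A brief sketch of the Ed25519ph flow above: the signer operates on a SHA-512 pre-hash, so callers may either stream the message or hand over the digest via WithDigest. Paths and values are illustrative only:

package main

import (
	"bytes"
	"crypto/sha512"
	"fmt"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	sv, _, err := signature.NewDefaultED25519phSignerVerifier()
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("hello ed25519ph")

	// Sign over a caller-computed SHA-512 digest; the message reader is
	// unused when WithDigest supplies a digest of the expected length.
	digest := sha512.Sum512(msg)
	sig, err := sv.SignMessage(nil, options.WithDigest(digest[:]))
	if err != nil {
		log.Fatal(err)
	}

	// Verification recomputes the SHA-512 digest from the message stream
	// when no digest option is supplied.
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		log.Fatal(err)
	}
	fmt.Println("ed25519ph signature verified")
}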
+ +package signature + +import ( + "crypto" + crand "crypto/rand" + "errors" + "fmt" + "io" +) + +func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool { + if supportedAlgs == nil { + return true + } + for _, supportedAlg := range supportedAlgs { + if alg == supportedAlg { + return true + } + } + return false +} + +// ComputeDigestForSigning calculates the digest value for the specified message using a hash function selected by the following process: +// +// - if a digest value is already specified in a SignOption and the length of the digest matches that of the selected hash function, the +// digest value will be returned without any further computation +// - if a hash function is given using WithCryptoSignerOpts(opts) as a SignOption, it will be used (if it is in the supported list) +// - otherwise defaultHashFunc will be used (if it is in the supported list) +func ComputeDigestForSigning(rawMessage io.Reader, defaultHashFunc crypto.Hash, supportedHashFuncs []crypto.Hash, opts ...SignOption) (digest []byte, hashedWith crypto.Hash, err error) { + var cryptoSignerOpts crypto.SignerOpts = defaultHashFunc + for _, opt := range opts { + opt.ApplyDigest(&digest) + opt.ApplyCryptoSignerOpts(&cryptoSignerOpts) + } + hashedWith = cryptoSignerOpts.HashFunc() + if !isSupportedAlg(hashedWith, supportedHashFuncs) { + return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs) + } + if len(digest) > 0 { + if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() { + err = errors.New("unexpected length of digest for hash function specified") + } + return digest, hashedWith, err + } + digest, err = hashMessage(rawMessage, hashedWith) + return digest, hashedWith, err +} + +// ComputeDigestForVerifying calculates the digest value for the specified message using a hash function selected by the following process: +// +// - if a digest value is already specified in a SignOption and the length of the digest matches that of the selected hash function, the +// digest value will be returned without any further computation +// - if a hash function is given using WithCryptoSignerOpts(opts) as a SignOption, it will be used (if it is in the supported list) +// - otherwise defaultHashFunc will be used (if it is in the supported list) +func ComputeDigestForVerifying(rawMessage io.Reader, defaultHashFunc crypto.Hash, supportedHashFuncs []crypto.Hash, opts ...VerifyOption) (digest []byte, hashedWith crypto.Hash, err error) { + var cryptoSignerOpts crypto.SignerOpts = defaultHashFunc + for _, opt := range opts { + opt.ApplyDigest(&digest) + opt.ApplyCryptoSignerOpts(&cryptoSignerOpts) + } + hashedWith = cryptoSignerOpts.HashFunc() + if !isSupportedAlg(hashedWith, supportedHashFuncs) { + return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs) + } + if len(digest) > 0 { + if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() { + err = errors.New("unexpected length of digest for hash function specified") + } + return digest, hashedWith, err + } + digest, err = hashMessage(rawMessage, hashedWith) + return digest, hashedWith, err +} + +func hashMessage(rawMessage io.Reader, hashFunc crypto.Hash) ([]byte, error) { + if rawMessage == nil { + return nil, errors.New("message cannot be nil") + } + if hashFunc == crypto.Hash(0) { + return io.ReadAll(rawMessage) + } + hasher := hashFunc.New() + // avoids reading entire message into memory + if _, err := 
io.Copy(hasher, rawMessage); err != nil { + return nil, fmt.Errorf("hashing message: %w", err) + } + return hasher.Sum(nil), nil +} + +func selectRandFromOpts(opts ...SignOption) io.Reader { + rand := crand.Reader + for _, opt := range opts { + opt.ApplyRand(&rand) + } + return rand +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go new file mode 100644 index 000000000000..e17e768c2d75 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go @@ -0,0 +1,65 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "context" + "crypto" + "crypto/rsa" + "io" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// RPCOption specifies options to be used when performing RPC +type RPCOption interface { + ApplyContext(*context.Context) + ApplyRemoteVerification(*bool) + ApplyRPCAuthOpts(opts *options.RPCAuth) + ApplyKeyVersion(keyVersion *string) +} + +// PublicKeyOption specifies options to be used when obtaining a public key +type PublicKeyOption interface { + RPCOption +} + +// MessageOption specifies options to be used when processing messages during signing or verification +type MessageOption interface { + ApplyDigest(*[]byte) + ApplyCryptoSignerOpts(*crypto.SignerOpts) +} + +// SignOption specifies options to be used when signing a message +type SignOption interface { + RPCOption + MessageOption + ApplyRand(*io.Reader) + ApplyKeyVersionUsed(**string) +} + +// VerifyOption specifies options to be used when verifying a signature +type VerifyOption interface { + RPCOption + MessageOption +} + +// LoadOption specifies options to be used when creating a Signer/Verifier +type LoadOption interface { + ApplyHash(*crypto.Hash) + ApplyED25519ph(*bool) + ApplyRSAPSS(**rsa.PSSOptions) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go new file mode 100644 index 000000000000..2282a863ef07 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go @@ -0,0 +1,37 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
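ComputeDigestForSigning above drives the digest and hash selection used by every signer in this package; a minimal sketch of its behavior, assuming only the exported API shown in this patch:

package main

import (
	"crypto"
	"fmt"
	"log"
	"strings"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	msg := strings.NewReader("payload to be signed")

	// No options: the default hash (SHA-256 here) is applied to the stream.
	// A nil supported-hash list means any hash function is accepted.
	digest, hf, err := signature.ComputeDigestForSigning(msg, crypto.SHA256, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("hashed with %s, %d bytes\n", hf, len(digest))

	// A precomputed digest short-circuits hashing, but its length must match
	// the selected hash function or an error is returned.
	_, _, err = signature.ComputeDigestForSigning(nil, crypto.SHA256, nil,
		options.WithDigest(digest))
	fmt.Println("reusing digest, err =", err)
}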
+
+// Package options defines options for KMS clients
+package options
+
+import (
+ "context"
+)
+
+// RequestContext implements the functional option pattern for including a context during RPC
+type RequestContext struct {
+ NoOpOptionImpl
+ ctx context.Context
+}
+
+// ApplyContext sets the specified context as the functional option
+func (r RequestContext) ApplyContext(ctx *context.Context) {
+ *ctx = r.ctx
+}
+
+// WithContext specifies that the given context should be used in RPC to external services
+func WithContext(ctx context.Context) RequestContext {
+ return RequestContext{ctx: ctx}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go
new file mode 100644
index 000000000000..21875dc8c0d9
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go
@@ -0,0 +1,35 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestDigest implements the functional option pattern for specifying a digest value
+type RequestDigest struct {
+ NoOpOptionImpl
+ digest []byte
+}
+
+// ApplyDigest sets the specified digest value as the functional option
+func (r RequestDigest) ApplyDigest(digest *[]byte) {
+ *digest = r.digest
+}
+
+// WithDigest specifies that the given digest can be used by underlying signature implementations
+// WARNING: When verifying a digest with ECDSA, it is trivial to craft a valid signature
+// over a random message given a public key. Do not use this unless you understand the
+// implications and do not need to protect against malleability.
+func WithDigest(digest []byte) RequestDigest {
+ return RequestDigest{digest: digest}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go
new file mode 100644
index 000000000000..ebdeda22b86d
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go
@@ -0,0 +1,17 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
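On the consuming side, an implementation simply applies every option it receives; values the caller did not set fall through to the NoOpOptionImpl no-ops defined later in this patch. The helper below is a hypothetical sketch, not part of the package:

package example

import "github.com/sigstore/sigstore/pkg/signature"

// digestFromOpts mirrors what a signer implementation does with the MessageOption
// methods: apply every option and keep whatever digest, if any, the caller supplied.
func digestFromOpts(opts ...signature.SignOption) []byte {
	var digest []byte
	for _, o := range opts {
		o.ApplyDigest(&digest)
	}
	return digest
}

Called with options.WithDigest(d) this returns d; called with only options.WithContext(ctx) it returns nil, because RequestContext carries no digest and its ApplyDigest is the embedded no-op.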
+ +// Package options contains functional options for the various SignerVerifiers +package options diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go new file mode 100644 index 000000000000..751418f9d58e --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go @@ -0,0 +1,50 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package options + +// RequestKeyVersion implements the functional option pattern for specifying the KMS key version during signing or verification +type RequestKeyVersion struct { + NoOpOptionImpl + keyVersion string +} + +// ApplyKeyVersion sets the KMS's key version as a functional option +func (r RequestKeyVersion) ApplyKeyVersion(keyVersion *string) { + *keyVersion = r.keyVersion +} + +// WithKeyVersion specifies that a specific KMS key version be used during signing and verification operations; +// a value of 0 will use the latest version of the key (default) +func WithKeyVersion(keyVersion string) RequestKeyVersion { + return RequestKeyVersion{keyVersion: keyVersion} +} + +// RequestKeyVersionUsed implements the functional option pattern for obtaining the KMS key version used during signing +type RequestKeyVersionUsed struct { + NoOpOptionImpl + keyVersionUsed *string +} + +// ApplyKeyVersionUsed requests to store the KMS's key version that was used as a functional option +func (r RequestKeyVersionUsed) ApplyKeyVersionUsed(keyVersionUsed **string) { + *keyVersionUsed = r.keyVersionUsed +} + +// ReturnKeyVersionUsed specifies that the specific KMS key version that was used during signing should be stored +// in the pointer provided +func ReturnKeyVersionUsed(keyVersionUsed *string) RequestKeyVersionUsed { + return RequestKeyVersionUsed{keyVersionUsed: keyVersionUsed} +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go new file mode 100644 index 000000000000..e5f3f01162dd --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go @@ -0,0 +1,76 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
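A hypothetical sketch of how a KMS-backed signer might consume the two key-version options above; the helper names are illustrative and not part of this package:

package example

import "github.com/sigstore/sigstore/pkg/signature"

// keyVersionFromOpts returns the key version requested via WithKeyVersion,
// or the empty string when the caller did not pin a version.
func keyVersionFromOpts(opts []signature.SignOption) string {
	var kv string
	for _, o := range opts {
		o.ApplyKeyVersion(&kv)
	}
	return kv
}

// reportKeyVersionUsed writes the version that was actually used into the
// pointer registered with ReturnKeyVersionUsed, if the caller supplied one.
func reportKeyVersionUsed(used string, opts []signature.SignOption) {
	var dst *string
	for _, o := range opts {
		o.ApplyKeyVersionUsed(&dst)
	}
	if dst != nil {
		*dst = used
	}
}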
+
+package options
+
+import (
+ "crypto"
+ "crypto/rsa"
+)
+
+// RequestHash implements the functional option pattern for setting a Hash
+// function when loading a signer or verifier
+type RequestHash struct {
+ NoOpOptionImpl
+ hashFunc crypto.Hash
+}
+
+// ApplyHash sets the hash as requested by the functional option
+func (r RequestHash) ApplyHash(hash *crypto.Hash) {
+ *hash = r.hashFunc
+}
+
+// WithHash specifies that the given hash function should be used when loading a signer or verifier
+func WithHash(hash crypto.Hash) RequestHash {
+ return RequestHash{hashFunc: hash}
+}
+
+// RequestED25519ph implements the functional option pattern for specifying
+// ED25519ph (pre-hashed) should be used when loading a signer or verifier and an
+// ED25519 key is detected
+type RequestED25519ph struct {
+ NoOpOptionImpl
+ useED25519ph bool
+}
+
+// ApplyED25519ph sets the ED25519ph flag as requested by the functional option
+func (r RequestED25519ph) ApplyED25519ph(useED25519ph *bool) {
+ *useED25519ph = r.useED25519ph
+}
+
+// WithED25519ph specifies that the ED25519ph algorithm should be used when an ED25519 key is used
+func WithED25519ph() RequestED25519ph {
+ return RequestED25519ph{useED25519ph: true}
+}
+
+// RequestPSSOptions implements the functional option pattern for specifying RSA
+// PSS should be used when loading a signer or verifier and an RSA key is
+// detected
+type RequestPSSOptions struct {
+ NoOpOptionImpl
+ opts *rsa.PSSOptions
+}
+
+// ApplyRSAPSS sets the RSAPSS options as requested by the functional option
+func (r RequestPSSOptions) ApplyRSAPSS(opts **rsa.PSSOptions) {
+ *opts = r.opts
+}
+
+// WithRSAPSS specifies that the RSAPSS algorithm should be used when an RSA key is used.
+// Note that the RSA PSSOptions contains a hash algorithm, which will override
+// the hash function specified with WithHash.
+func WithRSAPSS(opts *rsa.PSSOptions) RequestPSSOptions {
+ return RequestPSSOptions{opts: opts}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
new file mode 100644
index 000000000000..0c0e51856092
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
@@ -0,0 +1,59 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "context"
+ "crypto"
+ "crypto/rsa"
+ "io"
+)
+
+// NoOpOptionImpl implements the RPCOption, SignOption, VerifyOption interfaces as no-ops.
+type NoOpOptionImpl struct{} + +// ApplyContext is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyContext(_ *context.Context) {} + +// ApplyCryptoSignerOpts is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyCryptoSignerOpts(_ *crypto.SignerOpts) {} + +// ApplyDigest is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyDigest(_ *[]byte) {} + +// ApplyRand is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRand(_ *io.Reader) {} + +// ApplyRemoteVerification is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRemoteVerification(_ *bool) {} + +// ApplyRPCAuthOpts is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRPCAuthOpts(_ *RPCAuth) {} + +// ApplyKeyVersion is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {} + +// ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {} + +// ApplyHash is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyHash(_ *crypto.Hash) {} + +// ApplyED25519ph is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyED25519ph(_ *bool) {} + +// ApplyRSAPSS is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRSAPSS(_ **rsa.PSSOptions) {} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go new file mode 100644 index 000000000000..fd3a17f9e02a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go @@ -0,0 +1,41 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package options + +import ( + crand "crypto/rand" + "io" +) + +// RequestRand implements the functional option pattern for using a specific source of entropy +type RequestRand struct { + NoOpOptionImpl + rand io.Reader +} + +// ApplyRand sets the specified source of entropy as the functional option +func (r RequestRand) ApplyRand(rand *io.Reader) { + *rand = r.rand +} + +// WithRand specifies that the given source of entropy should be used in signing operations +func WithRand(rand io.Reader) RequestRand { + r := rand + if r == nil { + r = crand.Reader + } + return RequestRand{rand: r} +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go new file mode 100644 index 000000000000..26144adbbf63 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go @@ -0,0 +1,32 @@ +// +// Copyright 2021 The Sigstore Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestRemoteVerification implements the functional option pattern for remotely verifying signatures when possible
+type RequestRemoteVerification struct {
+ NoOpOptionImpl
+ remoteVerification bool
+}
+
+// ApplyRemoteVerification sets remote verification as a functional option
+func (r RequestRemoteVerification) ApplyRemoteVerification(remoteVerification *bool) {
+ *remoteVerification = r.remoteVerification
+}
+
+// WithRemoteVerification specifies that the verification operation should be performed remotely (vs in the process of the caller)
+func WithRemoteVerification(remoteVerification bool) RequestRemoteVerification {
+ return RequestRemoteVerification{remoteVerification: remoteVerification}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
new file mode 100644
index 000000000000..188de92dc96f
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
@@ -0,0 +1,58 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RPCAuthOpts includes authentication settings for RPC calls
+type RPCAuthOpts struct {
+ NoOpOptionImpl
+ opts RPCAuth
+}
+
+// RPCAuth provides credentials for RPC calls; empty fields are ignored
+type RPCAuth struct {
+ Address string // address is the remote server address, e.g.
https://vault:8200 + Path string // path for the RPC, in vault this is the transit path which default to "transit" + Token string // token used for RPC, in vault this is the VAULT_TOKEN value + OIDC RPCAuthOIDC +} + +// RPCAuthOIDC is used to perform the RPC login using OIDC instead of a fixed token +type RPCAuthOIDC struct { + Path string // path defaults to "jwt" for vault + Role string // role is required for jwt logins + Token string // token is a jwt with vault +} + +// ApplyRPCAuthOpts sets the RPCAuth as a function option +func (r RPCAuthOpts) ApplyRPCAuthOpts(opts *RPCAuth) { + if r.opts.Address != "" { + opts.Address = r.opts.Address + } + if r.opts.Path != "" { + opts.Path = r.opts.Path + } + if r.opts.Token != "" { + opts.Token = r.opts.Token + } + if r.opts.OIDC.Token != "" { + opts.OIDC = r.opts.OIDC + } +} + +// WithRPCAuthOpts specifies RPCAuth settings to be used with RPC logins +func WithRPCAuthOpts(opts RPCAuth) RPCAuthOpts { + return RPCAuthOpts{opts: opts} +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go new file mode 100644 index 000000000000..1a3ac7394874 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go @@ -0,0 +1,40 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package options + +import ( + "crypto" +) + +// RequestCryptoSignerOpts implements the functional option pattern for supplying crypto.SignerOpts when signing or verifying +type RequestCryptoSignerOpts struct { + NoOpOptionImpl + opts crypto.SignerOpts +} + +// ApplyCryptoSignerOpts sets crypto.SignerOpts as a functional option +func (r RequestCryptoSignerOpts) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) { + *opts = r.opts +} + +// WithCryptoSignerOpts specifies that provided crypto.SignerOpts be used during signing and verification operations +func WithCryptoSignerOpts(opts crypto.SignerOpts) RequestCryptoSignerOpts { + var optsToUse crypto.SignerOpts = crypto.SHA256 + if opts != nil { + optsToUse = opts + } + return RequestCryptoSignerOpts{opts: optsToUse} +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go new file mode 100644 index 000000000000..3664185a0f8b --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package payload contains types and utilities related to the Cosign signature format. +package payload diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go new file mode 100644 index 000000000000..cab6f5b98a75 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go @@ -0,0 +1,122 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package payload defines a container image +package payload + +import ( + "encoding/json" + "fmt" + + "github.com/google/go-containerregistry/pkg/name" +) + +// CosignSignatureType is the value of `critical.type` in a SimpleContainerImage payload. +const CosignSignatureType = "cosign container image signature" + +// SimpleContainerImage describes the structure of a basic container image signature payload, as defined at: +// https://github.com/containers/image/blob/main/docs/containers-signature.5.md#json-data-format +type SimpleContainerImage struct { + Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature + Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image +} + +// Critical data critical to correctly evaluating the validity of a signature +type Critical struct { + Identity Identity `json:"identity"` // Identity claimed identity of the image + Image Image `json:"image"` // Image identifies the container that the signature applies to + Type string `json:"type"` // Type must be 'atomic container signature' +} + +// Identity is the claimed identity of the image +type Identity struct { + DockerReference string `json:"docker-reference"` // DockerReference is a reference used to refer to or download the image +} + +// Image identifies the container image that the signature applies to +type Image struct { + DockerManifestDigest string `json:"docker-manifest-digest"` // DockerManifestDigest the manifest digest of the signed container image +} + +// Cosign describes a container image signed using Cosign +type Cosign struct { + Image name.Digest + // ClaimedIdentity is what the signer claims the image to be; usually a registry.com/…/repo:tag, but can also use a digest instead. + // ALMOST ALL consumers MUST verify that ClaimedIdentity in the signature is correct given how user refers to the image; + // e.g. 
if the user asks to access a signed image example.com/repo/mysql:3.14, + // it is ALMOST ALWAYS necessary to validate that ClaimedIdentity = example.com/repo/mysql:3.14 + // + // Considerations: + // - The user might refer to an image using a digest (example.com/repo/mysql@sha256:…); in that case the registry/…/repo should still match + // - If the image is multi-arch, ClaimedIdentity usually refers to the top-level multi-arch image index also on the per-arch images + // (possibly even if ClaimedIdentity contains a digest!) + // - Older versions of cosign generate signatures where ClaimedIdentity only contains a registry/…/repo ; signature consumers should allow users + // to determine whether such images should be accepted (and, long-term, the default SHOULD be to reject them) + ClaimedIdentity string + Annotations map[string]interface{} +} + +// SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format +func (p Cosign) SimpleContainerImage() SimpleContainerImage { + dockerReference := p.Image.Repository.Name() + if p.ClaimedIdentity != "" { + dockerReference = p.ClaimedIdentity + } + return SimpleContainerImage{ + Critical: Critical{ + Identity: Identity{ + DockerReference: dockerReference, + }, + Image: Image{ + DockerManifestDigest: p.Image.DigestStr(), + }, + Type: CosignSignatureType, + }, + Optional: p.Annotations, + } +} + +// MarshalJSON marshals the container signature into a []byte of JSON data +func (p Cosign) MarshalJSON() ([]byte, error) { + return json.Marshal(p.SimpleContainerImage()) +} + +var _ json.Marshaler = Cosign{} + +// UnmarshalJSON unmarshals []byte of JSON data into a container signature object +func (p *Cosign) UnmarshalJSON(data []byte) error { + if string(data) == "null" { + // JSON "null" is a no-op by convention + return nil + } + var simple SimpleContainerImage + if err := json.Unmarshal(data, &simple); err != nil { + return err + } + if simple.Critical.Type != CosignSignatureType { + return fmt.Errorf("Cosign signature payload was of an unknown type: %q", simple.Critical.Type) + } + digestStr := simple.Critical.Identity.DockerReference + "@" + simple.Critical.Image.DockerManifestDigest + digest, err := name.NewDigest(digestStr) + if err != nil { + return fmt.Errorf("could not parse image digest string %q: %w", digestStr, err) + } + p.Image = digest + p.ClaimedIdentity = simple.Critical.Identity.DockerReference + p.Annotations = simple.Optional + return nil +} + +var _ json.Unmarshaler = (*Cosign)(nil) diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go similarity index 55% rename from vendor/github.com/go-openapi/swag/file.go rename to vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go index 16accc55f82e..6f6a47a9f96d 100644 --- a/vendor/github.com/go-openapi/swag/file.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go @@ -1,10 +1,11 @@ -// Copyright 2015 go-swagger maintainers +// +// Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,22 +13,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package swag - -import "mime/multipart" +package signature -// File represents an uploaded file. -type File struct { - Data multipart.File - Header *multipart.FileHeader -} - -// Read bytes from the file -func (f *File) Read(p []byte) (n int, err error) { - return f.Data.Read(p) -} +import ( + "crypto" +) -// Close the file -func (f *File) Close() error { - return f.Data.Close() +// PublicKeyProvider returns a PublicKey associated with a digital signature +type PublicKeyProvider interface { + PublicKey(opts ...PublicKeyOption) (crypto.PublicKey, error) } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go new file mode 100644 index 000000000000..1cac68a539fa --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go @@ -0,0 +1,225 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "errors" + "fmt" + "io" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// RSAPKCS1v15Signer is a signature.Signer that uses the RSA PKCS1v15 algorithm +type RSAPKCS1v15Signer struct { + hashFunc crypto.Hash + priv *rsa.PrivateKey +} + +// LoadRSAPKCS1v15Signer calculates signatures using the specified private key and hash algorithm. +// +// hf must be either SHA256, SHA388, or SHA512. +func LoadRSAPKCS1v15Signer(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15Signer, error) { + if priv == nil { + return nil, errors.New("invalid RSA private key specified") + } + + if !isSupportedAlg(hf, rsaSupportedHashFuncs) { + return nil, errors.New("invalid hash function specified") + } + + return &RSAPKCS1v15Signer{ + priv: priv, + hashFunc: hf, + }, nil +} + +// SignMessage signs the provided message using PKCS1v15. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the RSAPKCS1v15Signer was created. +// +// SignMessage recognizes the following Options listed in order of preference: +// +// - WithRand() +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (r RSAPKCS1v15Signer) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) { + digest, hf, err := ComputeDigestForSigning(message, r.hashFunc, rsaSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + rand := selectRandFromOpts(opts...) 
+ + return rsa.SignPKCS1v15(rand, r.priv, hf, digest) +} + +// Public returns the public key that can be used to verify signatures created by +// this signer. +func (r RSAPKCS1v15Signer) Public() crypto.PublicKey { + if r.priv == nil { + return nil + } + + return r.priv.Public() +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (r RSAPKCS1v15Signer) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return r.Public(), nil +} + +// Sign computes the signature for the specified digest using PKCS1v15. +// +// If a source of entropy is given in rand, it will be used instead of the default value (rand.Reader +// from crypto/rand). +// +// If opts are specified, they should specify the hash function used to compute digest. If opts are +// not specified, this function assumes the hash function provided when the signer was created was +// used to create the value specified in digest. +func (r RSAPKCS1v15Signer) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + rsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)} + if opts != nil { + rsaOpts = append(rsaOpts, options.WithCryptoSignerOpts(opts)) + } + + return r.SignMessage(nil, rsaOpts...) +} + +// RSAPKCS1v15Verifier is a signature.Verifier that uses the RSA PKCS1v15 algorithm +type RSAPKCS1v15Verifier struct { + publicKey *rsa.PublicKey + hashFunc crypto.Hash +} + +// LoadRSAPKCS1v15Verifier returns a Verifier that verifies signatures using the specified +// RSA public key and hash algorithm. +// +// hf must be either SHA256, SHA388, or SHA512. +func LoadRSAPKCS1v15Verifier(pub *rsa.PublicKey, hashFunc crypto.Hash) (*RSAPKCS1v15Verifier, error) { + if pub == nil { + return nil, errors.New("invalid RSA public key specified") + } + + if !isSupportedAlg(hashFunc, rsaSupportedHashFuncs) { + return nil, errors.New("invalid hash function specified") + } + + return &RSAPKCS1v15Verifier{ + publicKey: pub, + hashFunc: hashFunc, + }, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (r RSAPKCS1v15Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return r.publicKey, nil +} + +// VerifySignature verifies the signature for the given message using PKCS1v15. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the RSAPKCS1v15Verifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (r RSAPKCS1v15Verifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...) 
+ if err != nil { + return err + } + + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + return rsa.VerifyPKCS1v15(r.publicKey, hf, digest, sigBytes) +} + +// RSAPKCS1v15SignerVerifier is a signature.SignerVerifier that uses the RSA PKCS1v15 algorithm +type RSAPKCS1v15SignerVerifier struct { + *RSAPKCS1v15Signer + *RSAPKCS1v15Verifier +} + +// LoadRSAPKCS1v15SignerVerifier creates a combined signer and verifier. This is a convenience object +// that simply wraps an instance of RSAPKCS1v15Signer and RSAPKCS1v15Verifier. +func LoadRSAPKCS1v15SignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15SignerVerifier, error) { + signer, err := LoadRSAPKCS1v15Signer(priv, hf) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + verifier, err := LoadRSAPKCS1v15Verifier(&priv.PublicKey, hf) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &RSAPKCS1v15SignerVerifier{ + RSAPKCS1v15Signer: signer, + RSAPKCS1v15Verifier: verifier, + }, nil +} + +// NewDefaultRSAPKCS1v15SignerVerifier creates a combined signer and verifier using RSA PKCS1v15. +// This creates a new RSA key of 2048 bits and uses the SHA256 hashing algorithm. +func NewDefaultRSAPKCS1v15SignerVerifier() (*RSAPKCS1v15SignerVerifier, *rsa.PrivateKey, error) { + return NewRSAPKCS1v15SignerVerifier(rand.Reader, 2048, crypto.SHA256) +} + +// NewRSAPKCS1v15SignerVerifier creates a combined signer and verifier using RSA PKCS1v15. +// This creates a new RSA key of the specified length of bits, entropy source, and hash function. +func NewRSAPKCS1v15SignerVerifier(rand io.Reader, bits int, hashFunc crypto.Hash) (*RSAPKCS1v15SignerVerifier, *rsa.PrivateKey, error) { + priv, err := rsa.GenerateKey(rand, bits) + if err != nil { + return nil, nil, err + } + + sv, err := LoadRSAPKCS1v15SignerVerifier(priv, hashFunc) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (r RSAPKCS1v15SignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return r.publicKey, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go new file mode 100644 index 000000000000..6e52bed9ba64 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go @@ -0,0 +1,260 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
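A minimal sign/verify round trip with the PKCS#1 v1.5 helpers defined above (a sketch; the generated private key is discarded here for brevity):

package main

import (
	"bytes"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// Generates a fresh 2048-bit RSA key and wires SHA-256 digests to PKCS#1 v1.5.
	sv, _, err := signature.NewDefaultRSAPKCS1v15SignerVerifier()
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("hello world")
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		log.Fatal(err)
	}

	// VerifySignature returns nil on success.
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		log.Fatal(err)
	}
}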
+
+package signature
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// checked on LoadSigner, LoadVerifier, and SignMessage
+var rsaSupportedHashFuncs = []crypto.Hash{
+ crypto.SHA256,
+ crypto.SHA384,
+ crypto.SHA512,
+}
+
+// checked on VerifySignature. Supports SHA1 verification.
+var rsaSupportedVerifyHashFuncs = []crypto.Hash{
+ crypto.SHA1,
+ crypto.SHA256,
+ crypto.SHA384,
+ crypto.SHA512,
+}
+
+// RSAPSSSigner is a signature.Signer that uses the RSA PSS algorithm
+type RSAPSSSigner struct {
+ hashFunc crypto.Hash
+ priv *rsa.PrivateKey
+ pssOpts *rsa.PSSOptions
+}
+
+// LoadRSAPSSSigner calculates signatures using the specified private key and hash algorithm.
+//
+// If opts are specified, then they will be stored and used as a default if not overridden
+// by the value passed to Sign().
+//
+// hf must be either SHA256, SHA384, or SHA512. opts.Hash is ignored.
+func LoadRSAPSSSigner(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSigner, error) {
+ if priv == nil {
+ return nil, errors.New("invalid RSA private key specified")
+ }
+
+ if !isSupportedAlg(hf, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPSSSigner{
+ priv: priv,
+ pssOpts: opts,
+ hashFunc: hf,
+ }, nil
+}
+
+// SignMessage signs the provided message using PSS. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the RSAPSSSigner was created.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPSSSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, hf, err := ComputeDigestForSigning(message, r.hashFunc, rsaSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ rand := selectRandFromOpts(opts...)
+ pssOpts := r.pssOpts
+ if pssOpts == nil {
+ pssOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ }
+ }
+ pssOpts.Hash = hf
+
+ return rsa.SignPSS(rand, r.priv, hf, digest, pssOpts)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (r RSAPSSSigner) Public() crypto.PublicKey {
+ if r.priv == nil {
+ return nil
+ }
+
+ return r.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.Public(), nil
+}
+
+// Sign computes the signature for the specified digest using PSS.
+//
+// If a source of entropy is given in rand, it will be used instead of the default value (rand.Reader
+// from crypto/rand).
+//
+// If opts are specified, they must be *rsa.PSSOptions. If opts are not specified, the hash function
+// provided when the signer was created will be assumed.
+func (r RSAPSSSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ rsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+ if opts != nil {
+ rsaOpts = append(rsaOpts, options.WithCryptoSignerOpts(opts))
+ }
+
+ return r.SignMessage(nil, rsaOpts...)
+}
+
+// RSAPSSVerifier is a signature.Verifier that uses the RSA PSS algorithm
+type RSAPSSVerifier struct {
+ publicKey *rsa.PublicKey
+ hashFunc crypto.Hash
+ pssOpts *rsa.PSSOptions
+}
+
+// LoadRSAPSSVerifier verifies signatures using the specified public key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512. opts.Hash is ignored.
+func LoadRSAPSSVerifier(pub *rsa.PublicKey, hashFunc crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSVerifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid RSA public key specified")
+ }
+
+ if !isSupportedAlg(hashFunc, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPSSVerifier{
+ publicKey: pub,
+ hashFunc: hashFunc,
+ pssOpts: opts,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message using PSS. Unless provided
+// in an option, the digest of the message will be computed using the hash function specified
+// when the RSAPSSVerifier was created.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPSSVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
+ digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ // rsa.VerifyPSS ignores pssOpts.Hash, so we don't set it
+ pssOpts := r.pssOpts
+ if pssOpts == nil {
+ pssOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ }
+ }
+
+ return rsa.VerifyPSS(r.publicKey, hf, digest, sigBytes, pssOpts)
+}
+
+// RSAPSSSignerVerifier is a signature.SignerVerifier that uses the RSA PSS algorithm
+type RSAPSSSignerVerifier struct {
+ *RSAPSSSigner
+ *RSAPSSVerifier
+}
+
+// LoadRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS. This is
+// a convenience object that simply wraps an instance of RSAPSSSigner and RSAPSSVerifier.
+func LoadRSAPSSSignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSignerVerifier, error) {
+ signer, err := LoadRSAPSSSigner(priv, hf, opts)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ verifier, err := LoadRSAPSSVerifier(&priv.PublicKey, hf, opts)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &RSAPSSSignerVerifier{
+ RSAPSSSigner: signer,
+ RSAPSSVerifier: verifier,
+ }, nil
+}
+
+// NewDefaultRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS.
+// This creates a new RSA key of 2048 bits and uses the SHA256 hashing algorithm.
+func NewDefaultRSAPSSSignerVerifier() (*RSAPSSSignerVerifier, *rsa.PrivateKey, error) { + return NewRSAPSSSignerVerifier(rand.Reader, 2048, crypto.SHA256) +} + +// NewRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS. +// This creates a new RSA key of the specified length of bits, entropy source, and hash function. +func NewRSAPSSSignerVerifier(rand io.Reader, bits int, hashFunc crypto.Hash) (*RSAPSSSignerVerifier, *rsa.PrivateKey, error) { + priv, err := rsa.GenerateKey(rand, bits) + if err != nil { + return nil, nil, err + } + + sv, err := LoadRSAPSSSignerVerifier(priv, hashFunc, &rsa.PSSOptions{Hash: hashFunc}) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (r RSAPSSSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return r.publicKey, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go new file mode 100644 index 000000000000..50f432798d36 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go @@ -0,0 +1,144 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "io" + "os" + "path/filepath" + + // these ensure we have the implementations loaded + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// Signer creates digital signatures over a message using a specified key pair +type Signer interface { + PublicKeyProvider + SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) +} + +// SignerOpts implements crypto.SignerOpts but also allows callers to specify +// additional options that may be utilized in signing the digest provided. +type SignerOpts struct { + Hash crypto.Hash + Opts []SignOption +} + +// HashFunc returns the hash function for this object +func (s SignerOpts) HashFunc() crypto.Hash { + return s.Hash +} + +// LoadSigner returns a signature.Signer based on the algorithm of the private key +// provided. +// +// If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a +// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly. +func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) { + return LoadSignerWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerWithOpts returns a signature.Signer based on the algorithm of the private key +// provided. 
+func LoadSignerWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := privateKey.(type) { + case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSigner(pk, hashFunc, rsaPSSOptions) + } + return LoadRSAPKCS1v15Signer(pk, hashFunc) + case *ecdsa.PrivateKey: + return LoadECDSASigner(pk, hashFunc) + case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSigner(pk) + } + return LoadED25519Signer(pk) + } + return nil, errors.New("unsupported public key type") +} + +// LoadSignerFromPEMFile returns a signature.Signer based on the algorithm of the private key +// in the file. The Signer will use the hash function specified when computing digests. +// +// If key is an RSA key, a RSAPKCS1v15Signer will be returned. If a +// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() and +// cryptoutils.UnmarshalPEMToPrivateKey() methods directly. +func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (Signer, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSigner(priv, hashFunc) +} + +// LoadSignerFromPEMFileWithOpts returns a signature.Signer based on the algorithm of the private key +// in the file. The Signer will use the hash function specified in the options when computing digests. +func LoadSignerFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (Signer, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerWithOpts(priv, opts...) +} + +// LoadDefaultSigner returns a signature.Signer based on the private key. +// Each private key has a corresponding PublicKeyDetails associated in the +// Sigstore ecosystem, see Algorithm Registry for more details. +func LoadDefaultSigner(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) { + signer, ok := privateKey.(crypto.Signer) + if !ok { + return nil, errors.New("private key does not implement signature.Signer") + } + algorithmDetails, err := GetDefaultAlgorithmDetails(signer.Public(), opts...) + if err != nil { + return nil, err + } + return LoadSignerFromAlgorithmDetails(privateKey, algorithmDetails, opts...) +} + +// LoadSignerFromAlgorithmDetails returns a signature.Signer based on +// the algorithm details and the user's choice of options. +func LoadSignerFromAlgorithmDetails(privateKey crypto.PrivateKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (Signer, error) { + filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...) + return LoadSignerWithOpts(privateKey, filteredOpts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go new file mode 100644 index 000000000000..9ff93420ecc4 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go @@ -0,0 +1,127 @@ +// +// Copyright 2021 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "os" + "path/filepath" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// SignerVerifier creates and verifies digital signatures over a message using a specified key pair +type SignerVerifier interface { + Signer + Verifier +} + +// LoadSignerVerifier returns a signature.SignerVerifier based on the algorithm of the private key +// provided. +// +// If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a +// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly. +func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) { + return LoadSignerVerifierWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerVerifierWithOpts returns a signature.SignerVerifier based on the +// algorithm of the private key provided and the user's choice. +func LoadSignerVerifierWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := privateKey.(type) { + case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSignerVerifier(pk, hashFunc, rsaPSSOptions) + } + return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc) + case *ecdsa.PrivateKey: + return LoadECDSASignerVerifier(pk, hashFunc) + case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSignerVerifier(pk) + } + return LoadED25519SignerVerifier(pk) + } + return nil, errors.New("unsupported public key type") +} + +// LoadSignerVerifierFromPEMFile returns a signature.SignerVerifier based on the algorithm of the private key +// in the file. The SignerVerifier will use the hash function specified when computing digests. +// +// If publicKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a +// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() and +// cryptoutils.UnmarshalPEMToPrivateKey() methods directly. +func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (SignerVerifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerVerifier(priv, hashFunc) +} + +// LoadSignerVerifierFromPEMFileWithOpts returns a signature.SignerVerifier based on the algorithm of the private key +// in the file. The SignerVerifier will use the hash function specified in the options when computing digests. 
+func LoadSignerVerifierFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (SignerVerifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerVerifierWithOpts(priv, opts...) +} + +// LoadDefaultSignerVerifier returns a signature.SignerVerifier based on +// the private key. Each private key has a corresponding PublicKeyDetails +// associated in the Sigstore ecosystem, see Algorithm Registry for more details. +func LoadDefaultSignerVerifier(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) { + signer, ok := privateKey.(crypto.Signer) + if !ok { + return nil, errors.New("private key does not implement signature.Signer") + } + algorithmDetails, err := GetDefaultAlgorithmDetails(signer.Public(), opts...) + if err != nil { + return nil, err + } + return LoadSignerVerifierFromAlgorithmDetails(privateKey, algorithmDetails, opts...) +} + +// LoadSignerVerifierFromAlgorithmDetails returns a signature.SignerVerifier based on +// the algorithm details and the user's choice of options. +func LoadSignerVerifierFromAlgorithmDetails(privateKey crypto.PrivateKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (SignerVerifier, error) { + filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...) + return LoadSignerVerifierWithOpts(privateKey, filteredOpts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go new file mode 100644 index 000000000000..3f8beff49c85 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go @@ -0,0 +1,74 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
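A short sketch of the loader dispatch shown above: the concrete key type decides which implementation is returned, so an *ecdsa.PrivateKey routes to the ECDSA SignerVerifier (example code, not part of the patch):

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent to LoadSignerVerifierWithOpts(priv, options.WithHash(crypto.SHA256)).
	sv, err := signature.LoadSignerVerifier(priv, crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	_ = sv
}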
+ +package signature + +import ( + "bytes" + "crypto/rsa" + "encoding/json" + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + + "github.com/sigstore/sigstore/pkg/signature/options" + sigpayload "github.com/sigstore/sigstore/pkg/signature/payload" +) + +// SignImage signs a container manifest using the specified signer object +func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map[string]interface{}) (payload, signature []byte, err error) { + imgPayload := sigpayload.Cosign{ + Image: image, + Annotations: optionalAnnotations, + } + payload, err = json.Marshal(imgPayload) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal payload to JSON: %w", err) + } + signature, err = signer.SignMessage(bytes.NewReader(payload)) + if err != nil { + return nil, nil, fmt.Errorf("failed to sign payload: %w", err) + } + return payload, signature, nil +} + +// VerifyImageSignature verifies a signature over a container manifest +func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (image name.Digest, annotations map[string]interface{}, err error) { + if err := signer.VerifySignature(bytes.NewReader(signature), bytes.NewReader(payload)); err != nil { + return name.Digest{}, nil, fmt.Errorf("signature verification failed: %w", err) + } + var imgPayload sigpayload.Cosign + if err := json.Unmarshal(payload, &imgPayload); err != nil { + return name.Digest{}, nil, fmt.Errorf("could not deserialize image payload: %w", err) + } + return imgPayload.Image, imgPayload.Annotations, nil +} + +// GetOptsFromAlgorithmDetails returns a list of LoadOptions that are +// appropriate for the given algorithm details. It ignores the hash type because +// that can be retrieved from the algorithm details. +func GetOptsFromAlgorithmDetails(algorithmDetails AlgorithmDetails, opts ...LoadOption) []LoadOption { + res := []LoadOption{options.WithHash(algorithmDetails.hashType)} + for _, opt := range opts { + var useED25519ph bool + var rsaPSSOptions *rsa.PSSOptions + opt.ApplyED25519ph(&useED25519ph) + opt.ApplyRSAPSS(&rsaPSSOptions) + if useED25519ph || rsaPSSOptions != nil { + res = append(res, opt) + } + } + return res +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go new file mode 100644 index 000000000000..0b5a1bba7c06 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go @@ -0,0 +1,156 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
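A sketch of signing and verifying a container image payload with SignImage and VerifyImageSignature above; the image reference and annotation values are placeholders:

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	sv, _, err := signature.NewDefaultRSAPKCS1v15SignerVerifier()
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder reference; any repo@sha256:<64 hex chars> digest parses.
	img, err := name.NewDigest("example.com/repo/app@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		log.Fatal(err)
	}

	payload, sig, err := signature.SignImage(sv, img, map[string]interface{}{"builder": "ci"})
	if err != nil {
		log.Fatal(err)
	}

	if _, _, err := signature.VerifyImageSignature(sv, payload, sig); err != nil {
		log.Fatal(err)
	}
}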
+ +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "io" + "os" + "path/filepath" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// Verifier verifies the digital signature using a specified public key +type Verifier interface { + PublicKeyProvider + VerifySignature(signature, message io.Reader, opts ...VerifyOption) error +} + +// LoadVerifier returns a signature.Verifier based on the algorithm of the public key +// provided that will use the hash function specified when computing digests. +// +// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a +// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly. +func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) { + return LoadVerifierWithOpts(publicKey, options.WithHash(hashFunc)) +} + +// LoadVerifierWithOpts returns a signature.Verifier based on the algorithm of the public key +// provided that will use the hash function specified when computing digests. +func LoadVerifierWithOpts(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := publicKey.(type) { + case *rsa.PublicKey: + if rsaPSSOptions != nil { + return LoadRSAPSSVerifier(pk, hashFunc, rsaPSSOptions) + } + return LoadRSAPKCS1v15Verifier(pk, hashFunc) + case *ecdsa.PublicKey: + return LoadECDSAVerifier(pk, hashFunc) + case ed25519.PublicKey: + if useED25519ph { + return LoadED25519phVerifier(pk) + } + return LoadED25519Verifier(pk) + } + return nil, errors.New("unsupported public key type") +} + +// LoadUnsafeVerifier returns a signature.Verifier based on the algorithm of the public key +// provided that will use SHA1 when computing digests for RSA and ECDSA signatures. +// +// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a +// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly. +func LoadUnsafeVerifier(publicKey crypto.PublicKey) (Verifier, error) { + switch pk := publicKey.(type) { + case *rsa.PublicKey: + if pk == nil { + return nil, errors.New("invalid RSA public key specified") + } + return &RSAPKCS1v15Verifier{ + publicKey: pk, + hashFunc: crypto.SHA1, + }, nil + case *ecdsa.PublicKey: + if pk == nil { + return nil, errors.New("invalid ECDSA public key specified") + } + return &ECDSAVerifier{ + publicKey: pk, + hashFunc: crypto.SHA1, + }, nil + case ed25519.PublicKey: + return LoadED25519Verifier(pk) + } + return nil, errors.New("unsupported public key type") +} + +// LoadVerifierFromPEMFile returns a signature.Verifier based on the contents of a +// file located at path. The Verifier wil use the hash function specified when computing digests. +// +// If the publickey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a +// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() and cryptoutils.UnmarshalPEMToPublicKey() methods directly. 
+func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes) + if err != nil { + return nil, err + } + + return LoadVerifier(pubKey, hashFunc) +} + +// LoadVerifierFromPEMFileWithOpts returns a signature.Verifier based on the contents of a +// file located at path. The Verifier will use the hash function specified in the options when computing digests. +func LoadVerifierFromPEMFileWithOpts(path string, opts ...LoadOption) (Verifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes) + if err != nil { + return nil, err + } + + return LoadVerifierWithOpts(pubKey, opts...) +} + +// LoadDefaultVerifier returns a signature.Verifier based on the public key. +// Each public key has a corresponding PublicKeyDetails associated in the +// Sigstore ecosystem, see Algorithm Registry for more details. +func LoadDefaultVerifier(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) { + algorithmDetails, err := GetDefaultAlgorithmDetails(publicKey, opts...) + if err != nil { + return nil, err + } + return LoadVerifierFromAlgorithmDetails(publicKey, algorithmDetails, opts...) +} + +// LoadVerifierFromAlgorithmDetails returns a signature.Verifier based on +// the algorithm details and the user's choice of options. +func LoadVerifierFromAlgorithmDetails(publicKey crypto.PublicKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (Verifier, error) { + filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...) + return LoadVerifierWithOpts(publicKey, filteredOpts...) +} diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md b/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md new file mode 100644 index 000000000000..016e119f4456 --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md @@ -0,0 +1,132 @@ +# Contributing + +When contributing to this repository, please first discuss the change you wish +to make via an [issue](https://github.com/sigstore/timestamp-authority/issues). + +## Pull Request Process + +1. Create an [issue](https://github.com/sigstore/timestamp-authority/issues) + outlining the fix or feature. +2. Fork the timestamp-authority repository to your own GitHub account and clone it locally. +3. Hack on your changes. +4. Update the README.md with details of changes to any interface; this includes new environment + variables, exposed ports, useful file locations, CLI parameters and + new or changed configuration values. +5. Correctly format your commit message; see [Commit Messages](#commit-message-guidelines) + below. +6. Ensure that CI passes; if it fails, fix the failures. +7. Every pull request requires a review from the [core timestamp-authority team](https://github.com/orgs/sigstore/teams/tsa-codeowners) + before merging. +8. If your pull request consists of more than one commit, please squash your + commits as described in [Squash Commits](#squash-commits). + +## Commit Message Guidelines + +We follow the commit formatting recommendations found on [Chris Beams' How to Write a Git Commit Message article](https://chris.beams.io/posts/git-commit/).
+ +Well-formed commit messages not only help reviewers understand the nature of +the Pull Request, but also assist the release process, where commit messages +are used to generate release notes. + +A good example of a commit message would be as follows: + +```text +Summarize changes in around 50 characters or less + +More detailed explanatory text, if necessary. Wrap it to about 72 +characters or so. In some contexts, the first line is treated as the +subject of the commit and the rest of the text as the body. The +blank line separating the summary from the body is critical (unless +you omit the body entirely); various tools like `log`, `shortlog` +and `rebase` can get confused if you run the two together. + +Explain the problem that this commit is solving. Focus on why you +are making this change as opposed to how (the code explains that). +Are there side effects or other unintuitive consequences of this +change? Here's the place to explain them. + +Further paragraphs come after blank lines. + + - Bullet points are okay, too + + - Typically a hyphen or asterisk is used for the bullet, preceded + by a single space, with blank lines in between, but conventions + vary here + +If you use an issue tracker, put references to them at the bottom, +like this: + +Resolves: #123 +See also: #456, #789 +``` + +Note the `Resolves #123` tag; this references the issue raised and allows us to +ensure issues are associated and closed when a pull request is merged. + +Please refer to [the GitHub help page on message types](https://help.github.com/articles/closing-issues-using-keywords/) for a complete list of issue references. + +## Squash Commits + +Should your pull request consist of more than one commit (perhaps due to +a change being requested during the review cycle), please perform a git squash +once a reviewer has approved your pull request. + +A squash can be performed as follows. Let's say you have the following commits: + +```text +initial commit +second commit +final commit +``` + +Run the command below with the number set to the total commits you wish to +squash (in our case 3 commits): + +```shell +git rebase -i HEAD~3 +``` + +Your default text editor will then open up and you will see the following: + +```shell +pick eb36612 initial commit +pick 9ac8968 second commit +pick a760569 final commit + +# Rebase eb1429f..a760569 onto eb1429f (3 commands) +``` + +We want to rebase on top of our first commit, so we change the other two commits +to `squash`: + +```shell +pick eb36612 initial commit +squash 9ac8968 second commit +squash a760569 final commit +``` + +After this, should you wish to update your commit message to better summarise +all of your pull request, run: + +```shell +git commit --amend +``` + +You will then need to force push (assuming your initial commit(s) were posted +to GitHub): + +```shell +git push origin your-branch --force +``` + +Alternatively, a core member can squash your commits within GitHub. + +## DCO Signoff + +Make sure to sign the [Developer Certificate of +Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff). + +## Code of Conduct + +Sigstore Timestamp-Authority adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct. +Please take a moment to read the [CODE_OF_CONDUCT.md](/CODE_OF_CONDUCT.md) document.
diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt b/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt new file mode 100644 index 000000000000..5f2c003d6d16 --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt @@ -0,0 +1,13 @@ +Copyright 2022 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE b/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE new file mode 100644 index 000000000000..f49a4e16e68b --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go new file mode 100644 index 000000000000..4f6c77c79257 --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go @@ -0,0 +1,340 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package verification + +import ( + "bytes" + "crypto/x509" + "encoding/asn1" + "fmt" + "hash" + "io" + "math/big" + + "github.com/digitorus/pkcs7" + "github.com/digitorus/timestamp" + "github.com/pkg/errors" +) + +var ( + // EKUOID is the Extended Key Usage OID, per RFC 5280 + EKUOID = asn1.ObjectIdentifier{2, 5, 29, 37} +) + +// VerifyOpts contains verification options for a RFC3161 timestamp +type VerifyOpts struct { + // OID verifies that the TSR's OID has an expected value. Optional, used when + // an alternative OID was passed with a request to the TSA + OID asn1.ObjectIdentifier + // TSACertificate verifies that the TSR uses the TSACertificate as expected. Optional if the TSR contains the TSA certificate + TSACertificate *x509.Certificate + // Intermediates verifies the TSR's certificate. Optional, used for chain building + Intermediates []*x509.Certificate + // Roots is the set of trusted root certificates that verifies the TSR's certificate + Roots []*x509.Certificate + // Nonce verifies that the TSR contains the expected nonce. Optional, used when + // an optional nonce was passed with a request to the TSA + Nonce *big.Int + // CommonName verifies that the TSR certificate subject's Common Name matches the expected value. Optional + CommonName string +} + +// Verify the TSR's certificate identifier matches a provided TSA certificate +func verifyESSCertID(tsaCert *x509.Certificate, opts VerifyOpts) error { + if opts.TSACertificate == nil { + return nil + } + + if !bytes.Equal(opts.TSACertificate.RawIssuer, tsaCert.RawIssuer) { + return fmt.Errorf("TSR cert issuer does not match provided TSA cert issuer") + } + + if opts.TSACertificate.SerialNumber.Cmp(tsaCert.SerialNumber) != 0 { + return fmt.Errorf("TSR cert serial number does not match provided TSA cert serial number") + } + + return nil +} + +// Verify the leaf certificate's subject Common Name matches a provided Common Name +func verifySubjectCommonName(cert *x509.Certificate, opts VerifyOpts) error { + if opts.CommonName == "" { + return nil + } + + if cert.Subject.CommonName != opts.CommonName { + return fmt.Errorf("the certificate's subject Common Name %s does not match the provided Common Name %s", cert.Subject.CommonName, opts.CommonName) + } + return nil +} + +// If embedded in the TSR, verify the TSR's leaf certificate matches a provided TSA certificate +func verifyEmbeddedLeafCert(tsaCert *x509.Certificate, opts VerifyOpts) error { + if opts.TSACertificate != nil && !opts.TSACertificate.Equal(tsaCert) { + return fmt.Errorf("certificate embedded in the TSR does not match the provided TSA certificate") + } + return nil +} + +// Verify the leaf's EKU is set to critical, per RFC 3161 2.3 +func verifyLeafCertCriticalEKU(cert *x509.Certificate) error { + var criticalEKU bool + for _, ext := range cert.Extensions { + if ext.Id.Equal(EKUOID) { + criticalEKU = ext.Critical + break + } + } + if !criticalEKU { + return errors.New("certificate must set EKU to critical") + } + return nil +} + +func verifyLeafCert(ts timestamp.Timestamp, opts VerifyOpts) error { + if len(ts.Certificates) == 0 && opts.TSACertificate == nil { + return fmt.Errorf("leaf certificate must be present the in TSR or as a verify option") + } + + errMsg := "failed to verify TSA certificate" + + var leafCert *x509.Certificate + if len(ts.Certificates) != 0 { + for _, c := range ts.Certificates { + if !c.IsCA { + leafCert = c + break + } + } + if leafCert == nil { + return fmt.Errorf("no leaf certificate found in chain") + } + + err := 
verifyEmbeddedLeafCert(leafCert, opts) + if err != nil { + return fmt.Errorf("%s: %w", errMsg, err) + } + } else { + leafCert = opts.TSACertificate + } + + err := verifyLeafCertCriticalEKU(leafCert) + if err != nil { + return fmt.Errorf("%s: %w", errMsg, err) + } + + err = verifyESSCertID(leafCert, opts) + if err != nil { + return fmt.Errorf("%s: %w", errMsg, err) + } + + err = verifySubjectCommonName(leafCert, opts) + if err != nil { + return fmt.Errorf("%s: %w", errMsg, err) + } + + // verifies that the leaf certificate and any intermediate certificates + // have EKU set to only time stamping usage + err = verifyLeafAndIntermediatesTimestampingEKU(leafCert, opts) + if err != nil { + return fmt.Errorf("failed to verify EKU on leaf certificate: %w", err) + } + + return nil +} + +func verifyLeafExtendedKeyUsage(cert *x509.Certificate) error { + certEKULen := len(cert.ExtKeyUsage) + if certEKULen != 1 { + return fmt.Errorf("certificate has %d extended key usages, expected only one", certEKULen) + } + + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageTimeStamping { + return fmt.Errorf("leaf certificate EKU is not set to TimeStamping as required") + } + return nil +} + +func verifyIntermediateExtendedKeyUsage(cert *x509.Certificate) error { + // If no EKU specified it means unrestricted usage + if len(cert.ExtKeyUsage) == 0 { + return nil + } + + allowsTimestampingUse := false + for _, eku := range cert.ExtKeyUsage { + if eku == x509.ExtKeyUsageTimeStamping || eku == x509.ExtKeyUsageAny { + allowsTimestampingUse = true + break + } + } + + if !allowsTimestampingUse { + return errors.New("intermediate certificate does not allow Timestamping usage") + } + + return nil +} + +// Verify the leaf and intermediate certificates (called "EKU chaining") all +// have the appropriate extended key usage set. +// Leaf certificates must have exactly one EKU set to Timestamping +// Intermediates can have no EKU (unrestricted) or multiple EKUs, +// which need to include Timestamping or UsageAny. +func verifyLeafAndIntermediatesTimestampingEKU(leafCert *x509.Certificate, opts VerifyOpts) error { + err := verifyLeafExtendedKeyUsage(leafCert) + if err != nil { + return fmt.Errorf("failed to verify EKU on leaf certificate: %w", err) + } + + for _, cert := range opts.Intermediates { + err := verifyIntermediateExtendedKeyUsage(cert) + if err != nil { + return fmt.Errorf("failed to verify EKU on intermediate certificate: %w", err) + } + } + return nil +} + +// Verify the OID of the TSR matches an expected OID +func verifyOID(oid []int, opts VerifyOpts) error { + if opts.OID == nil { + return nil + } + responseOID := opts.OID + if len(oid) != len(responseOID) { + return fmt.Errorf("OID lengths do not match") + } + for i, v := range oid { + if v != responseOID[i] { + return fmt.Errorf("OID content does not match") + } + } + return nil +} + +// Verify the nonce - Mostly important for when the response is first returned +func verifyNonce(requestNonce *big.Int, opts VerifyOpts) error { + if opts.Nonce == nil { + return nil + } + if opts.Nonce.Cmp(requestNonce) != 0 { + return fmt.Errorf("incoming nonce %d does not match TSR nonce %d", requestNonce, opts.Nonce) + } + return nil +} + +// VerifyTimestampResponse the timestamp response using a timestamp certificate chain. 
+func VerifyTimestampResponse(tsrBytes []byte, artifact io.Reader, opts VerifyOpts) (*timestamp.Timestamp, error) { + // Verify the status of the TSR does not contain an error + // handled by the timestamp.ParseResponse function + ts, err := timestamp.ParseResponse(tsrBytes) + if err != nil { + pe := timestamp.ParseError("") + if errors.As(err, &pe) { + return nil, fmt.Errorf("timestamp response is not valid: %w", err) + } + return nil, fmt.Errorf("error parsing response into Timestamp: %w", err) + } + + // verify the timestamp response signature using the provided certificate pool + if err = verifyTSRWithChain(ts, opts); err != nil { + return nil, err + } + + if err = verifyNonce(ts.Nonce, opts); err != nil { + return nil, err + } + + if err = verifyOID(ts.Policy, opts); err != nil { + return nil, err + } + + if err = verifyLeafCert(*ts, opts); err != nil { + return nil, err + } + + // verify the hash in the timestamp response matches the artifact hash + if err = verifyHashedMessages(ts.HashAlgorithm.New(), ts.HashedMessage, artifact); err != nil { + return nil, err + } + + // if the parsed timestamp is verified, return the timestamp + return ts, nil +} + +func verifyTSRWithChain(ts *timestamp.Timestamp, opts VerifyOpts) error { + p7Message, err := pkcs7.Parse(ts.RawToken) + if err != nil { + return fmt.Errorf("error parsing hashed message: %w", err) + } + + if len(opts.Roots) == 0 { + return fmt.Errorf("no root certificates provided for verifying the certificate chain") + } + rootCertPool := x509.NewCertPool() + for _, cert := range opts.Roots { + if cert != nil { + rootCertPool.AddCert(cert) + } + } + if rootCertPool.Equal(x509.NewCertPool()) { + return fmt.Errorf("no valid root certificates provided for verifying the certificate chain") + } + intermediateCertPool := x509.NewCertPool() + for _, cert := range opts.Intermediates { + if cert != nil { + intermediateCertPool.AddCert(cert) + } + } + + x509Opts := x509.VerifyOptions{ + Roots: rootCertPool, + Intermediates: intermediateCertPool, + } + + // if the PKCS7 object does not have any certificates set in the + // Certificates field, the VerifyWithChain method will fail because it will be + // unable to find a leaf certificate associated with a signer.
Since the + // leaf certificate issuer and serial number information is already part of + // the PKCS7 object, adding the leaf certificate to the Certificates field + // will allow verification to pass + if p7Message.Certificates == nil && opts.TSACertificate != nil { + p7Message.Certificates = []*x509.Certificate{opts.TSACertificate} + } + + err = p7Message.VerifyWithOpts(x509Opts) + if err != nil { + return fmt.Errorf("error while verifying with chain: %w", err) + } + + return nil +} + +// Verify that the TSR's hashed message matches the digest of the artifact to be timestamped +func verifyHashedMessages(hashAlg hash.Hash, hashedMessage []byte, artifactReader io.Reader) error { + h := hashAlg + if _, err := io.Copy(h, artifactReader); err != nil { + return fmt.Errorf("failed to create hash %w", err) + } + localHashedMsg := h.Sum(nil) + + if !bytes.Equal(localHashedMsg, hashedMessage) { + return fmt.Errorf("hashed messages don't match") + } + + return nil +} diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go new file mode 100644 index 000000000000..ba7511d7551d --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go @@ -0,0 +1,47 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verification + +import ( + "crypto" + "fmt" + + "github.com/digitorus/timestamp" + "github.com/pkg/errors" +) + +var ErrWeakHashAlg = errors.New("weak hash algorithm: must be SHA-256, SHA-384, or SHA-512") +var ErrUnsupportedHashAlg = errors.New("unsupported hash algorithm") +var ErrInconsistentDigestLength = errors.New("digest length inconsistent with specified hash algorithm") + +func VerifyRequest(ts *timestamp.Request) error { + // only SHA-1, SHA-256, SHA-384, and SHA-512 are supported by the underlying library + switch ts.HashAlgorithm { + case crypto.SHA1: + return ErrWeakHashAlg + case crypto.SHA256, crypto.SHA384, crypto.SHA512: + default: + return ErrUnsupportedHashAlg + } + + expectedDigestLength := ts.HashAlgorithm.Size() + actualDigestLength := len(ts.HashedMessage) + + if actualDigestLength != expectedDigestLength { + return fmt.Errorf("%w: expected %d bytes, got %d bytes", ErrInconsistentDigestLength, expectedDigestLength, actualDigestLength) + } + + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE new file mode 100644 index 000000000000..85541be2e1b0 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 The Update Framework Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE new file mode 100644 index 000000000000..09005219963b --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE @@ -0,0 +1,9 @@ +Copyright 2024 The Update Framework Authors + +Apache 2.0 License +Copyright 2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (/). + +SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go new file mode 100644 index 000000000000..2123e57dfc3c --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go @@ -0,0 +1,144 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package config + +import ( + "fmt" + "net/http" + "net/url" + "os" + "time" + + "github.com/cenkalti/backoff/v5" + "github.com/theupdateframework/go-tuf/v2/metadata/fetcher" +) + +type UpdaterConfig struct { + // TUF configuration + MaxRootRotations int64 + MaxDelegations int + RootMaxLength int64 + TimestampMaxLength int64 + SnapshotMaxLength int64 + TargetsMaxLength int64 + // Updater configuration + Fetcher fetcher.Fetcher + LocalTrustedRoot []byte + LocalMetadataDir string + LocalTargetsDir string + RemoteMetadataURL string + RemoteTargetsURL string + DisableLocalCache bool + PrefixTargetsWithHash bool + // UnsafeLocalMode only uses the metadata as written on disk + // if the metadata is incomplete, calling updater.Refresh will fail + UnsafeLocalMode bool +} + +// New creates a new UpdaterConfig instance used by the Updater to +// store configuration +func New(remoteURL string, rootBytes []byte) (*UpdaterConfig, error) { + // Default URL for target files - /targets + targetsURL, err := url.JoinPath(remoteURL, "targets") + if err != nil { + return nil, err + } + + return &UpdaterConfig{ + // TUF configuration + MaxRootRotations: 256, + MaxDelegations: 32, + RootMaxLength: 512000, // bytes + TimestampMaxLength: 16384, // bytes + SnapshotMaxLength: 2000000, // bytes + TargetsMaxLength: 5000000, // bytes + // Updater configuration + Fetcher: fetcher.NewDefaultFetcher(), // use the default built-in download fetcher + LocalTrustedRoot: rootBytes, // trusted root.json + RemoteMetadataURL: remoteURL, // URL of where the TUF metadata is + RemoteTargetsURL: targetsURL, // URL of where the target files should be downloaded from + DisableLocalCache: false, // enable local caching of trusted metadata + PrefixTargetsWithHash: true, // use hash-prefixed target files with consistent snapshots + UnsafeLocalMode: false, + }, nil +} + +func (cfg *UpdaterConfig) EnsurePathsExist() error { + if cfg.DisableLocalCache { + return nil + } + + for _, path := range []string{cfg.LocalMetadataDir, cfg.LocalTargetsDir} { + 
if err := os.MkdirAll(path, os.ModePerm); err != nil { + return err + } + } + + return nil +} + +func (cfg *UpdaterConfig) SetDefaultFetcherHTTPClient(client *http.Client) error { + // Check if the configured fetcher is the default fetcher + // since we are only configuring a http.Client value for the default fetcher + df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + df.SetHTTPClient(client) + cfg.Fetcher = df + return nil +} + +func (cfg *UpdaterConfig) SetDefaultFetcherTransport(rt http.RoundTripper) error { + // Check if the configured fetcher is the default fetcher + // since we are only configuring a Transport value for the default fetcher + df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + if err := df.SetTransport(rt); err != nil { + return err + } + cfg.Fetcher = df + return nil +} + +// SetDefaultFetcherRetry sets the constant retry interval and count for the default fetcher +func (cfg *UpdaterConfig) SetDefaultFetcherRetry(retryInterval time.Duration, retryCount uint) error { + // Check if the configured fetcher is the default fetcher + // since we are only configuring the retry interval and count for the default fetcher + df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + df.SetRetry(retryInterval, retryCount) + cfg.Fetcher = df + return nil +} + +func (cfg *UpdaterConfig) SetRetryOptions(retryOptions ...backoff.RetryOption) error { + // Check if the configured fetcher is the default fetcher + // since we are only configuring retry options for the default fetcher + df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + df.SetRetryOptions(retryOptions...) + cfg.Fetcher = df + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go new file mode 100644 index 000000000000..3bd8e5ea718c --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go @@ -0,0 +1,246 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "fmt" +) + +// Define TUF error types used inside the new modern implementation. +// The names chosen for TUF error types should start in 'Err' except where +// there is a good reason not to, and provide that reason in those cases. + +// Repository errors + +// ErrRepository - an error with a repository's state, such as a missing file. 
+// It covers all exceptions that come from the repository side when +// looking from the perspective of users of metadata API or client +type ErrRepository struct { + Msg string +} + +func (e *ErrRepository) Error() string { + return fmt.Sprintf("repository error: %s", e.Msg) +} + +func (e *ErrRepository) Is(target error) bool { + _, ok := target.(*ErrRepository) + return ok +} + +// ErrUnsignedMetadata - An error about metadata object with insufficient threshold of signatures +type ErrUnsignedMetadata struct { + Msg string +} + +func (e *ErrUnsignedMetadata) Error() string { + return fmt.Sprintf("unsigned metadata error: %s", e.Msg) +} + +// ErrUnsignedMetadata is a subset of ErrRepository +func (e *ErrUnsignedMetadata) Is(target error) bool { + if _, ok := target.(*ErrUnsignedMetadata); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrBadVersionNumber - An error for metadata that contains an invalid version number +type ErrBadVersionNumber struct { + Msg string +} + +func (e *ErrBadVersionNumber) Error() string { + return fmt.Sprintf("bad version number error: %s", e.Msg) +} + +// ErrBadVersionNumber is a subset of ErrRepository +func (e *ErrBadVersionNumber) Is(target error) bool { + if _, ok := target.(*ErrBadVersionNumber); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrEqualVersionNumber - An error for metadata containing a previously verified version number +type ErrEqualVersionNumber struct { + Msg string +} + +func (e *ErrEqualVersionNumber) Error() string { + return fmt.Sprintf("equal version number error: %s", e.Msg) +} + +// ErrEqualVersionNumber is a subset of both ErrRepository and ErrBadVersionNumber +func (e *ErrEqualVersionNumber) Is(target error) bool { + if _, ok := target.(*ErrEqualVersionNumber); ok { + return true + } + if _, ok := target.(*ErrBadVersionNumber); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrExpiredMetadata - Indicate that a TUF Metadata file has expired +type ErrExpiredMetadata struct { + Msg string +} + +func (e *ErrExpiredMetadata) Error() string { + return fmt.Sprintf("expired metadata error: %s", e.Msg) +} + +// ErrExpiredMetadata is a subset of ErrRepository +func (e *ErrExpiredMetadata) Is(target error) bool { + if _, ok := target.(*ErrExpiredMetadata); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrLengthOrHashMismatch - An error while checking the length and hash values of an object +type ErrLengthOrHashMismatch struct { + Msg string +} + +func (e *ErrLengthOrHashMismatch) Error() string { + return fmt.Sprintf("length/hash verification error: %s", e.Msg) +} + +// ErrLengthOrHashMismatch is a subset of ErrRepository +func (e *ErrLengthOrHashMismatch) Is(target error) bool { + if _, ok := target.(*ErrLengthOrHashMismatch); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// Download errors + +// ErrDownload - An error occurred while attempting to download a file +type ErrDownload struct { + Msg string +} + +func (e *ErrDownload) Error() string { + return fmt.Sprintf("download error: %s", e.Msg) +} + +func (e *ErrDownload) Is(target error) bool { + _, ok := target.(*ErrDownload) + return ok +} + +// ErrDownloadLengthMismatch - Indicate that a mismatch of lengths was seen while downloading a file +type ErrDownloadLengthMismatch struct { 
+ Msg string +} + +func (e *ErrDownloadLengthMismatch) Error() string { + return fmt.Sprintf("download length mismatch error: %s", e.Msg) +} + +// ErrDownloadLengthMismatch is a subset of ErrDownload +func (e *ErrDownloadLengthMismatch) Is(target error) bool { + if _, ok := target.(*ErrDownloadLengthMismatch); ok { + return true + } + if _, ok := target.(*ErrDownload); ok { + return true + } + return false +} + +// ErrDownloadHTTP - Returned by Fetcher interface implementations for HTTP errors +type ErrDownloadHTTP struct { + StatusCode int + URL string +} + +func (e *ErrDownloadHTTP) Error() string { + return fmt.Sprintf("failed to download %s, http status code: %d", e.URL, e.StatusCode) +} + +// ErrDownloadHTTP is a subset of ErrDownload +func (e *ErrDownloadHTTP) Is(target error) bool { + if _, ok := target.(*ErrDownloadHTTP); ok { + return true + } + if _, ok := target.(*ErrDownload); ok { + return true + } + return false +} + +// ValueError +type ErrValue struct { + Msg string +} + +func (e *ErrValue) Error() string { + return fmt.Sprintf("value error: %s", e.Msg) +} + +func (e *ErrValue) Is(err error) bool { + _, ok := err.(*ErrValue) + return ok +} + +// TypeError +type ErrType struct { + Msg string +} + +func (e *ErrType) Error() string { + return fmt.Sprintf("type error: %s", e.Msg) +} + +func (e *ErrType) Is(err error) bool { + _, ok := err.(*ErrType) + return ok +} + +// RuntimeError +type ErrRuntime struct { + Msg string +} + +func (e *ErrRuntime) Error() string { + return fmt.Sprintf("runtime error: %s", e.Msg) +} + +func (e *ErrRuntime) Is(err error) bool { + _, ok := err.(*ErrRuntime) + return ok +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go new file mode 100644 index 000000000000..71796b7891da --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go @@ -0,0 +1,172 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package fetcher + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "time" + + "github.com/cenkalti/backoff/v5" + "github.com/theupdateframework/go-tuf/v2/metadata" +) + +// httpClient interface allows us to either provide a live http.Client +// or a mock implementation for testing purposes +type httpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// Fetcher interface +type Fetcher interface { + // DownloadFile downloads a file from the provided URL, reading + // up to maxLength of bytes before it aborts. + // The timeout argument is deprecated and not used. To configure + // the timeout (or retries), modify the fetcher instead. For the + // DefaultFetcher the underlying HTTP client can be substituted. 
+ DownloadFile(urlPath string, maxLength int64, _ time.Duration) ([]byte, error) +} + +// DefaultFetcher implements Fetcher +type DefaultFetcher struct { + // httpClient configuration + httpUserAgent string + client httpClient + // retry logic configuration + retryOptions []backoff.RetryOption +} + +func (d *DefaultFetcher) SetHTTPUserAgent(httpUserAgent string) { + d.httpUserAgent = httpUserAgent +} + +// DownloadFile downloads a file from urlPath, errors out if it failed, +// its length is larger than maxLength or the timeout is reached. +func (d *DefaultFetcher) DownloadFile(urlPath string, maxLength int64, _ time.Duration) ([]byte, error) { + req, err := http.NewRequest("GET", urlPath, nil) + if err != nil { + return nil, err + } + // Use in case of multiple sessions. + if d.httpUserAgent != "" { + req.Header.Set("User-Agent", d.httpUserAgent) + } + + // For backwards compatibility, if the client is nil, use the default client. + if d.client == nil { + d.client = http.DefaultClient + } + + operation := func() ([]byte, error) { + // Execute the request. + res, err := d.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + // Handle HTTP status codes. + if res.StatusCode != http.StatusOK { + return nil, &metadata.ErrDownloadHTTP{StatusCode: res.StatusCode, URL: urlPath} + } + var length int64 + // Get content length from header (might not be accurate, -1 or not set). + if header := res.Header.Get("Content-Length"); header != "" { + length, err = strconv.ParseInt(header, 10, 0) + if err != nil { + return nil, err + } + // Error if the reported size is greater than what is expected. + if length > maxLength { + return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)} + } + } + // Although the size has been checked above, use a LimitReader in case + // the reported size is inaccurate, or size is -1 which indicates an + // unknown length. We read maxLength + 1 in order to check if the read data + // surpassed our set limit. + data, err := io.ReadAll(io.LimitReader(res.Body, maxLength+1)) + if err != nil { + return nil, err + } + // Error if the reported size is greater than what is expected. + length = int64(len(data)) + if length > maxLength { + return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)} + } + + return data, nil + } + data, err := backoff.Retry(context.Background(), operation, d.retryOptions...) 
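+	// Note: backoff.Retry from backoff/v5 re-runs the operation closure above
+	// according to d.retryOptions, returning the downloaded bytes on success
+	// or the last error once retrying stops.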
+ if err != nil { + return nil, err + } + + return data, nil +} + +func NewDefaultFetcher() *DefaultFetcher { + return &DefaultFetcher{ + client: http.DefaultClient, + // default to attempting the HTTP request once + retryOptions: []backoff.RetryOption{backoff.WithMaxTries(1)}, + } +} + +// NewFetcherWithHTTPClient creates a new DefaultFetcher with a custom httpClient +func (f *DefaultFetcher) NewFetcherWithHTTPClient(hc httpClient) *DefaultFetcher { + return &DefaultFetcher{ + client: hc, + } +} + +// NewFetcherWithRoundTripper creates a new DefaultFetcher with a custom RoundTripper +// The function will create a default http.Client and replace the transport with the provided RoundTripper implementation +func (f *DefaultFetcher) NewFetcherWithRoundTripper(rt http.RoundTripper) *DefaultFetcher { + client := http.DefaultClient + client.Transport = rt + return &DefaultFetcher{ + client: client, + } +} + +func (f *DefaultFetcher) SetHTTPClient(hc httpClient) { + f.client = hc +} + +func (f *DefaultFetcher) SetTransport(rt http.RoundTripper) error { + hc, ok := f.client.(*http.Client) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + hc.Transport = rt + f.client = hc + return nil +} + +func (f *DefaultFetcher) SetRetry(retryInterval time.Duration, retryCount uint) { + constantBackOff := backoff.WithBackOff(backoff.NewConstantBackOff(retryInterval)) + maxTryCount := backoff.WithMaxTries(retryCount) + f.SetRetryOptions(constantBackOff, maxTryCount) +} + +func (f *DefaultFetcher) SetRetryOptions(retryOptions ...backoff.RetryOption) { + f.retryOptions = retryOptions +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go new file mode 100644 index 000000000000..57e38612be19 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "fmt" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +const ( + KeyTypeEd25519 = "ed25519" + KeyTypeECDSA_SHA2_P256_COMPAT = "ecdsa-sha2-nistp256" + KeyTypeECDSA_SHA2_P256 = "ecdsa" + KeyTypeRSASSA_PSS_SHA256 = "rsa" + KeySchemeEd25519 = "ed25519" + KeySchemeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256" + KeySchemeECDSA_SHA2_P384 = "ecdsa-sha2-nistp384" + KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256" +) + +// ToPublicKey generate crypto.PublicKey from metadata type Key +func (k *Key) ToPublicKey() (crypto.PublicKey, error) { + switch k.Type { + case KeyTypeRSASSA_PSS_SHA256: + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey)) + if err != nil { + return nil, err + } + rsaKey, ok := publicKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid rsa public key") + } + // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(rsaKey); err != nil { + return nil, err + } + return rsaKey, nil + case KeyTypeECDSA_SHA2_P256, KeyTypeECDSA_SHA2_P256_COMPAT: // handle "ecdsa" too as python-tuf/sslib keys are using it for keytype instead of https://theupdateframework.github.io/specification/latest/index.html#keytype-ecdsa-sha2-nistp256 + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey)) + if err != nil { + return nil, err + } + ecdsaKey, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid ecdsa public key") + } + // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(ecdsaKey); err != nil { + return nil, err + } + return ecdsaKey, nil + case KeyTypeEd25519: + publicKey, err := hex.DecodeString(k.Value.PublicKey) + if err != nil { + return nil, err + } + ed25519Key := ed25519.PublicKey(publicKey) + // done for verification - ref. 
https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(ed25519Key); err != nil { + return nil, err + } + return ed25519Key, nil + } + return nil, fmt.Errorf("unsupported public key type") +} + +// KeyFromPublicKey generate metadata type Key from crypto.PublicKey +func KeyFromPublicKey(k crypto.PublicKey) (*Key, error) { + key := &Key{} + switch k := k.(type) { + case *rsa.PublicKey: + key.Type = KeyTypeRSASSA_PSS_SHA256 + key.Scheme = KeySchemeRSASSA_PSS_SHA256 + pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k) + if err != nil { + return nil, err + } + key.Value.PublicKey = string(pemKey) + case *ecdsa.PublicKey: + key.Type = KeyTypeECDSA_SHA2_P256 + key.Scheme = KeySchemeECDSA_SHA2_P256 + pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k) + if err != nil { + return nil, err + } + key.Value.PublicKey = string(pemKey) + case ed25519.PublicKey: + key.Type = KeyTypeEd25519 + key.Scheme = KeySchemeEd25519 + key.Value.PublicKey = hex.EncodeToString(k) + default: + return nil, fmt.Errorf("unsupported public key type") + } + return key, nil +} + +// ID returns the keyID value for the given Key +func (k *Key) ID() string { + // the identifier is a hexdigest of the SHA-256 hash of the canonical form of the key + if k.id == "" { + data, err := cjson.EncodeCanonical(k) + if err != nil { + panic(fmt.Errorf("error creating key ID: %w", err)) + } + digest := sha256.Sum256(data) + k.id = hex.EncodeToString(digest[:]) + } + return k.id +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go new file mode 100644 index 000000000000..c7cab38389b8 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go @@ -0,0 +1,45 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +var log Logger = DiscardLogger{} + +// Logger partially implements the go-log/logr's interface: +// https://github.com/go-logr/logr/blob/master/logr.go +type Logger interface { + // Info logs a non-error message with key/value pairs + Info(msg string, kv ...any) + // Error logs an error with a given message and key/value pairs. 
+ Error(err error, msg string, kv ...any) +} + +type DiscardLogger struct{} + +func (d DiscardLogger) Info(msg string, kv ...any) { +} + +func (d DiscardLogger) Error(err error, msg string, kv ...any) { +} + +func SetLogger(logger Logger) { + log = logger +} + +func GetLogger() Logger { + return log +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go new file mode 100644 index 000000000000..bd3f1e44bdc4 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go @@ -0,0 +1,567 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/hex" + "encoding/json" + "errors" +) + +// The following marshal/unmarshal methods override the default behavior for for each TUF type +// in order to support unrecognized fields + +func (signed RootType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["consistent_snapshot"] = signed.ConsistentSnapshot + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["keys"] = signed.Keys + dict["roles"] = signed.Roles + return json.Marshal(dict) +} + +func (signed *RootType) UnmarshalJSON(data []byte) error { + type Alias RootType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = RootType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "consistent_snapshot") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "keys") + delete(dict, "roles") + signed.UnrecognizedFields = dict + return nil +} + +func (signed SnapshotType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["meta"] = signed.Meta + return json.Marshal(dict) +} + +func (signed *SnapshotType) UnmarshalJSON(data []byte) error { + type Alias SnapshotType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = SnapshotType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "meta") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TimestampType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) 
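+		// (Any keys copied from UnrecognizedFields that collide with the
+		// known fields assigned below are overwritten, so the declared
+		// fields always take precedence.)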
+ } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["meta"] = signed.Meta + return json.Marshal(dict) +} + +func (signed *TimestampType) UnmarshalJSON(data []byte) error { + type Alias TimestampType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TimestampType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "meta") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TargetsType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["targets"] = signed.Targets + if signed.Delegations != nil { + dict["delegations"] = signed.Delegations + } + return json.Marshal(dict) +} + +func (signed *TargetsType) UnmarshalJSON(data []byte) error { + type Alias TargetsType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TargetsType(s) + + // populate the path field for each target + for name, targetFile := range signed.Targets { + targetFile.Path = name + } + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "targets") + delete(dict, "delegations") + signed.UnrecognizedFields = dict + return nil +} + +func (signed MetaFiles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + // length and hashes are optional + if signed.Length != 0 { + dict["length"] = signed.Length + } + if len(signed.Hashes) != 0 { + dict["hashes"] = signed.Hashes + } + dict["version"] = signed.Version + return json.Marshal(dict) +} + +func (signed *MetaFiles) UnmarshalJSON(data []byte) error { + type Alias MetaFiles + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = MetaFiles(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "length") + delete(dict, "hashes") + delete(dict, "version") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TargetFiles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["length"] = signed.Length + dict["hashes"] = signed.Hashes + if signed.Custom != nil { + dict["custom"] = signed.Custom + } + return json.Marshal(dict) +} + +func (signed *TargetFiles) UnmarshalJSON(data []byte) error { + type Alias TargetFiles + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TargetFiles(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "length") + delete(dict, "hashes") + delete(dict, "custom") + signed.UnrecognizedFields = dict + return nil +} + +func (key Key) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(key.UnrecognizedFields) != 0 { + 
copyMapValues(key.UnrecognizedFields, dict) + } + dict["keytype"] = key.Type + dict["scheme"] = key.Scheme + dict["keyval"] = key.Value + return json.Marshal(dict) +} + +func (key *Key) UnmarshalJSON(data []byte) error { + type Alias Key + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + // nolint + *key = Key(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keytype") + delete(dict, "scheme") + delete(dict, "keyval") + key.UnrecognizedFields = dict + return nil +} + +func (meta Metadata[T]) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(meta.UnrecognizedFields) != 0 { + copyMapValues(meta.UnrecognizedFields, dict) + } + dict["signed"] = meta.Signed + dict["signatures"] = meta.Signatures + return json.Marshal(dict) +} + +func (meta *Metadata[T]) UnmarshalJSON(data []byte) error { + tmp := any(new(T)) + var m map[string]any + if err := json.Unmarshal(data, &m); err != nil { + return err + } + switch tmp.(type) { + case *RootType: + dict := struct { + Signed RootType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *SnapshotType: + dict := struct { + Signed SnapshotType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *TimestampType: + dict := struct { + Signed TimestampType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *TargetsType: + dict := struct { + Signed TargetsType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + default: + return &ErrValue{Msg: "unrecognized metadata type"} + } + delete(m, "signed") + delete(m, "signatures") + meta.UnrecognizedFields = m + return nil +} + +func (s Signature) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(s.UnrecognizedFields) != 0 { + copyMapValues(s.UnrecognizedFields, dict) + } + dict["keyid"] = s.KeyID + dict["sig"] = s.Signature + return json.Marshal(dict) +} + +func (s *Signature) UnmarshalJSON(data []byte) error { + type Alias Signature + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *s = Signature(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyid") + delete(dict, "sig") + s.UnrecognizedFields = dict + return nil +} + +func (kv KeyVal) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(kv.UnrecognizedFields) != 0 { + copyMapValues(kv.UnrecognizedFields, dict) + } + dict["public"] = kv.PublicKey + return json.Marshal(dict) +} + +func (kv *KeyVal) UnmarshalJSON(data []byte) error { + type Alias KeyVal + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *kv = KeyVal(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "public") + kv.UnrecognizedFields 
= dict + return nil +} + +func (role Role) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + return json.Marshal(dict) +} + +func (role *Role) UnmarshalJSON(data []byte) error { + type Alias Role + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = Role(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyids") + delete(dict, "threshold") + role.UnrecognizedFields = dict + return nil +} + +func (d Delegations) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(d.UnrecognizedFields) != 0 { + copyMapValues(d.UnrecognizedFields, dict) + } + // only one is allowed + dict["keys"] = d.Keys + if d.Roles != nil { + dict["roles"] = d.Roles + } else if d.SuccinctRoles != nil { + dict["succinct_roles"] = d.SuccinctRoles + } + return json.Marshal(dict) +} + +func (d *Delegations) UnmarshalJSON(data []byte) error { + type Alias Delegations + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *d = Delegations(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keys") + delete(dict, "roles") + delete(dict, "succinct_roles") + d.UnrecognizedFields = dict + return nil +} + +func (role DelegatedRole) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["name"] = role.Name + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + dict["terminating"] = role.Terminating + // make sure we have only one of the two (per spec) + if role.Paths != nil && role.PathHashPrefixes != nil { + return nil, &ErrValue{Msg: "failed to marshal: not allowed to have both \"paths\" and \"path_hash_prefixes\" present"} + } + if role.Paths != nil { + dict["paths"] = role.Paths + } else if role.PathHashPrefixes != nil { + dict["path_hash_prefixes"] = role.PathHashPrefixes + } + return json.Marshal(dict) +} + +func (role *DelegatedRole) UnmarshalJSON(data []byte) error { + type Alias DelegatedRole + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = DelegatedRole(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "name") + delete(dict, "keyids") + delete(dict, "threshold") + delete(dict, "terminating") + delete(dict, "paths") + delete(dict, "path_hash_prefixes") + role.UnrecognizedFields = dict + return nil +} + +func (role SuccinctRoles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + dict["bit_length"] = role.BitLength + dict["name_prefix"] = role.NamePrefix + return json.Marshal(dict) +} + +func (role *SuccinctRoles) UnmarshalJSON(data []byte) error { + type Alias SuccinctRoles + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = SuccinctRoles(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyids") + delete(dict, "threshold") + delete(dict, "bit_length") + delete(dict, "name_prefix") + role.UnrecognizedFields = dict + return nil +} + +func (b 
*HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || len(data)%2 != 0 || data[0] != '"' || data[len(data)-1] != '"' { + return errors.New("tuf: invalid JSON hex bytes") + } + res := make([]byte, hex.DecodedLen(len(data)-2)) + _, err := hex.Decode(res, data[1:len(data)-1]) + if err != nil { + return err + } + *b = res + return nil +} + +func (b HexBytes) MarshalJSON() ([]byte, error) { + res := make([]byte, hex.EncodedLen(len(b))+2) + res[0] = '"' + res[len(res)-1] = '"' + hex.Encode(res[1:], b) + return res, nil +} + +func (b HexBytes) String() string { + return hex.EncodeToString(b) +} + +// copyMapValues copies the values of the src map to dst +func copyMapValues(src, dst map[string]any) { + for k, v := range src { + dst[k] = v + } +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go new file mode 100644 index 000000000000..0d0afd850ee6 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go @@ -0,0 +1,926 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/rsa" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "math" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + "time" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/sigstore/sigstore/pkg/signature" +) + +// Root return new metadata instance of type Root +func Root(expires ...time.Time) *Metadata[RootType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + // populate Roles + roles := map[string]*Role{} + for _, r := range []string{ROOT, SNAPSHOT, TARGETS, TIMESTAMP} { + roles[r] = &Role{ + KeyIDs: []string{}, + Threshold: 1, + } + } + log.Info("Created metadata", "type", ROOT) + return &Metadata[RootType]{ + Signed: RootType{ + Type: ROOT, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Keys: map[string]*Key{}, + Roles: roles, + ConsistentSnapshot: true, + }, + Signatures: []Signature{}, + } +} + +// Snapshot return new metadata instance of type Snapshot +func Snapshot(expires ...time.Time) *Metadata[SnapshotType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", SNAPSHOT) + return &Metadata[SnapshotType]{ + Signed: SnapshotType{ + Type: SNAPSHOT, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Meta: map[string]*MetaFiles{ + "targets.json": { + Version: 1, + }, + }, + }, + Signatures: []Signature{}, + } +} + +// Timestamp return new metadata instance of type Timestamp +func Timestamp(expires ...time.Time) *Metadata[TimestampType] { + // expire now if there's nothing 
set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", TIMESTAMP) + return &Metadata[TimestampType]{ + Signed: TimestampType{ + Type: TIMESTAMP, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Meta: map[string]*MetaFiles{ + "snapshot.json": { + Version: 1, + }, + }, + }, + Signatures: []Signature{}, + } +} + +// Targets return new metadata instance of type Targets +func Targets(expires ...time.Time) *Metadata[TargetsType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", TARGETS) + return &Metadata[TargetsType]{ + Signed: TargetsType{ + Type: TARGETS, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Targets: map[string]*TargetFiles{}, + }, + Signatures: []Signature{}, + } +} + +// TargetFile return new metadata instance of type TargetFiles +func TargetFile() *TargetFiles { + return &TargetFiles{ + Length: 0, + Hashes: Hashes{}, + } +} + +// MetaFile return new metadata instance of type MetaFile +func MetaFile(version int64) *MetaFiles { + if version < 1 { + // attempting to set incorrect version + log.Info("Attempting to set incorrect version for MetaFile", "version", version) + version = 1 + } + return &MetaFiles{ + Length: 0, + Hashes: Hashes{}, + Version: version, + } +} + +// FromFile load metadata from file +func (meta *Metadata[T]) FromFile(name string) (*Metadata[T], error) { + data, err := os.ReadFile(name) + if err != nil { + return nil, err + } + m, err := fromBytes[T](data) + if err != nil { + return nil, err + } + *meta = *m + log.Info("Loaded metadata from file", "name", name) + return meta, nil +} + +// FromBytes deserialize metadata from bytes +func (meta *Metadata[T]) FromBytes(data []byte) (*Metadata[T], error) { + m, err := fromBytes[T](data) + if err != nil { + return nil, err + } + *meta = *m + log.Info("Loaded metadata from bytes") + return meta, nil +} + +// ToBytes serialize metadata to bytes +func (meta *Metadata[T]) ToBytes(pretty bool) ([]byte, error) { + log.Info("Writing metadata to bytes") + if pretty { + return json.MarshalIndent(*meta, "", "\t") + } + return json.Marshal(*meta) +} + +// ToFile save metadata to file +func (meta *Metadata[T]) ToFile(name string, pretty bool) error { + log.Info("Writing metadata to file", "name", name) + data, err := meta.ToBytes(pretty) + if err != nil { + return err + } + return os.WriteFile(name, data, 0644) +} + +// Sign create signature over Signed and assign it to Signatures +func (meta *Metadata[T]) Sign(signer signature.Signer) (*Signature, error) { + // encode the Signed part to canonical JSON so signatures are consistent + payload, err := cjson.EncodeCanonical(meta.Signed) + if err != nil { + return nil, err + } + // sign the Signed part + sb, err := signer.SignMessage(bytes.NewReader(payload)) + if err != nil { + return nil, &ErrUnsignedMetadata{Msg: "problem signing metadata"} + } + // get the signer's PublicKey + publ, err := signer.PublicKey() + if err != nil { + return nil, err + } + // convert to TUF Key type to get keyID + key, err := KeyFromPublicKey(publ) + if err != nil { + return nil, err + } + // build signature + sig := &Signature{ + KeyID: key.ID(), + Signature: sb, + } + // update the Signatures part + meta.Signatures = append(meta.Signatures, *sig) + // return the new signature + log.Info("Signed metadata with key", "ID", key.ID()) + return sig, nil +} + +// VerifyDelegate verifies that 
delegatedMetadata is signed with the required +// threshold of keys for the delegated role delegatedRole +func (meta *Metadata[T]) VerifyDelegate(delegatedRole string, delegatedMetadata any) error { + i := any(meta) + signingKeys := map[string]bool{} + var keys map[string]*Key + var roleKeyIDs []string + var roleThreshold int + + log.Info("Verifying", "role", delegatedRole) + + // collect keys, keyIDs and threshold based on delegator type + switch i := i.(type) { + // Root delegator + case *Metadata[RootType]: + keys = i.Signed.Keys + if role, ok := (*i).Signed.Roles[delegatedRole]; ok { + roleKeyIDs = role.KeyIDs + roleThreshold = role.Threshold + } else { + // the delegated role was not found, no need to proceed + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + // Targets delegator + case *Metadata[TargetsType]: + if i.Signed.Delegations == nil { + return &ErrValue{Msg: "no delegations found"} + } + keys = i.Signed.Delegations.Keys + if i.Signed.Delegations.Roles != nil { + found := false + for _, v := range i.Signed.Delegations.Roles { + if v.Name == delegatedRole { + found = true + roleKeyIDs = v.KeyIDs + roleThreshold = v.Threshold + break + } + } + // the delegated role was not found, no need to proceed + if !found { + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + } else if i.Signed.Delegations.SuccinctRoles != nil { + roleKeyIDs = i.Signed.Delegations.SuccinctRoles.KeyIDs + roleThreshold = i.Signed.Delegations.SuccinctRoles.Threshold + } + default: + return &ErrType{Msg: "call is valid only on delegator metadata (should be either root or targets)"} + } + // if there are no keyIDs for that role it means there's no delegation found + if len(roleKeyIDs) == 0 { + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + // loop through each role keyID + for _, keyID := range roleKeyIDs { + key, ok := keys[keyID] + if !ok { + return &ErrValue{Msg: fmt.Sprintf("key with ID %s not found in %s keyids", keyID, delegatedRole)} + } + sign := Signature{} + var payload []byte + // convert to a PublicKey type + publicKey, err := key.ToPublicKey() + if err != nil { + return err + } + // use corresponding hash function for key type + hash := crypto.Hash(0) + if key.Type != KeyTypeEd25519 { + switch key.Scheme { + case KeySchemeECDSA_SHA2_P256: + hash = crypto.SHA256 + case KeySchemeECDSA_SHA2_P384: + hash = crypto.SHA384 + default: + hash = crypto.SHA256 + } + } + // load a verifier based on that key + // handle RSA PSS scheme separately as the LoadVerifier function doesn't identify it correctly + // Note we should support RSA PSS, not RSA PKCS1v15 (which is what LoadVerifier would return) + // Reference: https://theupdateframework.github.io/specification/latest/#file-formats-keys + var verifier signature.Verifier + if key.Type == KeyTypeRSASSA_PSS_SHA256 { + // Load a verifier for rsa + publicKeyRSAPSS, ok := publicKey.(*rsa.PublicKey) + if !ok { + return &ErrType{Msg: "failed to convert public key to RSA PSS key"} + } + verifier, err = signature.LoadRSAPSSVerifier(publicKeyRSAPSS, hash, &rsa.PSSOptions{Hash: crypto.SHA256}) + } else { + // Load a verifier for ed25519 and ecdsa + verifier, err = signature.LoadVerifier(publicKey, hash) + } + if err != nil { + return err + } + // collect the signature for that key and build the payload we'll verify + // based on the Signed part of the delegated metadata + switch d := delegatedMetadata.(type) { + case *Metadata[RootType]: + for _, signature := 
range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case *Metadata[SnapshotType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case *Metadata[TimestampType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case *Metadata[TargetsType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + default: + return &ErrType{Msg: "unknown delegated metadata type"} + } + // verify if the signature for that payload corresponds to the given key + if err := verifier.VerifySignature(bytes.NewReader(sign.Signature), bytes.NewReader(payload)); err != nil { + // failed to verify the metadata with that key ID + log.Info("Failed to verify %s with key ID %s", delegatedRole, keyID) + } else { + // save the verified keyID only if verification passed + signingKeys[keyID] = true + log.Info("Verified with key", "role", delegatedRole, "ID", keyID) + } + } + // check if the amount of valid signatures is enough + if len(signingKeys) < roleThreshold { + log.Info("Verifying failed, not enough signatures", "role", delegatedRole, "got", len(signingKeys), "want", roleThreshold) + return &ErrUnsignedMetadata{Msg: fmt.Sprintf("Verifying %s failed, not enough signatures, got %d, want %d", delegatedRole, len(signingKeys), roleThreshold)} + } + log.Info("Verified successfully", "role", delegatedRole) + return nil +} + +// IsExpired returns true if metadata is expired. +// It checks if referenceTime is after Signed.Expires +func (signed *RootType) IsExpired(referenceTime time.Time) bool { + return referenceTime.After(signed.Expires) +} + +// IsExpired returns true if metadata is expired. +// It checks if referenceTime is after Signed.Expires +func (signed *SnapshotType) IsExpired(referenceTime time.Time) bool { + return referenceTime.After(signed.Expires) +} + +// IsExpired returns true if metadata is expired. +// It checks if referenceTime is after Signed.Expires +func (signed *TimestampType) IsExpired(referenceTime time.Time) bool { + return referenceTime.After(signed.Expires) +} + +// IsExpired returns true if metadata is expired. 
+// It checks if referenceTime is after Signed.Expires +func (signed *TargetsType) IsExpired(referenceTime time.Time) bool { + return referenceTime.After(signed.Expires) +} + +// VerifyLengthHashes checks whether the MetaFiles data matches its corresponding +// length and hashes +func (f *MetaFiles) VerifyLengthHashes(data []byte) error { + // hashes and length are optional for MetaFiles + if len(f.Hashes) > 0 { + err := verifyHashes(data, f.Hashes) + if err != nil { + return err + } + } + if f.Length != 0 { + err := verifyLength(data, f.Length) + if err != nil { + return err + } + } + return nil +} + +// VerifyLengthHashes checks whether the TargetFiles data matches its corresponding +// length and hashes +func (f *TargetFiles) VerifyLengthHashes(data []byte) error { + err := verifyHashes(data, f.Hashes) + if err != nil { + return err + } + err = verifyLength(data, f.Length) + if err != nil { + return err + } + return nil +} + +// Equal checks whether the source target file matches another +func (source *TargetFiles) Equal(expected TargetFiles) bool { + if source.Length == expected.Length && source.Hashes.Equal(expected.Hashes) { + return true + } + return false +} + +// FromFile generate TargetFiles from file +func (t *TargetFiles) FromFile(localPath string, hashes ...string) (*TargetFiles, error) { + log.Info("Generating target file from file", "path", localPath) + // read file + data, err := os.ReadFile(localPath) + if err != nil { + return nil, err + } + return t.FromBytes(localPath, data, hashes...) +} + +// FromBytes generate TargetFiles from bytes +func (t *TargetFiles) FromBytes(localPath string, data []byte, hashes ...string) (*TargetFiles, error) { + log.Info("Generating target file from bytes", "path", localPath) + var hasher hash.Hash + targetFile := &TargetFiles{ + Hashes: map[string]HexBytes{}, + } + // use default hash algorithm if not set + if len(hashes) == 0 { + hashes = []string{"sha256"} + } + // calculate length + len, err := io.Copy(io.Discard, bytes.NewReader(data)) + if err != nil { + return nil, err + } + targetFile.Length = len + for _, v := range hashes { + switch v { + case "sha256": + hasher = sha256.New() + case "sha512": + hasher = sha512.New() + default: + return nil, &ErrValue{Msg: fmt.Sprintf("failed generating TargetFile - unsupported hashing algorithm - %s", v)} + } + _, err := hasher.Write(data) + if err != nil { + return nil, err + } + targetFile.Hashes[v] = hasher.Sum(nil) + } + targetFile.Path = localPath + return targetFile, nil +} + +// ClearSignatures clears Signatures +func (meta *Metadata[T]) ClearSignatures() { + log.Info("Cleared signatures") + meta.Signatures = []Signature{} +} + +// IsDelegatedPath determines whether the given "targetFilepath" is in one of +// the paths that "DelegatedRole" is trusted to provide +func (role *DelegatedRole) IsDelegatedPath(targetFilepath string) (bool, error) { + if len(role.Paths) > 0 { + // standard delegations + for _, pathPattern := range role.Paths { + // A delegated role path may be an explicit path or glob + // pattern (Unix shell-style wildcards). + if isTargetInPathPattern(targetFilepath, pathPattern) { + return true, nil + } + } + } else if len(role.PathHashPrefixes) > 0 { + // hash bin delegations - calculate the hash of the filepath to determine in which bin to find the target. 
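+		// Note that the prefix comparison below is made against the URL-safe
+		// base64 encoding of the SHA-256 digest of the target path.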
+ targetFilepathHash := sha256.Sum256([]byte(targetFilepath)) + for _, pathHashPrefix := range role.PathHashPrefixes { + if strings.HasPrefix(base64.URLEncoding.EncodeToString(targetFilepathHash[:]), pathHashPrefix) { + return true, nil + } + } + } + return false, nil +} + +// Determine whether “targetpath“ matches the “pathpattern“. +func isTargetInPathPattern(targetpath string, pathpattern string) bool { + // We need to make sure that targetpath and pathpattern are pointing to + // the same directory as fnmatch doesn't threat "/" as a special symbol. + targetParts := strings.Split(targetpath, "/") + patternParts := strings.Split(pathpattern, "/") + if len(targetParts) != len(patternParts) { + return false + } + + // Every part in the pathpattern could include a glob pattern, that's why + // each of the target and pathpattern parts should match. + for i := 0; i < len(targetParts); i++ { + if ok, _ := filepath.Match(patternParts[i], targetParts[i]); !ok { + return false + } + } + + return true +} + +// GetRolesForTarget return the names and terminating status of all +// delegated roles who are responsible for targetFilepath +// Note the result should be an ordered list, ref. https://github.com/theupdateframework/go-tuf/security/advisories/GHSA-4f8r-qqr9-fq8j +func (role *Delegations) GetRolesForTarget(targetFilepath string) []RoleResult { + var res []RoleResult + // Standard delegations + if role.Roles != nil { + for _, r := range role.Roles { + ok, err := r.IsDelegatedPath(targetFilepath) + if err == nil && ok { + res = append(res, RoleResult{Name: r.Name, Terminating: r.Terminating}) + } + } + } else if role.SuccinctRoles != nil { + // SuccinctRoles delegations + res = role.SuccinctRoles.GetRolesForTarget(targetFilepath) + } + // We preserve the same order as the actual roles list + return res +} + +// GetRolesForTarget calculate the name of the delegated role responsible for "targetFilepath". +// The target at path "targetFilepath" is assigned to a bin by casting +// the left-most "BitLength" of bits of the file path hash digest to +// int, using it as bin index between 0 and “2**BitLength-1”. +func (role *SuccinctRoles) GetRolesForTarget(targetFilepath string) []RoleResult { + // calculate the suffixLen value based on the total number of bins in + // hex. If bit_length = 10 then numberOfBins = 1024 or bin names will + // have a suffix between "000" and "3ff" in hex and suffixLen will be 3 + // meaning the third bin will have a suffix of "003" + numberOfBins := math.Pow(2, float64(role.BitLength)) + // suffixLen is calculated based on "numberOfBins - 1" as the name + // of the last bin contains the number "numberOfBins -1" as a suffix. + suffixLen := len(strconv.FormatInt(int64(numberOfBins-1), 16)) + + targetFilepathHash := sha256.Sum256([]byte(targetFilepath)) + // we can't ever need more than 4 bytes (32 bits) + hashBytes := targetFilepathHash[:4] + + // right shift hash bytes, so that we only have the leftmost + // bit_length bits that we care about + shiftValue := 32 - role.BitLength + binNumber := binary.BigEndian.Uint32(hashBytes) >> shiftValue + // add zero padding if necessary and cast to hex the suffix + suffix := fmt.Sprintf("%0*x", suffixLen, binNumber) + // we consider all succinct_roles as terminating. + // for more information, read TAP 15. 
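+	// Illustrative example (hypothetical values): with BitLength=8 and
+	// NamePrefix="bins", a target path whose digest's first byte is 0x2a
+	// falls into bin 42 and is assigned to the role "bins-2a".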
+ return []RoleResult{{Name: fmt.Sprintf("%s-%s", role.NamePrefix, suffix), Terminating: true}} +} + +// GetRoles returns the names of all different delegated roles +func (role *SuccinctRoles) GetRoles() []string { + res := []string{} + suffixLen, numberOfBins := role.GetSuffixLen() + + for binNumber := 0; binNumber < numberOfBins; binNumber++ { + suffix := fmt.Sprintf("%0*x", suffixLen, binNumber) + res = append(res, fmt.Sprintf("%s-%s", role.NamePrefix, suffix)) + } + return res +} + +func (role *SuccinctRoles) GetSuffixLen() (int, int) { + numberOfBins := int(math.Pow(2, float64(role.BitLength))) + return len(strconv.FormatInt(int64(numberOfBins-1), 16)), numberOfBins +} + +// IsDelegatedRole returns whether the given roleName is in one of +// the delegated roles that “SuccinctRoles“ represents +func (role *SuccinctRoles) IsDelegatedRole(roleName string) bool { + suffixLen, numberOfBins := role.GetSuffixLen() + + expectedPrefix := fmt.Sprintf("%s-", role.NamePrefix) + + // check if the roleName prefix is what we would expect + if !strings.HasPrefix(roleName, expectedPrefix) { + return false + } + + // check if the roleName suffix length is what we would expect + suffix := roleName[len(expectedPrefix):] + if len(suffix) != suffixLen { + return false + } + + // make sure suffix is hex value and get bin number + value, err := strconv.ParseInt(suffix, 16, 64) + if err != nil { + return false + } + + // check if the bin we calculated is indeed within the range of what we support + return (value >= 0) && (value < int64(numberOfBins)) +} + +// AddKey adds new signing key for delegated role "role" +// keyID: Identifier of the key to be added for “role“. +// key: Signing key to be added for “role“. +// role: Name of the role, for which “key“ is added. +func (signed *RootType) AddKey(key *Key, role string) error { + // verify role is present + if _, ok := signed.Roles[role]; !ok { + return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)} + } + // add keyID to role + if !slices.Contains(signed.Roles[role].KeyIDs, key.ID()) { + signed.Roles[role].KeyIDs = append(signed.Roles[role].KeyIDs, key.ID()) + } + // update Keys + signed.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + return nil +} + +// RevokeKey revoke key from “role“ and updates the Keys store. +// keyID: Identifier of the key to be removed for “role“. +// role: Name of the role, for which a signing key is removed. +func (signed *RootType) RevokeKey(keyID, role string) error { + // verify role is present + if _, ok := signed.Roles[role]; !ok { + return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)} + } + // verify keyID is present for given role + if !slices.Contains(signed.Roles[role].KeyIDs, keyID) { + return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)} + } + // remove keyID from role + filteredKeyIDs := []string{} + for _, k := range signed.Roles[role].KeyIDs { + if k != keyID { + filteredKeyIDs = append(filteredKeyIDs, k) + } + } + // overwrite the old keyID slice + signed.Roles[role].KeyIDs = filteredKeyIDs + // check if keyID is used by other roles too + for _, r := range signed.Roles { + if slices.Contains(r.KeyIDs, keyID) { + return nil + } + } + // delete the keyID from Keys if it's not used anywhere else + delete(signed.Keys, keyID) + return nil +} + +// AddKey adds new signing key for delegated role "role" +// key: Signing key to be added for “role“. 
+// role: Name of the role, for which “key“ is added. +// If SuccinctRoles is used then the "role" argument can be ignored. +func (signed *TargetsType) AddKey(key *Key, role string) error { + // check if Delegations are even present + if signed.Delegations == nil { + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } + // standard delegated roles + if signed.Delegations.Roles != nil { + // loop through all delegated roles + isDelegatedRole := false + for i, d := range signed.Delegations.Roles { + // if role is found + if d.Name == role { + isDelegatedRole = true + // add key if keyID is not already part of keyIDs for that role + if !slices.Contains(d.KeyIDs, key.ID()) { + signed.Delegations.Roles[i].KeyIDs = append(signed.Delegations.Roles[i].KeyIDs, key.ID()) + signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + return nil + } + log.Info("Delegated role already has keyID", "role", role, "ID", key.ID()) + } + } + if !isDelegatedRole { + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } + } else if signed.Delegations.SuccinctRoles != nil { + // add key if keyID is not already part of keyIDs for the SuccinctRoles role + if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) { + signed.Delegations.SuccinctRoles.KeyIDs = append(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) + signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + return nil + } + log.Info("SuccinctRoles role already has keyID", "ID", key.ID()) + + } + signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + return nil +} + +// RevokeKey revokes key from delegated role "role" and updates the delegations key store +// keyID: Identifier of the key to be removed for “role“. +// role: Name of the role, for which a signing key is removed. 
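+// An illustrative add/revoke round trip, assuming targetsMeta is a
+// *Metadata[TargetsType] with an existing delegation named "example-role"
+// (placeholder names, error handling omitted):
+//
+//	_ = targetsMeta.Signed.AddKey(key, "example-role")
+//	_ = targetsMeta.Signed.RevokeKey(key.ID(), "example-role")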
+func (signed *TargetsType) RevokeKey(keyID string, role string) error { + // check if Delegations are even present + if signed.Delegations == nil { + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } + // standard delegated roles + if signed.Delegations.Roles != nil { + // loop through all delegated roles + for i, d := range signed.Delegations.Roles { + // if role is found + if d.Name == role { + // check if keyID is present in keyIDs for that role + if !slices.Contains(d.KeyIDs, keyID) { + return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)} + } + // remove keyID from role + filteredKeyIDs := []string{} + for _, k := range signed.Delegations.Roles[i].KeyIDs { + if k != keyID { + filteredKeyIDs = append(filteredKeyIDs, k) + } + } + // overwrite the old keyID slice for that role + signed.Delegations.Roles[i].KeyIDs = filteredKeyIDs + // check if keyID is used by other roles too + for _, r := range signed.Delegations.Roles { + if slices.Contains(r.KeyIDs, keyID) { + return nil + } + } + // delete the keyID from Keys if it's not used anywhere else + delete(signed.Delegations.Keys, keyID) + return nil + } + } + // we haven't found the delegated role + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } else if signed.Delegations.SuccinctRoles != nil { + // check if keyID is used by SuccinctRoles role + if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, keyID) { + return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by SuccinctRoles", keyID)} + } + // remove keyID from the SuccinctRoles role + filteredKeyIDs := []string{} + for _, k := range signed.Delegations.SuccinctRoles.KeyIDs { + if k != keyID { + filteredKeyIDs = append(filteredKeyIDs, k) + } + } + // overwrite the old keyID slice for SuccinctRoles role + signed.Delegations.SuccinctRoles.KeyIDs = filteredKeyIDs + + // delete the keyID from Keys since it can not be used anywhere else + delete(signed.Delegations.Keys, keyID) + return nil + } + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} +} + +// Equal checks whether one hash set equals another +func (source Hashes) Equal(expected Hashes) bool { + hashChecked := false + for typ, hash := range expected { + if h, ok := source[typ]; ok { + // hash type match found + hashChecked = true + if !hmac.Equal(h, hash) { + // hash values don't match + return false + } + } + } + return hashChecked +} + +// verifyLength verifies if the passed data has the corresponding length +func verifyLength(data []byte, length int64) error { + len, err := io.Copy(io.Discard, bytes.NewReader(data)) + if err != nil { + return err + } + if length != len { + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("length verification failed - expected %d, got %d", length, len)} + } + return nil +} + +// verifyHashes verifies if the hash of the passed data corresponds to it +func verifyHashes(data []byte, hashes Hashes) error { + var hasher hash.Hash + for k, v := range hashes { + switch k { + case "sha256": + hasher = sha256.New() + case "sha512": + hasher = sha512.New() + default: + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - unknown hashing algorithm - %s", k)} + } + hasher.Write(data) + if hex.EncodeToString(v) != hex.EncodeToString(hasher.Sum(nil)) { + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - mismatch for algorithm %s", k)} + } + } + return nil +} + +// fromBytes return a *Metadata[T] object from bytes and verifies +// 
that the data corresponds to the caller struct type +func fromBytes[T Roles](data []byte) (*Metadata[T], error) { + meta := &Metadata[T]{} + // verify that the type we used to create the object is the same as the type of the metadata file + if err := checkType[T](data); err != nil { + return nil, err + } + // if all is okay, unmarshal meta to the desired Metadata[T] type + if err := json.Unmarshal(data, meta); err != nil { + return nil, err + } + // Make sure signature key IDs are unique + if err := checkUniqueSignatures(*meta); err != nil { + return nil, err + } + return meta, nil +} + +// checkUniqueSignatures verifies if the signature key IDs are unique for that metadata +func checkUniqueSignatures[T Roles](meta Metadata[T]) error { + signatures := []string{} + for _, sig := range meta.Signatures { + if slices.Contains(signatures, sig.KeyID) { + return &ErrValue{Msg: fmt.Sprintf("multiple signatures found for key ID %s", sig.KeyID)} + } + signatures = append(signatures, sig.KeyID) + } + return nil +} + +// checkType verifies if the generic type used to create the object is the same as the type of the metadata file in bytes +func checkType[T Roles](data []byte) error { + var m map[string]any + i := any(new(T)) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + signedType := m["signed"].(map[string]any)["_type"].(string) + switch i.(type) { + case *RootType: + if ROOT != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", ROOT, signedType)} + } + case *SnapshotType: + if SNAPSHOT != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", SNAPSHOT, signedType)} + } + case *TimestampType: + if TIMESTAMP != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TIMESTAMP, signedType)} + } + case *TargetsType: + if TARGETS != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TARGETS, signedType)} + } + default: + return &ErrValue{Msg: fmt.Sprintf("unrecognized metadata type - %s", signedType)} + } + // all okay + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go new file mode 100644 index 000000000000..0726f31f8816 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go @@ -0,0 +1,357 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package trustedmetadata + +import ( + "fmt" + "time" + + "github.com/theupdateframework/go-tuf/v2/metadata" +) + +// TrustedMetadata struct for storing trusted metadata +type TrustedMetadata struct { + Root *metadata.Metadata[metadata.RootType] + Snapshot *metadata.Metadata[metadata.SnapshotType] + Timestamp *metadata.Metadata[metadata.TimestampType] + Targets map[string]*metadata.Metadata[metadata.TargetsType] + RefTime time.Time +} + +// New creates a new TrustedMetadata instance which ensures that the +// collection of metadata in it is valid and trusted through the whole +// client update workflow. It provides easy ways to update the metadata +// with the caller making decisions on what is updated +func New(rootData []byte) (*TrustedMetadata, error) { + res := &TrustedMetadata{ + Targets: map[string]*metadata.Metadata[metadata.TargetsType]{}, + RefTime: time.Now().UTC(), + } + // load and validate the local root metadata + // valid initial trusted root metadata is required + err := res.loadTrustedRoot(rootData) + if err != nil { + return nil, err + } + return res, nil +} + +// UpdateRoot verifies and loads “rootData“ as new root metadata. +// Note that an expired intermediate root is considered valid: expiry is +// only checked for the final root in UpdateTimestamp() +func (trusted *TrustedMetadata) UpdateRoot(rootData []byte) (*metadata.Metadata[metadata.RootType], error) { + log := metadata.GetLogger() + + if trusted.Timestamp != nil { + return nil, &metadata.ErrRuntime{Msg: "cannot update root after timestamp"} + } + log.Info("Updating root") + // generate root metadata + newRoot, err := metadata.Root().FromBytes(rootData) + if err != nil { + return nil, err + } + // check metadata type matches root + if newRoot.Signed.Type != metadata.ROOT { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)} + } + // verify that new root is signed by trusted root + err = trusted.Root.VerifyDelegate(metadata.ROOT, newRoot) + if err != nil { + return nil, err + } + // verify version + if newRoot.Signed.Version != trusted.Root.Signed.Version+1 { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("bad version number, expected %d, got %d", trusted.Root.Signed.Version+1, newRoot.Signed.Version)} + } + // verify that new root is signed by itself + err = newRoot.VerifyDelegate(metadata.ROOT, newRoot) + if err != nil { + return nil, err + } + // save root if verified + trusted.Root = newRoot + log.Info("Updated root", "version", trusted.Root.Signed.Version) + return trusted.Root, nil +} + +// UpdateTimestamp verifies and loads “timestampData“ as new timestamp metadata. +// Note that an intermediate timestamp is allowed to be expired. "TrustedMetadata" +// will error in this case but the intermediate timestamp will be loaded. +// This way a newer timestamp can still be loaded (and the intermediate +// timestamp will be used for rollback protection). Expired timestamp will +// prevent loading snapshot metadata. +func (trusted *TrustedMetadata) UpdateTimestamp(timestampData []byte) (*metadata.Metadata[metadata.TimestampType], error) { + log := metadata.GetLogger() + + if trusted.Snapshot != nil { + return nil, &metadata.ErrRuntime{Msg: "cannot update timestamp after snapshot"} + } + // client workflow 5.3.10: Make sure final root is not expired. 
+ if trusted.Root.Signed.IsExpired(trusted.RefTime) { + // no need to check for 5.3.11 (fast forward attack recovery): + // timestamp/snapshot can not yet be loaded at this point + return nil, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"} + } + log.Info("Updating timestamp") + newTimestamp, err := metadata.Timestamp().FromBytes(timestampData) + if err != nil { + return nil, err + } + // check metadata type matches timestamp + if newTimestamp.Signed.Type != metadata.TIMESTAMP { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TIMESTAMP, newTimestamp.Signed.Type)} + } + // verify that new timestamp is signed by trusted root + err = trusted.Root.VerifyDelegate(metadata.TIMESTAMP, newTimestamp) + if err != nil { + return nil, err + } + // if an existing trusted timestamp is updated, + // check for a rollback attack + if trusted.Timestamp != nil { + // prevent rolling back timestamp version + if newTimestamp.Signed.Version < trusted.Timestamp.Signed.Version { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new timestamp version %d must be >= %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)} + } + // keep using old timestamp if versions are equal + if newTimestamp.Signed.Version == trusted.Timestamp.Signed.Version { + log.Info("New timestamp version equals the old one", "new", newTimestamp.Signed.Version, "old", trusted.Timestamp.Signed.Version) + return nil, &metadata.ErrEqualVersionNumber{Msg: fmt.Sprintf("new timestamp version %d equals the old one %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)} + } + // prevent rolling back snapshot version + snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + newSnapshotMeta := newTimestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + if newSnapshotMeta.Version < snapshotMeta.Version { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new snapshot version %d must be >= %d", newSnapshotMeta.Version, snapshotMeta.Version)} + } + } + // expiry not checked to allow old timestamp to be used for rollback + // protection of new timestamp: expiry is checked in UpdateSnapshot() + // save root if verified + trusted.Timestamp = newTimestamp + log.Info("Updated timestamp", "version", trusted.Timestamp.Signed.Version) + + // timestamp is loaded: error if it is not valid _final_ timestamp + err = trusted.checkFinalTimestamp() + if err != nil { + return nil, err + } + // all okay + return trusted.Timestamp, nil +} + +// checkFinalTimestamp verifies if trusted timestamp is not expired +func (trusted *TrustedMetadata) checkFinalTimestamp() error { + if trusted.Timestamp.Signed.IsExpired(trusted.RefTime) { + return &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"} + } + return nil +} + +// UpdateSnapshot verifies and loads “snapshotData“ as new snapshot metadata. +// Note that an intermediate snapshot is allowed to be expired and version +// is allowed to not match timestamp meta version: TrustedMetadata +// will error for case of expired metadata or when using bad versions but the +// intermediate snapshot will be loaded. This way a newer snapshot can still +// be loaded (and the intermediate snapshot will be used for rollback protection). +// Expired snapshot or snapshot that does not match timestamp meta version will +// prevent loading targets. 
+func (trusted *TrustedMetadata) UpdateSnapshot(snapshotData []byte, isTrusted bool) (*metadata.Metadata[metadata.SnapshotType], error) { + log := metadata.GetLogger() + + if trusted.Timestamp == nil { + return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot before timestamp"} + } + if trusted.Targets[metadata.TARGETS] != nil { + return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot after targets"} + } + log.Info("Updating snapshot") + + // snapshot cannot be loaded if final timestamp is expired + err := trusted.checkFinalTimestamp() + if err != nil { + return nil, err + } + snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + // verify non-trusted data against the hashes in timestamp, if any. + // trusted snapshot data has already been verified once. + if !isTrusted { + err = snapshotMeta.VerifyLengthHashes(snapshotData) + if err != nil { + return nil, err + } + } + newSnapshot, err := metadata.Snapshot().FromBytes(snapshotData) + if err != nil { + return nil, err + } + // check metadata type matches snapshot + if newSnapshot.Signed.Type != metadata.SNAPSHOT { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.SNAPSHOT, newSnapshot.Signed.Type)} + } + // verify that new snapshot is signed by trusted root + err = trusted.Root.VerifyDelegate(metadata.SNAPSHOT, newSnapshot) + if err != nil { + return nil, err + } + + // version not checked against meta version to allow old snapshot to be + // used in rollback protection: it is checked when targets is updated + + // if an existing trusted snapshot is updated, check for rollback attack + if trusted.Snapshot != nil { + for name, info := range trusted.Snapshot.Signed.Meta { + newFileInfo, ok := newSnapshot.Signed.Meta[name] + // prevent removal of any metadata in meta + if !ok { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("new snapshot is missing info for %s", name)} + } + // prevent rollback of any metadata versions + if newFileInfo.Version < info.Version { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", name, newFileInfo.Version, info.Version)} + } + } + } + + // expiry not checked to allow old snapshot to be used for rollback + // protection of new snapshot: it is checked when targets is updated + trusted.Snapshot = newSnapshot + log.Info("Updated snapshot", "version", trusted.Snapshot.Signed.Version) + + // snapshot is loaded, but we error if it's not valid _final_ snapshot + err = trusted.checkFinalSnapshot() + if err != nil { + return nil, err + } + // all okay + return trusted.Snapshot, nil +} + +// checkFinalSnapshot verifies if it's not expired and snapshot version matches timestamp meta version +func (trusted *TrustedMetadata) checkFinalSnapshot() error { + if trusted.Snapshot.Signed.IsExpired(trusted.RefTime) { + return &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"} + } + snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + if trusted.Snapshot.Signed.Version != snapshotMeta.Version { + return &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %d, got %d", snapshotMeta.Version, trusted.Snapshot.Signed.Version)} + } + return nil +} + +// UpdateTargets verifies and loads “targetsData“ as new top-level targets metadata. 
+func (trusted *TrustedMetadata) UpdateTargets(targetsData []byte) (*metadata.Metadata[metadata.TargetsType], error) { + return trusted.UpdateDelegatedTargets(targetsData, metadata.TARGETS, metadata.ROOT) +} + +// UpdateDelegatedTargets verifies and loads “targetsData“ as new metadata for target “roleName“ +func (trusted *TrustedMetadata) UpdateDelegatedTargets(targetsData []byte, roleName, delegatorName string) (*metadata.Metadata[metadata.TargetsType], error) { + log := metadata.GetLogger() + + var ok bool + if trusted.Snapshot == nil { + return nil, &metadata.ErrRuntime{Msg: "cannot load targets before snapshot"} + } + // targets cannot be loaded if final snapshot is expired or its version + // does not match meta version in timestamp + err := trusted.checkFinalSnapshot() + if err != nil { + return nil, err + } + // check if delegator metadata is present + if delegatorName == metadata.ROOT { + if trusted.Root != nil { + ok = true + } else { + ok = false + } + } else { + _, ok = trusted.Targets[delegatorName] + } + if !ok { + return nil, &metadata.ErrRuntime{Msg: "cannot load targets before delegator"} + } + log.Info("Updating delegated role", "role", roleName, "delegator", delegatorName) + // Verify against the hashes in snapshot, if any + meta, ok := trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)] + if !ok { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("snapshot does not contain information for %s", roleName)} + } + err = meta.VerifyLengthHashes(targetsData) + if err != nil { + return nil, err + } + newDelegate, err := metadata.Targets().FromBytes(targetsData) + if err != nil { + return nil, err + } + // check metadata type matches targets + if newDelegate.Signed.Type != metadata.TARGETS { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TARGETS, newDelegate.Signed.Type)} + } + // get delegator metadata and verify the new delegatee + if delegatorName == metadata.ROOT { + err = trusted.Root.VerifyDelegate(roleName, newDelegate) + if err != nil { + return nil, err + } + } else { + err = trusted.Targets[delegatorName].VerifyDelegate(roleName, newDelegate) + if err != nil { + return nil, err + } + } + // check versions + if newDelegate.Signed.Version != meta.Version { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", roleName, meta.Version, newDelegate.Signed.Version)} + } + // check expiration + if newDelegate.Signed.IsExpired(trusted.RefTime) { + return nil, &metadata.ErrExpiredMetadata{Msg: fmt.Sprintf("new %s is expired", roleName)} + } + trusted.Targets[roleName] = newDelegate + log.Info("Updated role", "role", roleName, "version", trusted.Targets[roleName].Signed.Version) + return trusted.Targets[roleName], nil +} + +// loadTrustedRoot verifies and loads "data" as trusted root metadata. +// Note that an expired initial root is considered valid: expiry is +// only checked for the final root in “UpdateTimestamp()“. 
+func (trusted *TrustedMetadata) loadTrustedRoot(rootData []byte) error { + log := metadata.GetLogger() + + // generate root metadata + newRoot, err := metadata.Root().FromBytes(rootData) + if err != nil { + return err + } + // check metadata type matches root + if newRoot.Signed.Type != metadata.ROOT { + return &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)} + } + // verify root by itself + err = newRoot.VerifyDelegate(metadata.ROOT, newRoot) + if err != nil { + return err + } + // save root if verified + trusted.Root = newRoot + log.Info("Loaded trusted root", "version", trusted.Root.Signed.Version) + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go new file mode 100644 index 000000000000..0bd86ee8f234 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go @@ -0,0 +1,179 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/json" + "time" +) + +// Generic type constraint +type Roles interface { + RootType | SnapshotType | TimestampType | TargetsType +} + +// Define version of the TUF specification +const ( + SPECIFICATION_VERSION = "1.0.31" +) + +// Define top level role names +const ( + ROOT = "root" + SNAPSHOT = "snapshot" + TARGETS = "targets" + TIMESTAMP = "timestamp" +) + +var TOP_LEVEL_ROLE_NAMES = [...]string{ROOT, TIMESTAMP, SNAPSHOT, TARGETS} + +// Metadata[T Roles] represents a TUF metadata. +// Provides methods to read and write to and +// from file and bytes, also to create, verify and clear metadata signatures. 
+type Metadata[T Roles] struct { + Signed T `json:"signed"` + Signatures []Signature `json:"signatures"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Signature represents the Signature part of a TUF metadata +type Signature struct { + KeyID string `json:"keyid"` + Signature HexBytes `json:"sig"` + UnrecognizedFields map[string]any `json:"-"` +} + +// RootType represents the Signed portion of a root metadata +type RootType struct { + Type string `json:"_type"` + SpecVersion string `json:"spec_version"` + ConsistentSnapshot bool `json:"consistent_snapshot"` + Version int64 `json:"version"` + Expires time.Time `json:"expires"` + Keys map[string]*Key `json:"keys"` + Roles map[string]*Role `json:"roles"` + UnrecognizedFields map[string]any `json:"-"` +} + +// SnapshotType represents the Signed portion of a snapshot metadata +type SnapshotType struct { + Type string `json:"_type"` + SpecVersion string `json:"spec_version"` + Version int64 `json:"version"` + Expires time.Time `json:"expires"` + Meta map[string]*MetaFiles `json:"meta"` + UnrecognizedFields map[string]any `json:"-"` +} + +// TargetsType represents the Signed portion of a targets metadata +type TargetsType struct { + Type string `json:"_type"` + SpecVersion string `json:"spec_version"` + Version int64 `json:"version"` + Expires time.Time `json:"expires"` + Targets map[string]*TargetFiles `json:"targets"` + Delegations *Delegations `json:"delegations,omitempty"` + UnrecognizedFields map[string]any `json:"-"` +} + +// TimestampType represents the Signed portion of a timestamp metadata +type TimestampType struct { + Type string `json:"_type"` + SpecVersion string `json:"spec_version"` + Version int64 `json:"version"` + Expires time.Time `json:"expires"` + Meta map[string]*MetaFiles `json:"meta"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Key represents a key in TUF +type Key struct { + Type string `json:"keytype"` + Scheme string `json:"scheme"` + Value KeyVal `json:"keyval"` + id string `json:"-"` + UnrecognizedFields map[string]any `json:"-"` +} + +type KeyVal struct { + PublicKey string `json:"public"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Role represents one of the top-level roles in TUF +type Role struct { + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` + UnrecognizedFields map[string]any `json:"-"` +} + +type HexBytes []byte + +type Hashes map[string]HexBytes + +// MetaFiles represents the value portion of METAFILES in TUF (used in Snapshot and Timestamp metadata). Used to store information about a particular meta file. +type MetaFiles struct { + Length int64 `json:"length,omitempty"` + Hashes Hashes `json:"hashes,omitempty"` + Version int64 `json:"version"` + UnrecognizedFields map[string]any `json:"-"` +} + +// TargetFiles represents the value portion of TARGETS in TUF (used Targets metadata). Used to store information about a particular target file. 
+type TargetFiles struct { + Length int64 `json:"length"` + Hashes Hashes `json:"hashes"` + Custom *json.RawMessage `json:"custom,omitempty"` + Path string `json:"-"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Delegations is an optional object which represents delegation roles and their corresponding keys +type Delegations struct { + Keys map[string]*Key `json:"keys"` + Roles []DelegatedRole `json:"roles,omitempty"` + SuccinctRoles *SuccinctRoles `json:"succinct_roles,omitempty"` + UnrecognizedFields map[string]any `json:"-"` +} + +// DelegatedRole represents a delegated role in TUF +type DelegatedRole struct { + Name string `json:"name"` + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` + Terminating bool `json:"terminating"` + PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"` + Paths []string `json:"paths,omitempty"` + UnrecognizedFields map[string]any `json:"-"` +} + +// SuccinctRoles represents a delegation graph that covers all targets, +// distributing them uniformly over the delegated roles (i.e. bins) in the graph. +type SuccinctRoles struct { + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` + BitLength int `json:"bit_length"` + NamePrefix string `json:"name_prefix"` + UnrecognizedFields map[string]any `json:"-"` +} + +// RoleResult represents the name and terminating status of a delegated role that is responsible for targetFilepath +type RoleResult struct { + Name string + Terminating bool +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go new file mode 100644 index 000000000000..bd533b63c3d8 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go @@ -0,0 +1,704 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package updater + +import ( + "encoding/hex" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "slices" + "strconv" + "strings" + "time" + + "github.com/theupdateframework/go-tuf/v2/metadata" + "github.com/theupdateframework/go-tuf/v2/metadata/config" + "github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata" +) + +// Client update workflow implementation +// +// The "Updater" provides an implementation of the TUF client workflow (ref. https://theupdateframework.github.io/specification/latest/#detailed-client-workflow). +// "Updater" provides an API to query available targets and to download them in a +// secure manner: All downloaded files are verified by signed metadata. +// High-level description of "Updater" functionality: +// - Initializing an "Updater" loads and validates the trusted local root +// metadata: This root metadata is used as the source of trust for all other +// metadata. 
+// - Refresh() can optionally be called to update and load all top-level +// metadata as described in the specification, using both locally cached +// metadata and metadata downloaded from the remote repository. If refresh is +// not done explicitly, it will happen automatically during the first target +// info lookup. +// - Updater can be used to download targets. For each target: +// - GetTargetInfo() is first used to find information about a +// specific target. This will load new targets metadata as needed (from +// local cache or remote repository). +// - FindCachedTarget() can optionally be used to check if a +// target file is already locally cached. +// - DownloadTarget() downloads a target file and ensures it is +// verified correct by the metadata. +type Updater struct { + trusted *trustedmetadata.TrustedMetadata + cfg *config.UpdaterConfig +} + +type roleParentTuple struct { + Role string + Parent string +} + +// New creates a new Updater instance and loads trusted root metadata +func New(config *config.UpdaterConfig) (*Updater, error) { + // make sure the trusted root metadata and remote URL were provided + if len(config.LocalTrustedRoot) == 0 || len(config.RemoteMetadataURL) == 0 { + return nil, fmt.Errorf("no initial trusted root metadata or remote URL provided") + } + // create a new trusted metadata instance using the trusted root.json + trustedMetadataSet, err := trustedmetadata.New(config.LocalTrustedRoot) + if err != nil { + return nil, err + } + // create an updater instance + updater := &Updater{ + cfg: config, + trusted: trustedMetadataSet, // save trusted metadata set + } + // ensure paths exist, doesn't do anything if caching is disabled + err = updater.cfg.EnsurePathsExist() + if err != nil { + return nil, err + } + // persist the initial root metadata to the local metadata folder + err = updater.persistMetadata(metadata.ROOT, updater.cfg.LocalTrustedRoot) + if err != nil { + return nil, err + } + // all okay, return the updater instance + return updater, nil +} + +// Refresh loads and possibly refreshes top-level metadata. +// Downloads, verifies, and loads metadata for the top-level roles in the +// specified order (root -> timestamp -> snapshot -> targets) implementing +// all the checks required in the TUF client workflow. +// A Refresh() can be done only once during the lifetime of an Updater. +// If Refresh() has not been explicitly called before the first +// GetTargetInfo() call, it will be done implicitly at that time. +// The metadata for delegated roles is not updated by Refresh(): +// that happens on demand during GetTargetInfo(). However, if the +// repository uses consistent snapshots (ref. https://theupdateframework.github.io/specification/latest/#consistent-snapshots), +// then all metadata downloaded by the Updater will use the same consistent repository state. +// +// If UnsafeLocalMode is set, no network interaction is performed, only +// the cached files on disk are used. If the cached data is not complete, +// this call will fail. +func (update *Updater) Refresh() error { + if update.cfg.UnsafeLocalMode { + return update.unsafeLocalRefresh() + } + return update.onlineRefresh() +} + +// onlineRefresh implements the TUF client workflow as described for +// the Refresh function. 
+func (update *Updater) onlineRefresh() error { + err := update.loadRoot() + if err != nil { + return err + } + err = update.loadTimestamp() + if err != nil { + return err + } + err = update.loadSnapshot() + if err != nil { + return err + } + _, err = update.loadTargets(metadata.TARGETS, metadata.ROOT) + if err != nil { + return err + } + return nil +} + +// unsafeLocalRefresh tries to load the persisted metadata already cached +// on disk. Note that this is an usafe function, and does deviate from the +// TUF specification section 5.3 to 5.7 (update phases). +// The metadata on disk are verified against the provided root though, +// and expiration dates are verified. +func (update *Updater) unsafeLocalRefresh() error { + // Root is already loaded + // load timestamp + var p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP) + data, err := update.loadLocalMetadata(p) + if err != nil { + return err + } + _, err = update.trusted.UpdateTimestamp(data) + if err != nil { + return err + } + + // load snapshot + p = filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT) + data, err = update.loadLocalMetadata(p) + if err != nil { + return err + } + _, err = update.trusted.UpdateSnapshot(data, false) + if err != nil { + return err + } + + // targets + p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TARGETS) + data, err = update.loadLocalMetadata(p) + if err != nil { + return err + } + // verify and load the new target metadata + _, err = update.trusted.UpdateDelegatedTargets(data, metadata.TARGETS, metadata.ROOT) + if err != nil { + return err + } + + return nil +} + +// GetTargetInfo returns metadata.TargetFiles instance with information +// for targetPath. The return value can be used as an argument to +// DownloadTarget() and FindCachedTarget(). +// If Refresh() has not been called before calling +// GetTargetInfo(), the refresh will be done implicitly. +// As a side-effect this method downloads all the additional (delegated +// targets) metadata it needs to return the target information. 
+func (update *Updater) GetTargetInfo(targetPath string) (*metadata.TargetFiles, error) { + // do a Refresh() in case there's no trusted targets.json yet + if update.trusted.Targets[metadata.TARGETS] == nil { + err := update.Refresh() + if err != nil { + return nil, err + } + } + return update.preOrderDepthFirstWalk(targetPath) +} + +// DownloadTarget downloads the target file specified by targetFile +func (update *Updater) DownloadTarget(targetFile *metadata.TargetFiles, filePath, targetBaseURL string) (string, []byte, error) { + log := metadata.GetLogger() + + var err error + if filePath == "" { + filePath, err = update.generateTargetFilePath(targetFile) + if err != nil { + return "", nil, err + } + } + if targetBaseURL == "" { + if update.cfg.RemoteTargetsURL == "" { + return "", nil, &metadata.ErrValue{Msg: "targetBaseURL must be set in either DownloadTarget() or the Updater struct"} + } + targetBaseURL = ensureTrailingSlash(update.cfg.RemoteTargetsURL) + } else { + targetBaseURL = ensureTrailingSlash(targetBaseURL) + } + + targetFilePath := targetFile.Path + targetRemotePath := targetFilePath + consistentSnapshot := update.trusted.Root.Signed.ConsistentSnapshot + if consistentSnapshot && update.cfg.PrefixTargetsWithHash { + hashes := "" + // get first hex value of hashes + for _, v := range targetFile.Hashes { + hashes = hex.EncodeToString(v) + break + } + baseName := filepath.Base(targetFilePath) + dirName, ok := strings.CutSuffix(targetFilePath, "/"+baseName) + if !ok { + // . + targetRemotePath = fmt.Sprintf("%s.%s", hashes, baseName) + } else { + // /. + targetRemotePath = fmt.Sprintf("%s/%s.%s", dirName, hashes, baseName) + } + } + fullURL := fmt.Sprintf("%s%s", targetBaseURL, targetRemotePath) + data, err := update.cfg.Fetcher.DownloadFile(fullURL, targetFile.Length, 0) + if err != nil { + return "", nil, err + } + err = targetFile.VerifyLengthHashes(data) + if err != nil { + return "", nil, err + } + + // do not persist the target file if cache is disabled + if !update.cfg.DisableLocalCache { + err = os.WriteFile(filePath, data, 0644) + if err != nil { + return "", nil, err + } + } + log.Info("Downloaded target", "path", targetFile.Path) + return filePath, data, nil +} + +// FindCachedTarget checks whether a local file is an up to date target +func (update *Updater) FindCachedTarget(targetFile *metadata.TargetFiles, filePath string) (string, []byte, error) { + var err error + targetFilePath := "" + // do not look for cached target file if cache is disabled + if update.cfg.DisableLocalCache { + return "", nil, nil + } + // get its path if not provided + if filePath == "" { + targetFilePath, err = update.generateTargetFilePath(targetFile) + if err != nil { + return "", nil, err + } + } else { + targetFilePath = filePath + } + // get file content + data, err := os.ReadFile(targetFilePath) + if err != nil { + // do not want to return err, instead we say that there's no cached target available + return "", nil, nil + } + // verify if the length and hashes of this target file match the expected values + err = targetFile.VerifyLengthHashes(data) + if err != nil { + // do not want to return err, instead we say that there's no cached target available + return "", nil, nil + } + // if all okay, return its path + return targetFilePath, data, nil +} + +// loadTimestamp load local and remote timestamp metadata +func (update *Updater) loadTimestamp() error { + log := metadata.GetLogger() + // try to read local timestamp + data, err := 
update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP)) + if err != nil { + // this means there's no existing local timestamp so we should proceed downloading it without the need to UpdateTimestamp + log.Info("Local timestamp does not exist") + } else { + // local timestamp exists, let's try to verify it and load it to the trusted metadata set + _, err := update.trusted.UpdateTimestamp(data) + if err != nil { + if errors.Is(err, &metadata.ErrRepository{}) { + // local timestamp is not valid, proceed downloading from remote; note that this error type includes several other subset errors + log.Info("Local timestamp is not valid") + } else { + // another error + return err + } + } + log.Info("Local timestamp is valid") + // all okay, local timestamp exists and it is valid, nevertheless proceed with downloading from remote + } + // load from remote (whether local load succeeded or not) + data, err = update.downloadMetadata(metadata.TIMESTAMP, update.cfg.TimestampMaxLength, "") + if err != nil { + return err + } + // try to verify and load the newly downloaded timestamp + _, err = update.trusted.UpdateTimestamp(data) + if err != nil { + if errors.Is(err, &metadata.ErrEqualVersionNumber{}) { + // if the new timestamp version is the same as current, discard the + // new timestamp; this is normal and it shouldn't raise any error + return nil + } else { + // another error + return err + } + } + // proceed with persisting the new timestamp + err = update.persistMetadata(metadata.TIMESTAMP, data) + if err != nil { + return err + } + return nil +} + +// loadSnapshot load local (and if needed remote) snapshot metadata +func (update *Updater) loadSnapshot() error { + log := metadata.GetLogger() + // try to read local snapshot + data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT)) + if err != nil { + // this means there's no existing local snapshot so we should proceed downloading it without the need to UpdateSnapshot + log.Info("Local snapshot does not exist") + } else { + // successfully read a local snapshot metadata, so let's try to verify and load it to the trusted metadata set + _, err = update.trusted.UpdateSnapshot(data, true) + if err != nil { + // this means snapshot verification/loading failed + if errors.Is(err, &metadata.ErrRepository{}) { + // local snapshot is not valid, proceed downloading from remote; note that this error type includes several other subset errors + log.Info("Local snapshot is not valid") + } else { + // another error + return err + } + } else { + // this means snapshot verification/loading succeeded + log.Info("Local snapshot is valid: not downloading new one") + return nil + } + } + // local snapshot does not exist or is invalid, update from remote + log.Info("Failed to load local snapshot") + if update.trusted.Timestamp == nil { + return fmt.Errorf("trusted timestamp not set") + } + // extract the snapshot meta from the trusted timestamp metadata + snapshotMeta := update.trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + // extract the length of the snapshot metadata to be downloaded + length := snapshotMeta.Length + if length == 0 { + length = update.cfg.SnapshotMaxLength + } + // extract which snapshot version should be downloaded in case of consistent snapshots + version := "" + if update.trusted.Root.Signed.ConsistentSnapshot { + version = strconv.FormatInt(snapshotMeta.Version, 10) + } + // download snapshot metadata + data, err = 
update.downloadMetadata(metadata.SNAPSHOT, length, version) + if err != nil { + return err + } + // verify and load the new snapshot + _, err = update.trusted.UpdateSnapshot(data, false) + if err != nil { + return err + } + // persist the new snapshot + err = update.persistMetadata(metadata.SNAPSHOT, data) + if err != nil { + return err + } + return nil +} + +// loadTargets load local (and if needed remote) metadata for roleName +func (update *Updater) loadTargets(roleName, parentName string) (*metadata.Metadata[metadata.TargetsType], error) { + log := metadata.GetLogger() + // avoid loading "roleName" more than once during "GetTargetInfo" + role, ok := update.trusted.Targets[roleName] + if ok { + return role, nil + } + // try to read local targets + data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, roleName)) + if err != nil { + // this means there's no existing local target file so we should proceed downloading it without the need to UpdateDelegatedTargets + log.Info("Local role does not exist", "role", roleName) + } else { + // successfully read a local targets metadata, so let's try to verify and load it to the trusted metadata set + delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName) + if err != nil { + // this means targets verification/loading failed + if errors.Is(err, &metadata.ErrRepository{}) { + // local target file is not valid, proceed downloading from remote; note that this error type includes several other subset errors + log.Info("Local role is not valid", "role", roleName) + } else { + // another error + return nil, err + } + } else { + // this means targets verification/loading succeeded + log.Info("Local role is valid: not downloading new one", "role", roleName) + return delegatedTargets, nil + } + } + // local "roleName" does not exist or is invalid, update from remote + log.Info("Failed to load local role", "role", roleName) + if update.trusted.Snapshot == nil { + return nil, fmt.Errorf("trusted snapshot not set") + } + // extract the targets' meta from the trusted snapshot metadata + metaInfo, ok := update.trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)] + if !ok { + return nil, fmt.Errorf("role %s not found in snapshot", roleName) + } + // extract the length of the target metadata to be downloaded + length := metaInfo.Length + if length == 0 { + length = update.cfg.TargetsMaxLength + } + // extract which target metadata version should be downloaded in case of consistent snapshots + version := "" + if update.trusted.Root.Signed.ConsistentSnapshot { + version = strconv.FormatInt(metaInfo.Version, 10) + } + // download targets metadata + data, err = update.downloadMetadata(roleName, length, version) + if err != nil { + return nil, err + } + // verify and load the new target metadata + delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName) + if err != nil { + return nil, err + } + // persist the new target metadata + err = update.persistMetadata(roleName, data) + if err != nil { + return nil, err + } + return delegatedTargets, nil +} + +// loadRoot load remote root metadata. 
Sequentially load and +// persist on local disk every newer root metadata version +// available on the remote +func (update *Updater) loadRoot() error { + // calculate boundaries + lowerBound := update.trusted.Root.Signed.Version + 1 + upperBound := lowerBound + update.cfg.MaxRootRotations + + // loop until we find the latest available version of root (download -> verify -> load -> persist) + for nextVersion := lowerBound; nextVersion < upperBound; nextVersion++ { + data, err := update.downloadMetadata(metadata.ROOT, update.cfg.RootMaxLength, strconv.FormatInt(nextVersion, 10)) + if err != nil { + // downloading the root metadata failed for some reason + var tmpErr *metadata.ErrDownloadHTTP + if errors.As(err, &tmpErr) { + if tmpErr.StatusCode != http.StatusNotFound { + // unexpected HTTP status code + return err + } + // 404 means current root is newest available, so we can stop the loop and move forward + break + } + // some other error ocurred + return err + } else { + // downloading root metadata succeeded, so let's try to verify and load it + _, err = update.trusted.UpdateRoot(data) + if err != nil { + return err + } + // persist root metadata to disk + err = update.persistMetadata(metadata.ROOT, data) + if err != nil { + return err + } + } + } + return nil +} + +// preOrderDepthFirstWalk interrogates the tree of target delegations +// in order of appearance (which implicitly order trustworthiness), +// and returns the matching target found in the most trusted role. +func (update *Updater) preOrderDepthFirstWalk(targetFilePath string) (*metadata.TargetFiles, error) { + log := metadata.GetLogger() + // list of delegations to be interrogated. A (role, parent role) pair + // is needed to load and verify the delegated targets metadata + delegationsToVisit := []roleParentTuple{{ + Role: metadata.TARGETS, + Parent: metadata.ROOT, + }} + visitedRoleNames := map[string]bool{} + // pre-order depth-first traversal of the graph of target delegations + for len(visitedRoleNames) <= update.cfg.MaxDelegations && len(delegationsToVisit) > 0 { + // pop the role name from the top of the stack + delegation := delegationsToVisit[len(delegationsToVisit)-1] + delegationsToVisit = delegationsToVisit[:len(delegationsToVisit)-1] + // skip any visited current role to prevent cycles + _, ok := visitedRoleNames[delegation.Role] + if ok { + log.Info("Skipping visited current role", "role", delegation.Role) + continue + } + // the metadata for delegation.Role must be downloaded/updated before + // its targets, delegations, and child roles can be inspected + targets, err := update.loadTargets(delegation.Role, delegation.Parent) + if err != nil { + return nil, err + } + target, ok := targets.Signed.Targets[targetFilePath] + if ok { + log.Info("Found target in current role", "role", delegation.Role) + return target, nil + } + // after pre-order check, add current role to set of visited roles + visitedRoleNames[delegation.Role] = true + if targets.Signed.Delegations != nil { + var childRolesToVisit []roleParentTuple + // note that this may be a slow operation if there are many + // delegated roles + roles := targets.Signed.Delegations.GetRolesForTarget(targetFilePath) + for _, rolesForTarget := range roles { + log.Info("Adding child role", "role", rolesForTarget.Name) + childRolesToVisit = append(childRolesToVisit, roleParentTuple{Role: rolesForTarget.Name, Parent: delegation.Role}) + if rolesForTarget.Terminating { + log.Info("Not backtracking to other roles") + delegationsToVisit = []roleParentTuple{} + break + } + 
} + // push childRolesToVisit in reverse order of appearance + // onto delegationsToVisit. Roles are popped from the end of + // the list + slices.Reverse(childRolesToVisit) + delegationsToVisit = append(delegationsToVisit, childRolesToVisit...) + } + } + if len(delegationsToVisit) > 0 { + log.Info("Too many roles left to visit for max allowed delegations", + "roles-left", len(delegationsToVisit), + "allowed-delegations", update.cfg.MaxDelegations) + } + // if this point is reached then target is not found, return nil + return nil, fmt.Errorf("target %s not found", targetFilePath) +} + +// persistMetadata writes metadata to disk atomically to avoid data loss +func (update *Updater) persistMetadata(roleName string, data []byte) error { + log := metadata.GetLogger() + // do not persist the metadata if we have disabled local caching + if update.cfg.DisableLocalCache { + return nil + } + // caching enabled, proceed with persisting the metadata locally + fileName := filepath.Join(update.cfg.LocalMetadataDir, fmt.Sprintf("%s.json", url.PathEscape(roleName))) + // create a temporary file + file, err := os.CreateTemp(update.cfg.LocalMetadataDir, "tuf_tmp") + if err != nil { + return err + } + // change the file permissions to our desired permissions + err = file.Chmod(0644) + if err != nil { + // close and delete the temporary file if there was an error while writing + file.Close() + errRemove := os.Remove(file.Name()) + if errRemove != nil { + log.Info("Failed to delete temporary file", "name", file.Name()) + } + return err + } + // write the data content to the temporary file + _, err = file.Write(data) + if err != nil { + // close and delete the temporary file if there was an error while writing + file.Close() + errRemove := os.Remove(file.Name()) + if errRemove != nil { + log.Info("Failed to delete temporary file", "name", file.Name()) + } + return err + } + + // can't move/rename an open file on windows, so close it first + err = file.Close() + if err != nil { + return err + } + // if all okay, rename the temporary file to the desired one + err = os.Rename(file.Name(), fileName) + if err != nil { + return err + } + read, err := os.ReadFile(fileName) + if err != nil { + return err + } + if string(read) != string(data) { + return fmt.Errorf("failed to persist metadata") + } + return nil +} + +// downloadMetadata download a metadata file and return it as bytes +func (update *Updater) downloadMetadata(roleName string, length int64, version string) ([]byte, error) { + urlPath := ensureTrailingSlash(update.cfg.RemoteMetadataURL) + // build urlPath + if version == "" { + urlPath = fmt.Sprintf("%s%s.json", urlPath, url.PathEscape(roleName)) + } else { + urlPath = fmt.Sprintf("%s%s.%s.json", urlPath, version, url.PathEscape(roleName)) + } + return update.cfg.Fetcher.DownloadFile(urlPath, length, 0) +} + +// generateTargetFilePath generates path from TargetFiles +func (update *Updater) generateTargetFilePath(tf *metadata.TargetFiles) (string, error) { + // LocalTargetsDir can be omitted if caching is disabled + if update.cfg.LocalTargetsDir == "" && !update.cfg.DisableLocalCache { + return "", &metadata.ErrValue{Msg: "LocalTargetsDir must be set if filepath is not given"} + } + // Use URL encoded target path as filename + return filepath.Join(update.cfg.LocalTargetsDir, url.PathEscape(tf.Path)), nil +} + +// loadLocalMetadata reads a local .json file and returns its bytes +func (update *Updater) loadLocalMetadata(roleName string) ([]byte, error) { + return os.ReadFile(fmt.Sprintf("%s.json", roleName)) 
+} + +// GetTopLevelTargets returns the top-level target files +func (update *Updater) GetTopLevelTargets() map[string]*metadata.TargetFiles { + return update.trusted.Targets[metadata.TARGETS].Signed.Targets +} + +// GetTrustedMetadataSet returns the trusted metadata set +func (update *Updater) GetTrustedMetadataSet() trustedmetadata.TrustedMetadata { + return *update.trusted +} + +// UnsafeSetRefTime sets the reference time that the updater uses. +// This should only be done in tests. +// Using this function is useful when testing time-related behavior in go-tuf. +func (update *Updater) UnsafeSetRefTime(t time.Time) { + update.trusted.RefTime = t +} + +func IsWindowsPath(path string) bool { + match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path) + return match +} + +// ensureTrailingSlash ensures url ends with a slash +func ensureTrailingSlash(url string) string { + if IsWindowsPath(url) { + slash := string(filepath.Separator) + if strings.HasSuffix(url, slash) { + return url + } + return url + slash + } + if strings.HasSuffix(url, "/") { + return url + } + return url + "/" +} diff --git a/vendor/github.com/transparency-dev/formats/LICENSE b/vendor/github.com/transparency-dev/formats/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/transparency-dev/formats/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/transparency-dev/formats/log/README.md b/vendor/github.com/transparency-dev/formats/log/README.md new file mode 100644 index 000000000000..887ce47b2a15 --- /dev/null +++ b/vendor/github.com/transparency-dev/formats/log/README.md @@ -0,0 +1,127 @@ +# Checkpoint format + +This directory contains a description and supporting golang code for +a reusable Checkpoint format which the TrustFabric team uses in various +projects. + +The format itself is heavily based on the +[golang sumbdb head](https://sum.golang.org/latest), and corresponding +[signed note](https://pkg.go.dev/golang.org/x/mod/sumdb/note) formats, +and consists of two parts: a signed envelope, and a body. + +### Signed envelope + +The envelope (signed note) is of the form: + +* One or more non-empty lines, each terminated by `\n` (the `body`) +* One line consisting of just one `\n` (i.e. 
a blank line)
+* One or more `signature` lines, each terminated by `\n`
+
+All signatures commit to the body only (including its trailing newline, but not
+the blank line's newline - see below for an example).
+
+The signature(s) themselves are in the sumdb note format (concrete example
+[below](#example)):
+
+`— <key name> <signature>\n`
+where:
+
+* `—` is an em dash (U+2014)
+* `<key name>` gives a human-readable representation of the signing ID
+* `<signature>` is the base64 encoding of the `signature_bytes`,
+
+and the `signature_bytes` are prefixed with the first 4 bytes of the SHA256 hash
+of the associated public key to act as a hint in identifying the correct key to
+verify with.
+
+For guidance on generating keys, see the
+[note documentation](https://pkg.go.dev/golang.org/x/mod/sumdb/note#hdr-Generating_Keys)
+and [implementation](https://cs.opensource.google/go/x/mod/+/master:sumdb/note/note.go;l=368;drc=ed3ec21bb8e252814c380df79a80f366440ddb2d).
+Of particular note is that the public key and its hash commit to the algorithm
+identifier.
+
+**Differences from sumdb note:**
+Whereas the golang signed note *implementation* currently supports only Ed25519
+signatures, the format itself is not restricted to this scheme.
+
+### Checkpoint body
+
+The checkpoint body is of the form:
+
+```text
+<origin>
+<tree size>
+<root hash>
+[otherdata]
+```
+
+The first 3 lines of the body **MUST** be present in all Checkpoints.
+
+* `<origin>` should be a unique identifier for the log identity which issued the checkpoint.
+  The format SHOULD be a URI-like structure like `<domain>[/<suffix>]`, where the log operator
+  controls `<domain>`, e.g. `example.com/log42`. This is only a recommendation, and clients MUST
+  NOT assume that the origin is a URI following this format. This structure reduces the likelihood
+  of origin collision, and gives clues to humans about the log operator and what is in the log. The
+  suffix is optional and can be anything. It is used to disambiguate logs owned under the same
+  prefix.
+
+  The presence of this identifier forms part of the log claim, and guards against two
+  logs producing bytewise identical checkpoints.
+
+* `<tree size>` is the ASCII decimal representation of the number of leaves committed
+  to by this checkpoint. It should not have leading zeroes.
+
+* `<root hash>` is an
+  [RFC4648 standard encoding](https://datatracker.ietf.org/doc/html/rfc4648#section-4) base-64
+  representation of the log root hash at the specified log size.
+
+* `[otherdata]` is opaque and optional, and, if necessary, can be used to tie extra
+  data to the checkpoint, however its format must conform to the sumdb signed
+  note spec (e.g. it must not contain blank lines.)
+
+> Note that golang sumdb implementation is already compatible with this
+`[otherdata]` extension (see
+).
+If you plan to use `otherdata` in your log, see the section on [merging checkpoints](#merging-checkpoints).
+
+The first signature on a checkpoint should be from the log which issued it, but there MUST NOT
+be more than one signature from a log identity present on the checkpoint.
+
+## Example
+
+An annotated example signed checkpoint in this format is shown below:
+
+![format](images/format.png)
+
+
+This checkpoint was issued by the log known as "Moon Log", the log's size is
+4027504, in the `other data` section a timestamp is encoded as a 64bit hex
+value, and further application-specific data relating to the phase of the moon
+at the point the checkpoint was issued is supplied following that.
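The annotated image is not reproduced in this patch; as a rough schematic of the structure described above (all bracketed values are placeholders rather than the actual bytes from the example, and the exact layout of the otherdata lines is illustrative only), a signed checkpoint of that shape looks like:

```text
<origin>
4027504
<base64 root hash>
<64-bit timestamp, hex encoded>
<application-specific moon-phase data>

— <key name> <base64(key-hash prefix || signature bytes)>
```

The first three body lines are the origin, tree size (4027504 in the example), and root hash; the remaining body lines are the otherdata; after the blank line comes the log's signature line in the note format.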
+ +## Merging Checkpoints + +This checkpoint format allows a checkpoint that has been independently signed by +multiple identities to be merged, creating a single checkpoint with multiple +signatures. This is particularly useful for witnessing, where witnesses will +independently check consistency of the log and produce a counter-signed copy +containing two signatures: one for the log, and one for the witness. + +The ability to merge signatures for the same body is a useful optimization. +Clients that require N witness signatures will not be required to fetch N checkpoints. +Instead they can fetch a single checkpoint and confirm it has the N required +signatures (in addition to the log signature). + +Note that this optimization requires the checkpoint _body_ to be byte-equivalent. +The log signature does not need to be equal; when merging, only one of the log's +signatures over this body will be propagated. The core checkpoint format above +allows merging for any two consistent checkpoints for the same tree size. +However, if the `otherdata` extension is used then this can lead to checkpoints +that cannot be merged, even at the same tree size. + +We recommend that log operators using `otherdata` consider carefully what +information is included in this. If data is included in `otherdata` that is not +fixed for a given tree size, then this can easily lead to unmergeable checkpoints. +The most commonly anticipated cause for this would be including the timestamp at +which the checkpoint was requested within the `otherdata`. In this case, no two +witnesses are likely to ever acquire the same checkpoint body. There may be cases +where this is unavoidable, but this consequence should be considered in the design. diff --git a/vendor/github.com/transparency-dev/formats/log/checkpoint.go b/vendor/github.com/transparency-dev/formats/log/checkpoint.go new file mode 100644 index 000000000000..3bbaacb38936 --- /dev/null +++ b/vendor/github.com/transparency-dev/formats/log/checkpoint.go @@ -0,0 +1,79 @@ +// Copyright 2021 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package log provides basic support for the common log checkpoint and proof +// format described by the README in this directory. +package log + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "strconv" +) + +// Checkpoint represents a minimal log checkpoint (STH). +type Checkpoint struct { + // Origin is the string identifying the log which issued this checkpoint. + Origin string + // Size is the number of entries in the log at this checkpoint. + Size uint64 + // Hash is the hash which commits to the contents of the entire log. + Hash []byte +} + +// Marshal returns the common format representation of this Checkpoint. +func (c Checkpoint) Marshal() []byte { + return []byte(fmt.Sprintf("%s\n%d\n%s\n", c.Origin, c.Size, base64.StdEncoding.EncodeToString(c.Hash))) +} + +// Unmarshal parses the common formatted checkpoint data and stores the result +// in the Checkpoint. 
+// +// The supplied data is expected to begin with the following 3 lines of text, +// each followed by a newline: +// - +// - +// - +// +// Any trailing data after this will be returned. +func (c *Checkpoint) Unmarshal(data []byte) ([]byte, error) { + l := bytes.SplitN(data, []byte("\n"), 4) + if len(l) < 4 { + return nil, errors.New("invalid checkpoint - too few newlines") + } + origin := string(l[0]) + if len(origin) == 0 { + return nil, errors.New("invalid checkpoint - empty origin") + } + size, err := strconv.ParseUint(string(l[1]), 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid checkpoint - size invalid: %w", err) + } + h, err := base64.StdEncoding.DecodeString(string(l[2])) + if err != nil { + return nil, fmt.Errorf("invalid checkpoint - invalid hash: %w", err) + } + var rest []byte + if len(l[3]) > 0 { + rest = l[3] + } + *c = Checkpoint{ + Origin: origin, + Size: size, + Hash: h, + } + return rest, nil +} diff --git a/vendor/github.com/transparency-dev/formats/log/identifier.go b/vendor/github.com/transparency-dev/formats/log/identifier.go new file mode 100644 index 000000000000..424794541f72 --- /dev/null +++ b/vendor/github.com/transparency-dev/formats/log/identifier.go @@ -0,0 +1,30 @@ +// Copyright 2021 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "crypto/sha256" + "fmt" +) + +// ID returns the identifier to use for a log given the Origin. This is the ID +// used to find checkpoints for this log at distributors, and that will be used +// to feed checkpoints to witnesses. +func ID(origin string) string { + s := sha256.New() + s.Write([]byte("o:")) + s.Write([]byte(origin)) + return fmt.Sprintf("%x", s.Sum(nil)) +} diff --git a/vendor/github.com/transparency-dev/formats/log/note.go b/vendor/github.com/transparency-dev/formats/log/note.go new file mode 100644 index 000000000000..45ea536841f2 --- /dev/null +++ b/vendor/github.com/transparency-dev/formats/log/note.go @@ -0,0 +1,56 @@ +// Copyright 2021 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "fmt" + + "golang.org/x/mod/sumdb/note" +) + +// ParseCheckpoint takes a raw checkpoint as bytes and returns a parsed checkpoint +// and any otherData in the body, providing that: +// * a valid log signature is found; and +// * the checkpoint unmarshals correctly; and +// * the log origin is that expected. +// In all other cases, an empty checkpoint is returned. 
The underlying note is always +// returned where possible. +// The signatures on the note will include the log signature if no error is returned, +// plus any signatures from otherVerifiers that were found. +func ParseCheckpoint(chkpt []byte, origin string, logVerifier note.Verifier, otherVerifiers ...note.Verifier) (*Checkpoint, []byte, *note.Note, error) { + vs := append(append(make([]note.Verifier, 0, len(otherVerifiers)+1), logVerifier), otherVerifiers...) + verifiers := note.VerifierList(vs...) + + n, err := note.Open(chkpt, verifiers) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to verify signatures on checkpoint: %v", err) + } + + for _, s := range n.Sigs { + if s.Hash == logVerifier.KeyHash() && s.Name == logVerifier.Name() { + // The log has signed this checkpoint. It is now safe to parse. + cp := &Checkpoint{} + var otherData []byte + if otherData, err = cp.Unmarshal([]byte(n.Text)); err != nil { + return nil, nil, n, fmt.Errorf("failed to unmarshal checkpoint: %v", err) + } + if cp.Origin != origin { + return nil, nil, n, fmt.Errorf("got Origin %q but expected %q", cp.Origin, origin) + } + return cp, otherData, n, nil + } + } + return nil, nil, n, fmt.Errorf("no log signature found on note") +} diff --git a/vendor/github.com/transparency-dev/merkle/.golangci.yaml b/vendor/github.com/transparency-dev/merkle/.golangci.yaml new file mode 100644 index 000000000000..0675e1ef4099 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/.golangci.yaml @@ -0,0 +1,11 @@ +run: + # timeout for analysis, e.g. 30s, 5m, default is 1m + deadline: 90s + +linters-settings: + depguard: + list-type: blacklist + packages: + - golang.org/x/net/context + - github.com/gogo/protobuf/proto + diff --git a/vendor/github.com/transparency-dev/merkle/CHANGELOG.md b/vendor/github.com/transparency-dev/merkle/CHANGELOG.md new file mode 100644 index 000000000000..b05ee0adcbdc --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/CHANGELOG.md @@ -0,0 +1,10 @@ +# MERKLE changelog + +## HEAD + +## v0.0.2 + * Fuzzing support + * Dependency updates, notably to go1.19 + +## v0.0.1 +Initial release diff --git a/vendor/github.com/transparency-dev/merkle/CODEOWNERS b/vendor/github.com/transparency-dev/merkle/CODEOWNERS new file mode 100644 index 000000000000..1b4134658414 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/CODEOWNERS @@ -0,0 +1 @@ +* @transparency-dev/core-team diff --git a/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md new file mode 100644 index 000000000000..43de4c9d4709 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md @@ -0,0 +1,58 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +a just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. This is not a copyright **assignment**, it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. 
+ +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + +Once your CLA is submitted (or if you already submitted one for +another Google project), make a commit adding yourself to the +[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part +of your first [pull request][]. + +[AUTHORS]: AUTHORS +[CONTRIBUTORS]: CONTRIBUTORS + + +## Submitting a patch ## + + 1. It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1. Follow the normal process of [forking][] the project, and setup a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Do your best to have [well-formed commit messages][] for each change. + This provides consistency throughout the project, and ensures that commit + messages are able to be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. + +[forking]: https://help.github.com/articles/fork-a-repo +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/transparency-dev/merkle/LICENSE b/vendor/github.com/transparency-dev/merkle/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/transparency-dev/merkle/README.md b/vendor/github.com/transparency-dev/merkle/README.md new file mode 100644 index 000000000000..3c8d2127110b --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/README.md @@ -0,0 +1,25 @@ +# Merkle + +[![Go Reference](https://pkg.go.dev/badge/github.com/transparency-dev/merkle.svg)](https://pkg.go.dev/github.com/transparency-dev/merkle) +[![Go Report +Card](https://goreportcard.com/badge/github.com/transparency-dev/merkle)](https://goreportcard.com/report/github.com/transparency-dev/merkle) +[![codecov](https://codecov.io/gh/transparency-dev/merkle/branch/main/graph/badge.svg?token=BBCRAMOBY2)](https://codecov.io/gh/transparency-dev/merkle) +[![Slack +Status](https://img.shields.io/badge/Slack-Chat-blue.svg)](https://gtrillian.slack.com/) + +## Overview + +This repository contains Go code to help create and manipulate Merkle trees, as +well as constructing and verifying various types of proof. 
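+
+For a quick illustration, here is a minimal sketch of computing a root hash with the `compact`
+and `rfc6962` packages from this repository (the leaf values are arbitrary):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/transparency-dev/merkle/compact"
+	"github.com/transparency-dev/merkle/rfc6962"
+)
+
+func main() {
+	hasher := rfc6962.DefaultHasher
+	rf := compact.RangeFactory{Hash: hasher.HashChildren}
+
+	// Append leaf hashes one by one to a compact range starting at index 0.
+	cr := rf.NewEmptyRange(0)
+	for _, leaf := range [][]byte{[]byte("a"), []byte("b"), []byte("c")} {
+		if err := cr.Append(hasher.HashLeaf(leaf), nil); err != nil {
+			panic(err)
+		}
+	}
+
+	root, err := cr.GetRootHash(nil)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("size=%d root=%x\n", cr.End(), root)
+}
+```
+
+The resulting root can then be checked against inclusion and consistency proofs via the `proof`
+package.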
+ +This is the data structure which is used by projects such as +[Trillian](https://github.com/google/trillian) to provide +[verifiable logs](https://transparency.dev/verifiable-data-structures/#verifiable-log). + + +## Support +* Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency +* Slack: https://gtrillian.slack.com/ (invitation) + + + diff --git a/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml b/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml new file mode 100644 index 000000000000..eafbc790a088 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml @@ -0,0 +1,26 @@ +timeout: 300s +options: + machineType: E2_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/transparency-dev/merkle + - GOPATH=/go + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +- id: 'lint' + name: "golangci/golangci-lint:v1.51" + args: ["golangci-lint", "run", "--timeout", "10m"] + +- id: 'unit tests' + name: 'golang:1.19' + args: ['go', 'test', './...'] + +- id: 'build' + name: 'golang:1.19' + args: ['go', 'build', './...'] diff --git a/vendor/github.com/transparency-dev/merkle/compact/nodes.go b/vendor/github.com/transparency-dev/merkle/compact/nodes.go new file mode 100644 index 000000000000..41c0854e4836 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/compact/nodes.go @@ -0,0 +1,89 @@ +// Copyright 2019 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compact + +import "math/bits" + +// NodeID identifies a node of a Merkle tree. +// +// The ID consists of a level and index within this level. Levels are numbered +// from 0, which corresponds to the tree leaves. Within each level, nodes are +// numbered with consecutive indices starting from 0. +// +// L4: ┌───────0───────┐ ... +// L3: ┌───0───┐ ┌───1───┐ ┌─── ... +// L2: ┌─0─┐ ┌─1─┐ ┌─2─┐ ┌─3─┐ ┌─4─┐ ... +// L1: ┌0┐ ┌1┐ ┌2┐ ┌3┐ ┌4┐ ┌5┐ ┌6┐ ┌7┐ ┌8┐ ┌9┐ ... +// L0: 0 1 2 3 4 5 6 7 8 9 ... ... ... ... ... ... +// +// When the tree is not perfect, the nodes that would complement it to perfect +// are called ephemeral. Algorithms that operate with ephemeral nodes still map +// them to the same address space. +type NodeID struct { + Level uint + Index uint64 +} + +// NewNodeID returns a NodeID with the passed in node coordinates. +func NewNodeID(level uint, index uint64) NodeID { + return NodeID{Level: level, Index: index} +} + +// Parent returns the ID of the parent node. +func (id NodeID) Parent() NodeID { + return NewNodeID(id.Level+1, id.Index>>1) +} + +// Sibling returns the ID of the sibling node. +func (id NodeID) Sibling() NodeID { + return NewNodeID(id.Level, id.Index^1) +} + +// Coverage returns the [begin, end) range of leaves covered by the node. 
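+// For example, the node at level 2, index 3 covers leaves [12, 16),
+// since 3<<2 == 12 and (3+1)<<2 == 16.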
+func (id NodeID) Coverage() (uint64, uint64) { + return id.Index << id.Level, (id.Index + 1) << id.Level +} + +// RangeNodes appends the IDs of the nodes that comprise the [begin, end) +// compact range to the given slice, and returns the new slice. The caller may +// pre-allocate space with the help of the RangeSize function. +func RangeNodes(begin, end uint64, ids []NodeID) []NodeID { + left, right := Decompose(begin, end) + + pos := begin + // Iterate over perfect subtrees along the left border of the range, ordered + // from lower to upper levels. + for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit { + level := uint(bits.TrailingZeros64(left)) + bit = uint64(1) << level + ids = append(ids, NewNodeID(level, pos>>level)) + } + + // Iterate over perfect subtrees along the right border of the range, ordered + // from upper to lower levels. + for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit { + level := uint(bits.Len64(right)) - 1 + bit = uint64(1) << level + ids = append(ids, NewNodeID(level, pos>>level)) + } + + return ids +} + +// RangeSize returns the number of nodes in the [begin, end) compact range. +func RangeSize(begin, end uint64) int { + left, right := Decompose(begin, end) + return bits.OnesCount64(left) + bits.OnesCount64(right) +} diff --git a/vendor/github.com/transparency-dev/merkle/compact/range.go b/vendor/github.com/transparency-dev/merkle/compact/range.go new file mode 100644 index 000000000000..0952646c4f6f --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/compact/range.go @@ -0,0 +1,264 @@ +// Copyright 2019 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compact provides compact Merkle tree data structures. +package compact + +import ( + "bytes" + "errors" + "fmt" + "math/bits" +) + +// HashFn computes an internal node's hash using the hashes of its child nodes. +type HashFn func(left, right []byte) []byte + +// VisitFn visits the node with the specified ID and hash. +type VisitFn func(id NodeID, hash []byte) + +// RangeFactory allows creating compact ranges with the specified hash +// function, which must not be nil, and must not be changed. +type RangeFactory struct { + Hash HashFn +} + +// NewRange creates a Range for [begin, end) with the given set of hashes. The +// hashes correspond to the roots of the minimal set of perfect sub-trees +// covering the [begin, end) leaves range, ordered left to right. +func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) { + if end < begin { + return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin) + } + if got, want := len(hashes), RangeSize(begin, end); got != want { + return nil, fmt.Errorf("invalid hashes: got %d values, want %d", got, want) + } + return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil +} + +// NewEmptyRange returns a new Range for an empty [begin, begin) range. 
The +// value of begin defines where the range will start growing from when entries +// are appended to it. +func (f *RangeFactory) NewEmptyRange(begin uint64) *Range { + return &Range{f: f, begin: begin, end: begin} +} + +// Range represents a compact Merkle tree range for leaf indices [begin, end). +// +// It contains the minimal set of perfect subtrees whose leaves comprise this +// range. The structure is efficiently mergeable with other compact ranges that +// share one of the endpoints with it. +// +// For more details, see +// https://github.com/transparency-dev/merkle/blob/main/docs/compact_ranges.md. +type Range struct { + f *RangeFactory + begin uint64 + end uint64 + hashes [][]byte +} + +// Begin returns the first index covered by the range (inclusive). +func (r *Range) Begin() uint64 { + return r.begin +} + +// End returns the last index covered by the range (exclusive). +func (r *Range) End() uint64 { + return r.end +} + +// Hashes returns sub-tree hashes corresponding to the minimal set of perfect +// sub-trees covering the [begin, end) range, ordered left to right. +func (r *Range) Hashes() [][]byte { + return r.hashes +} + +// Append extends the compact range by appending the passed in hash to it. It +// reports all the added nodes through the visitor function (if non-nil). +func (r *Range) Append(hash []byte, visitor VisitFn) error { + if visitor != nil { + visitor(NewNodeID(0, r.end), hash) + } + return r.appendImpl(r.end+1, hash, nil, visitor) +} + +// AppendRange extends the compact range by merging in the other compact range +// from the right. It uses the tree hasher to calculate hashes of newly created +// nodes, and reports them through the visitor function (if non-nil). +func (r *Range) AppendRange(other *Range, visitor VisitFn) error { + if other.f != r.f { + return errors.New("incompatible ranges") + } + if got, want := other.begin, r.end; got != want { + return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want) + } + if len(other.hashes) == 0 { // The other range is empty, merging is trivial. + return nil + } + return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor) +} + +// GetRootHash returns the root hash of the Merkle tree represented by this +// compact range. Requires the range to start at index 0. If the range is +// empty, returns nil. +// +// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the +// ones rooting imperfect subtrees) along the right border of the tree. +func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) { + if r.begin != 0 { + return nil, fmt.Errorf("begin=%d, want 0", r.begin) + } + ln := len(r.hashes) + if ln == 0 { + return nil, nil + } + hash := r.hashes[ln-1] + // All non-perfect subtree hashes along the right border of the tree + // correspond to the parents of all perfect subtree nodes except the lowest + // one (therefore the loop skips it). + for i, size := ln-2, r.end; i >= 0; i-- { + hash = r.f.Hash(r.hashes[i], hash) + if visitor != nil { + size &= size - 1 // Delete the previous node. + level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level. + index := size >> level // And its horizontal index. + visitor(NewNodeID(level, index), hash) + } + } + return hash, nil +} + +// Equal compares two Ranges for equality. 
+func (r *Range) Equal(other *Range) bool { + if r.f != other.f || r.begin != other.begin || r.end != other.end { + return false + } + if len(r.hashes) != len(other.hashes) { + return false + } + for i := range r.hashes { + if !bytes.Equal(r.hashes[i], other.hashes[i]) { + return false + } + } + return true +} + +// appendImpl extends the compact range by merging the [r.end, end) compact +// range into it. The other compact range is decomposed into a seed hash and +// all the other hashes (possibly none). The method uses the tree hasher to +// calculate hashes of newly created nodes, and reports them through the +// visitor function (if non-nil). +func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error { + // Bits [low, high) of r.end encode the merge path, i.e. the sequence of node + // merges that transforms the two compact ranges into one. + low, high := getMergePath(r.begin, r.end, end) + if high < low { + high = low + } + index := r.end >> low + // Now bits [0, high-low) of index encode the merge path. + + // The number of one bits in index is the number of nodes from the left range + // that will be merged, and zero bits correspond to the nodes in the right + // range. Below we make sure that both ranges have enough hashes, which can + // be false only in case the data is corrupted in some way. + ones := bits.OnesCount64(index & (1<<(high-low) - 1)) + if ln := len(r.hashes); ln < ones { + return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones) + } + if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros { + return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1) + } + + // Some of the trailing nodes of the left compact range, and some of the + // leading nodes of the right range, are sequentially merged with the seed, + // according to the mask. All new nodes are reported through the visitor. + idx1, idx2 := len(r.hashes), 0 + for h := low; h < high; h++ { + if index&1 == 0 { + seed = r.f.Hash(seed, hashes[idx2]) + idx2++ + } else { + idx1-- + seed = r.f.Hash(r.hashes[idx1], seed) + } + index >>= 1 + if visitor != nil { + visitor(NewNodeID(h+1, index), seed) + } + } + + // All nodes from both ranges that have not been merged are bundled together + // with the "merged" seed node. + r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...) + r.end = end + return nil +} + +// getMergePath returns the merging path between the compact range [begin, mid) +// and [mid, end). The path is represented as a range of bits within mid, with +// bit indices [low, high). A bit value of 1 on level i of mid means that the +// node on this level merges with the corresponding node in the left compact +// range, whereas 0 represents merging with the right compact range. If the +// path is empty then high <= low. +// +// The output is not specified if begin <= mid <= end doesn't hold, but the +// function never panics. +func getMergePath(begin, mid, end uint64) (uint, uint) { + low := bits.TrailingZeros64(mid) + high := 64 + if begin != 0 { + high = bits.Len64(mid ^ (begin - 1)) + } + if high2 := bits.Len64((mid - 1) ^ end); high2 < high { + high = high2 + } + return uint(low), uint(high - 1) +} + +// Decompose splits the [begin, end) range into a minimal number of sub-ranges, +// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for +// some integers m, k >= 0. 
+// +// The sequence of sizes is returned encoded as bitmasks left and right, where: +// - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k +// - left mask bits in LSB-to-MSB order encode the left part of the sequence +// - right mask bits in MSB-to-LSB order encode the right part +// +// The corresponding values of m are not returned (they can be calculated from +// begin and the sub-range sizes). +// +// For example, (begin, end) values of (0b110, 0b11101) would indicate a +// sequence of tree sizes: 2,8; 8,4,1. +// +// The output is not specified if begin > end, but the function never panics. +func Decompose(begin, end uint64) (uint64, uint64) { + // Special case, as the code below works only if begin != 0, or end < 2^63. + if begin == 0 { + return 0, end + } + xbegin := begin - 1 + // Find where paths to leaves #begin-1 and #end diverge, and mask the upper + // bits away, as only the nodes strictly below this point are in the range. + d := bits.Len64(xbegin^end) - 1 + mask := uint64(1)<= size { + return Nodes{}, fmt.Errorf("index %d out of bounds for tree size %d", index, size) + } + return nodes(index, 0, size).skipFirst(), nil +} + +// Consistency returns the information on how to fetch and construct a +// consistency proof between the two given tree sizes of a log Merkle tree. It +// requires 0 <= size1 <= size2. +func Consistency(size1, size2 uint64) (Nodes, error) { + if size1 > size2 { + return Nodes{}, fmt.Errorf("tree size %d > %d", size1, size2) + } + if size1 == size2 || size1 == 0 { + return Nodes{IDs: []compact.NodeID{}}, nil + } + + // Find the root of the biggest perfect subtree that ends at size1. + level := uint(bits.TrailingZeros64(size1)) + index := (size1 - 1) >> level + // The consistency proof consists of this node (except if size1 is a power of + // two, in which case adding this node would be redundant because the client + // is assumed to know it from a checkpoint), and nodes of the inclusion proof + // into this node in the tree of size2. + p := nodes(index, level, size2) + + // Handle the case when size1 is a power of 2. + if index == 0 { + return p.skipFirst(), nil + } + return p, nil +} + +// nodes returns the node IDs necessary to prove that the (level, index) node +// is included in the Merkle tree of the given size. +func nodes(index uint64, level uint, size uint64) Nodes { + // Compute the `fork` node, where the path from root to (level, index) node + // diverges from the path to (0, size). + // + // The sibling of this node is the ephemeral node which represents a subtree + // that is not complete in the tree of the given size. To compute the hash + // of the ephemeral node, we need all the non-ephemeral nodes that cover the + // same range of leaves. + // + // The `inner` variable is how many layers up from (level, index) the `fork` + // and the ephemeral nodes are. + inner := bits.Len64(index^(size>>level)) - 1 + fork := compact.NewNodeID(level+uint(inner), index>>inner) + + begin, end := fork.Coverage() + left := compact.RangeSize(0, begin) + right := compact.RangeSize(end, size) + + node := compact.NewNodeID(level, index) + // Pre-allocate the exact number of nodes for the proof, in order: + // - The seed node for which we are building the proof. + // - The `inner` nodes at each level up to the fork node. + // - The `right` nodes, comprising the ephemeral node. + // - The `left` nodes, completing the coverage of the whole [0, size) range. 
+ nodes := append(make([]compact.NodeID, 0, 1+inner+right+left), node) + + // The first portion of the proof consists of the siblings for nodes of the + // path going up to the level at which the ephemeral node appears. + for ; node.Level < fork.Level; node = node.Parent() { + nodes = append(nodes, node.Sibling()) + } + // This portion of the proof covers the range [begin, end) under it. The + // ranges to the left and to the right from it remain to be covered. + + // Add all the nodes (potentially none) that cover the right range, and + // represent the ephemeral node. Reverse them so that the Rehash method can + // process hashes in the convenient order, from lower to upper levels. + len1 := len(nodes) + nodes = compact.RangeNodes(end, size, nodes) + reverse(nodes[len(nodes)-right:]) + len2 := len(nodes) + // Add the nodes that cover the left range, ordered increasingly by level. + nodes = compact.RangeNodes(0, begin, nodes) + reverse(nodes[len(nodes)-left:]) + + // nodes[len1:len2] contains the nodes representing the ephemeral node. If + // it's empty, make it zero. Note that it can also contain a single node. + // Depending on the preference of the layer above, it may or may not be + // considered ephemeral. + if len1 >= len2 { + len1, len2 = 0, 0 + } + + return Nodes{IDs: nodes, begin: len1, end: len2, ephem: fork.Sibling()} +} + +// Ephem returns the ephemeral node, and indices begin and end, such that +// IDs[begin:end] slice contains the child nodes of the ephemeral node. +// +// The list is empty iff there are no ephemeral nodes in the proof. Some +// examples of when this can happen: a proof in a perfect tree; an inclusion +// proof for a leaf in a perfect subtree at the right edge of the tree. +func (n Nodes) Ephem() (compact.NodeID, int, int) { + return n.ephem, n.begin, n.end +} + +// Rehash computes the proof based on the slice of node hashes corresponding to +// their IDs in the n.IDs field. The slices must be of the same length. The hc +// parameter computes a node's hash based on hashes of its children. +// +// Warning: The passed-in slice of hashes can be modified in-place. +func (n Nodes) Rehash(h [][]byte, hc func(left, right []byte) []byte) ([][]byte, error) { + if got, want := len(h), len(n.IDs); got != want { + return nil, fmt.Errorf("got %d hashes but expected %d", got, want) + } + cursor := 0 + // Scan the list of node hashes, and store the rehashed list in-place. + // Invariant: cursor <= i, and h[:cursor] contains all the hashes of the + // rehashed list after scanning h up to index i-1. + for i, ln := 0, len(h); i < ln; i, cursor = i+1, cursor+1 { + hash := h[i] + if i >= n.begin && i < n.end { + // Scan the block of node hashes that need rehashing. + for i++; i < n.end; i++ { + hash = hc(h[i], hash) + } + i-- + } + h[cursor] = hash + } + return h[:cursor], nil +} + +func (n Nodes) skipFirst() Nodes { + n.IDs = n.IDs[1:] + // Fixup the indices into the IDs slice. + if n.begin < n.end { + n.begin-- + n.end-- + } + return n +} + +func reverse(ids []compact.NodeID) { + for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 { + ids[i], ids[j] = ids[j], ids[i] + } +} diff --git a/vendor/github.com/transparency-dev/merkle/proof/verify.go b/vendor/github.com/transparency-dev/merkle/proof/verify.go new file mode 100644 index 000000000000..d42e1afe36fe --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/proof/verify.go @@ -0,0 +1,176 @@ +// Copyright 2017 Google LLC. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proof + +import ( + "bytes" + "errors" + "fmt" + "math/bits" + + "github.com/transparency-dev/merkle" +) + +// RootMismatchError occurs when an inclusion proof fails. +type RootMismatchError struct { + ExpectedRoot []byte + CalculatedRoot []byte +} + +func (e RootMismatchError) Error() string { + return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot) +} + +func verifyMatch(calculated, expected []byte) error { + if !bytes.Equal(calculated, expected) { + return RootMismatchError{ExpectedRoot: expected, CalculatedRoot: calculated} + } + return nil +} + +// VerifyInclusion verifies the correctness of the inclusion proof for the leaf +// with the specified hash and index, relatively to the tree of the given size +// and root hash. Requires 0 <= index < size. +func VerifyInclusion(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte, root []byte) error { + calcRoot, err := RootFromInclusionProof(hasher, index, size, leafHash, proof) + if err != nil { + return err + } + return verifyMatch(calcRoot, root) +} + +// RootFromInclusionProof calculates the expected root hash for a tree of the +// given size, provided a leaf index and hash with the corresponding inclusion +// proof. Requires 0 <= index < size. +func RootFromInclusionProof(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte) ([]byte, error) { + if index >= size { + return nil, fmt.Errorf("index is beyond size: %d >= %d", index, size) + } + if got, want := len(leafHash), hasher.Size(); got != want { + return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want) + } + + inner, border := decompInclProof(index, size) + if got, want := len(proof), inner+border; got != want { + return nil, fmt.Errorf("wrong proof size %d, want %d", got, want) + } + + res := chainInner(hasher, leafHash, proof[:inner], index) + res = chainBorderRight(hasher, res, proof[inner:]) + return res, nil +} + +// VerifyConsistency checks that the passed-in consistency proof is valid +// between the passed in tree sizes, with respect to the corresponding root +// hashes. Requires 0 <= size1 <= size2. +func VerifyConsistency(hasher merkle.LogHasher, size1, size2 uint64, proof [][]byte, root1, root2 []byte) error { + switch { + case size2 < size1: + return fmt.Errorf("size2 (%d) < size1 (%d)", size1, size2) + case size1 == size2: + if len(proof) > 0 { + return errors.New("size1=size2, but proof is not empty") + } + return verifyMatch(root1, root2) + case size1 == 0: + // Any size greater than 0 is consistent with size 0. + if len(proof) > 0 { + return fmt.Errorf("expected empty proof, but got %d components", len(proof)) + } + return nil // Proof OK. + case len(proof) == 0: + return errors.New("empty proof") + } + + inner, border := decompInclProof(size1-1, size2) + shift := bits.TrailingZeros64(size1) + inner -= shift // Note: shift < inner if size1 < size2. 
+ + // The proof includes the root hash for the sub-tree of size 2^shift. + seed, start := proof[0], 1 + if size1 == 1<<uint(shift) { // Unless size1 is that very 2^shift. + seed, start = root1, 0 + } + if got, want := len(proof), start+inner+border; got != want { + return fmt.Errorf("wrong proof size %d, want %d", got, want) + } + proof = proof[start:] + + mask := (size1 - 1) >> uint(shift) // Start chaining from level |shift|. + hash1 := chainInnerRight(hasher, seed, proof[:inner], mask) + hash1 = chainBorderRight(hasher, hash1, proof[inner:]) + if err := verifyMatch(hash1, root1); err != nil { + return err + } + + // Verify the second root. + hash2 := chainInner(hasher, seed, proof[:inner], mask) + hash2 = chainBorderRight(hasher, hash2, proof[inner:]) + return verifyMatch(hash2, root2) +} + +// decompInclProof breaks down inclusion proof for a leaf at the specified +// |index| in a tree of the specified |size| into 2 components. The splitting +// point between them is where paths to leaves |index| and |size-1| diverge. +// Returns lengths of the bottom and upper proof parts correspondingly. The sum +// of the two determines the correct length of the inclusion proof. +func decompInclProof(index, size uint64) (int, int) { + inner := innerProofSize(index, size) + border := bits.OnesCount64(index >> uint(inner)) + return inner, border +} + +func innerProofSize(index, size uint64) int { + return bits.Len64(index ^ (size - 1)) +} + +// chainInner computes a subtree hash for a node on or below the tree's right +// border. Assumes |proof| hashes are ordered from lower levels to upper, and +// |seed| is the initial subtree/leaf hash on the path located at the specified +// |index| on its level. +func chainInner(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte { + for i, h := range proof { + if (index>>uint(i))&1 == 0 { + seed = hasher.HashChildren(seed, h) + } else { + seed = hasher.HashChildren(h, seed) + } + } + return seed +} + +// chainInnerRight computes a subtree hash like chainInner, but only takes +// hashes to the left from the path into consideration, which effectively means +// the result is a hash of the corresponding earlier version of this subtree. +func chainInnerRight(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte { + for i, h := range proof { + if (index>>uint(i))&1 == 1 { + seed = hasher.HashChildren(h, seed) + } + } + return seed +} + +// chainBorderRight chains proof hashes along tree borders. This differs from +// inner chaining because |proof| contains only left-side subtree hashes. +func chainBorderRight(hasher merkle.LogHasher, seed []byte, proof [][]byte) []byte { + for _, h := range proof { + seed = hasher.HashChildren(h, seed) + } + return seed +} diff --git a/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go new file mode 100644 index 000000000000..b04f952ef855 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go @@ -0,0 +1,68 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rfc6962 provides hashing functionality according to RFC6962.
+package rfc6962 + +import ( + "crypto" + _ "crypto/sha256" // SHA256 is the default algorithm. +) + +// Domain separation prefixes +const ( + RFC6962LeafHashPrefix = 0 + RFC6962NodeHashPrefix = 1 +) + +// DefaultHasher is a SHA256 based LogHasher. +var DefaultHasher = New(crypto.SHA256) + +// Hasher implements the RFC6962 tree hashing algorithm. +type Hasher struct { + crypto.Hash +} + +// New creates a new Hashers.LogHasher on the passed in hash function. +func New(h crypto.Hash) *Hasher { + return &Hasher{Hash: h} +} + +// EmptyRoot returns a special case for an empty tree. +func (t *Hasher) EmptyRoot() []byte { + return t.New().Sum(nil) +} + +// HashLeaf returns the Merkle tree leaf hash of the data passed in through leaf. +// The data in leaf is prefixed by the LeafHashPrefix. +func (t *Hasher) HashLeaf(leaf []byte) []byte { + h := t.New() + h.Write([]byte{RFC6962LeafHashPrefix}) + h.Write(leaf) + return h.Sum(nil) +} + +// HashChildren returns the inner Merkle tree node hash of the two child nodes l and r. +// The hashed structure is NodeHashPrefix||l||r. +func (t *Hasher) HashChildren(l, r []byte) []byte { + h := t.New() + b := append(append(append( + make([]byte, 0, 1+len(l)+len(r)), + RFC6962NodeHashPrefix), + l...), + r...) + + h.Write(b) + return h.Sum(nil) +} diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go new file mode 100644 index 000000000000..a0d818582611 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -0,0 +1,50 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer +// See THIRD-PARTY-NOTICES for original license terms. + +package bson // import "go.mongodb.org/mongo-driver/bson" + +import ( + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. 
+type Zeroer interface { + IsZero() bool +} + +// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, +// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. +// +// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. +// +// Example usage: +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +type D = primitive.D + +// E represents a BSON element for a D. It is usually used inside a D. +type E = primitive.E + +// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not +// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be +// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. +// +// Example usage: +// +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +type M = primitive.M + +// An A is an ordered representation of a BSON array. +// +// Example usage: +// +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go new file mode 100644 index 000000000000..652aa48b8536 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -0,0 +1,55 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +// ArrayCodec is the Codec used for bsoncore.Array values. +// +// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. +type ArrayCodec struct{} + +var defaultArrayCodec = NewArrayCodec() + +// NewArrayCodec returns an ArrayCodec. +// +// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See +// [ArrayCodec] for more details. +func NewArrayCodec() *ArrayCodec { + return &ArrayCodec{} +} + +// EncodeValue is the ValueEncoder for bsoncore.Array values. +func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCoreArray { + return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} + } + + arr := val.Interface().(bsoncore.Array) + return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) +} + +// DecodeValue is the ValueDecoder for bsoncore.Array values. 
+func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreArray { + return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) + val.Set(reflect.ValueOf(arr)) + return err +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go new file mode 100644 index 000000000000..0693bd432fee --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -0,0 +1,382 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" + +import ( + "fmt" + "reflect" + "strings" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +var ( + emptyValue = reflect.Value{} +) + +// Marshaler is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. +type Marshaler interface { + MarshalBSON() ([]byte, error) +} + +// ValueMarshaler is an interface implemented by types that can marshal +// themselves into a BSON value as bytes. The type must be the valid type for +// the bytes returned. The bytes and byte type together must be valid if the +// error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. +type ValueMarshaler interface { + MarshalBSONValue() (bsontype.Type, []byte, error) +} + +// Unmarshaler is an interface implemented by types that can unmarshal a BSON +// document representation of themselves. The BSON bytes can be assumed to be +// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data +// after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. +type Unmarshaler interface { + UnmarshalBSON([]byte) error +} + +// ValueUnmarshaler is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it +// wishes to retain the data after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. +type ValueUnmarshaler interface { + UnmarshalBSONValue(bsontype.Type, []byte) error +} + +// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be +// encoded by the ValueEncoder. 
+type ValueEncoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vee ValueEncoderError) Error() string { + typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) + for _, t := range vee.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vee.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vee.Received.Kind().String() + if vee.Received.IsValid() { + received = vee.Received.Type().String() + } + return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) +} + +// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be +// decoded by the ValueDecoder. +type ValueDecoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vde ValueDecoderError) Error() string { + typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) + for _, t := range vde.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vde.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vde.Received.Kind().String() + if vde.Received.IsValid() { + received = vde.Received.Type().String() + } + return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) +} + +// EncodeContext is the contextual information required for a Codec to encode a +// value. +type EncodeContext struct { + *Registry + + // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) + // that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. + MinSize bool + + errorOnInlineDuplicates bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool +} + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. +func (ec *EncodeContext) ErrorOnInlineDuplicates() { + ec.errorOnInlineDuplicates = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprintf() instead of the default string conversion logic. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. +func (ec *EncodeContext) StringifyMapKeysWithFmt() { + ec.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (ec *EncodeContext) NilMapAsEmpty() { + ec.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. 
+func (ec *EncodeContext) NilSliceAsEmpty() { + ec.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (ec *EncodeContext) NilByteSliceAsEmpty() { + ec.nilByteSliceAsEmpty = true +} + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. +func (ec *EncodeContext) OmitZeroStruct() { + ec.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. +func (ec *EncodeContext) UseJSONStructTags() { + ec.useJSONStructTags = true +} + +// DecodeContext is the contextual information required for a Codec to decode a +// value. +type DecodeContext struct { + *Registry + + // Truncate, if true, instructs decoders to to truncate the fractional part of BSON "double" + // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to + // BSON "decimal128" values. + // + // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. + Truncate bool + + // Ancestor is the type of a containing document. This is mainly used to determine what type + // should be used when decoding an embedded document into an empty interface. For example, if + // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface + // will be decoded into a bson.M. + // + // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. + Ancestor reflect.Type + + // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the + // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is + // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an + // error. DocumentType overrides the Ancestor field. + defaultDocumentType reflect.Type + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool +} + +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. +func (dc *DecodeContext) BinaryAsSlice() { + dc.binaryAsSlice = true +} + +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. 
+func (dc *DecodeContext) UseJSONStructTags() { + dc.useJSONStructTags = true +} + +// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead +// of the UTC timezone. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. +func (dc *DecodeContext) UseLocalTimeZone() { + dc.useLocalTimeZone = true +} + +// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value +// passed to Decode before unmarshaling BSON documents into them. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. +func (dc *DecodeContext) ZeroMaps() { + dc.zeroMaps = true +} + +// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination +// value passed to Decode before unmarshaling BSON documents into them. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. +func (dc *DecodeContext) ZeroStructs() { + dc.zeroStructs = true +} + +// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead. +func (dc *DecodeContext) DefaultDocumentM() { + dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) +} + +// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead. +func (dc *DecodeContext) DefaultDocumentD() { + dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) +} + +// ValueCodec is an interface for encoding and decoding a reflect.Value. +// values. +// +// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead. +type ValueCodec interface { + ValueEncoder + ValueDecoder +} + +// ValueEncoder is the interface implemented by types that can handle the encoding of a value. +type ValueEncoder interface { + EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error +} + +// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be +// used as a ValueEncoder. +type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error + +// EncodeValue implements the ValueEncoder interface. +func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + return fn(ec, vw, val) +} + +// ValueDecoder is the interface implemented by types that can handle the decoding of a value. +type ValueDecoder interface { + DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error +} + +// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be +// used as a ValueDecoder. +type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error + +// DecodeValue implements the ValueDecoder interface. +func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + return fn(dc, vr, val) +} + +// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type. 
+type typeDecoder interface { + decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) +} + +// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder. +type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) + +func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + return fn(dc, vr, t) +} + +// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder. +type decodeAdapter struct { + ValueDecoderFunc + typeDecoderFunc +} + +var _ ValueDecoder = decodeAdapter{} +var _ typeDecoder = decodeAdapter{} + +// decodeTypeOrValue calls decoder.decodeType is decoder is a typeDecoder. Otherwise, it allocates a new element of type +// t and calls decoder.DecodeValue on it. +func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + td, _ := decoder.(typeDecoder) + return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true) +} + +func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) { + if td != nil { + val, err := td.decodeType(dc, vr, t) + if err == nil && convert && val.Type() != t { + // This conversion step is necessary for slices and maps. If a user declares variables like: + // + // type myBool bool + // var m map[string]myBool + // + // and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present + // because we'll try to assign a value of type bool to one of type myBool. + val = val.Convert(t) + } + return val, err + } + + val := reflect.New(t).Elem() + err := vd.DecodeValue(dc, vr, val) + return val, err +} + +// CodecZeroer is the interface implemented by Codecs that can also determine if +// a value of the type that would be encoded is zero. +// +// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver +// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to +// nil instead. +type CodecZeroer interface { + IsTypeZero(interface{}) bool +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go new file mode 100644 index 000000000000..0134b5a94bec --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -0,0 +1,138 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// ByteSliceCodec is the Codec used for []byte values. +// +// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver +// 2.0. To configure the byte slice encode and decode behavior, use the +// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. 
To configure the byte slice +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to encode nil byte slices as empty +// BSON binary values, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilByteSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in ByteSliceCodec for the +// corresponding settings. +type ByteSliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values + // instead of BSON null. + // + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty + // instead. + EncodeNilAsEmpty bool +} + +var ( + defaultByteSliceCodec = NewByteSliceCodec() + + // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. + _ typeDecoder = defaultByteSliceCodec +) + +// NewByteSliceCodec returns a ByteSliceCodec with options opts. +// +// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See +// [ByteSliceCodec] for more details. +func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { + byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) + codec := ByteSliceCodec{} + if byteSliceOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty + } + return &codec +} + +// EncodeValue is the ValueEncoder for []byte. +func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tByteSlice { + return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { + return vw.WriteNull() + } + return vw.WriteBinary(val.Interface().([]byte)) +} + +func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tByteSlice { + return emptyValue, ValueDecoderError{ + Name: "ByteSliceDecodeValue", + Types: []reflect.Type{tByteSlice}, + Received: reflect.Zero(t), + } + } + + var data []byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + data = []byte(str) + case bsontype.Symbol: + sym, err := vr.ReadSymbol() + if err != nil { + return emptyValue, err + } + data = []byte(sym) + case bsontype.Binary: + var subtype byte + data, subtype, err = vr.ReadBinary() + if err != nil { + return emptyValue, err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"} + } + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(data), nil +} + +// DecodeValue is the ValueDecoder for []byte. 
+func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + elem, err := bsc.decodeType(dc, vr, tByteSlice) + if err != nil { + return err + } + + val.Set(elem) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go new file mode 100644 index 000000000000..844b50299f2f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go @@ -0,0 +1,166 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// Runtime check that the kind encoder and decoder caches can store any valid +// reflect.Kind constant. +func init() { + if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { + panic("The capacity of kindEncoderCache is too small.\n" + + "This is due to a new type being added to reflect.Kind.") + } +} + +// statically assert array size +var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] +var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] + +type typeEncoderCache struct { + cache sync.Map // map[reflect.Type]ValueEncoder +} + +func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { + c.cache.Store(rt, enc) +} + +func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueEncoder), true + } + return nil, false +} + +func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { + if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { + enc = v.(ValueEncoder) + } + return enc +} + +func (c *typeEncoderCache) Clone() *typeEncoderCache { + cc := new(typeEncoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +type typeDecoderCache struct { + cache sync.Map // map[reflect.Type]ValueDecoder +} + +func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { + c.cache.Store(rt, dec) +} + +func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueDecoder), true + } + return nil, false +} + +func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { + if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { + dec = v.(ValueDecoder) + } + return dec +} + +func (c *typeDecoderCache) Clone() *typeDecoderCache { + cc := new(typeDecoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueEncoder interface). 
+type kindEncoderCacheEntry struct { + enc ValueEncoder +} + +type kindEncoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry +} + +func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { + if enc != nil && rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) + } +} + +func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { + return ent.enc, ent.enc != nil + } + } + return nil, false +} + +func (c *kindEncoderCache) Clone() *kindEncoderCache { + cc := new(kindEncoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueDecoder interface). +type kindDecoderCacheEntry struct { + dec ValueDecoder +} + +type kindDecoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry +} + +func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { + if rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) + } +} + +func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { + return ent.dec, ent.dec != nil + } + } + return nil, false +} + +func (c *kindDecoderCache) Clone() *kindDecoderCache { + cc := new(kindDecoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go new file mode 100644 index 000000000000..cb8180f25ccc --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go @@ -0,0 +1,63 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonrw" +) + +// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder. +type condAddrEncoder struct { + canAddrEnc ValueEncoder + elseEnc ValueEncoder +} + +var _ ValueEncoder = (*condAddrEncoder)(nil) + +// newCondAddrEncoder returns an condAddrEncoder. +func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder { + encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + return &encoder +} + +// EncodeValue is the ValueEncoderFunc for a value that may be addressable. +func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.CanAddr() { + return cae.canAddrEnc.EncodeValue(ec, vw, val) + } + if cae.elseEnc != nil { + return cae.elseEnc.EncodeValue(ec, vw, val) + } + return ErrNoEncoder{Type: val.Type()} +} + +// condAddrDecoder is the decoder used when a pointer to the value has a decoder. 
+type condAddrDecoder struct { + canAddrDec ValueDecoder + elseDec ValueDecoder +} + +var _ ValueDecoder = (*condAddrDecoder)(nil) + +// newCondAddrDecoder returns an CondAddrDecoder. +func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder { + decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec} + return &decoder +} + +// DecodeValue is the ValueDecoderFunc for a value that may be addressable. +func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if val.CanAddr() { + return cad.canAddrDec.DecodeValue(dc, vr, val) + } + if cad.elseDec != nil { + return cad.elseDec.DecodeValue(dc, vr, val) + } + return ErrNoDecoder{Type: val.Type()} +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go new file mode 100644 index 000000000000..8702d6d39e09 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -0,0 +1,1819 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "strconv" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var ( + defaultValueDecoders DefaultValueDecoders + errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") +) + +type decodeBinaryError struct { + subtype byte + typeName string +} + +func (d decodeBinaryError) Error() string { + return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype) +} + +func newDefaultStructCodec() *StructCodec { + codec, err := NewStructCodec(DefaultStructTagParser) + if err != nil { + // This function is called from the codec registration path, so errors can't be propagated. If there's an error + // constructing the StructCodec, we panic to avoid losing it. + panic(fmt.Errorf("error creating default StructCodec: %w", err)) + } + return codec +} + +// DefaultValueDecoders is a namespace type for the default ValueDecoders used +// when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +type DefaultValueDecoders struct{} + +// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with +// the provided RegistryBuilder. +// +// There is no support for decoding map[string]interface{} because there is no decoder for +// interface{}, so users must either register this decoder themselves or use the +// EmptyInterfaceDecoder available in the bson package. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) + } + + intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} + floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} + + rb. + RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). + RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). + RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). + RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). + RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). + RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). + RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). + RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). + RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). + RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). + RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). + RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). + RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). + RegisterTypeDecoder(tTime, defaultTimeCodec). + RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). + RegisterTypeDecoder(tCoreArray, defaultArrayCodec). + RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). + RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). + RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). + RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). + RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). + RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). + RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). + RegisterDefaultDecoder(reflect.Int, intDecoder). + RegisterDefaultDecoder(reflect.Int8, intDecoder). + RegisterDefaultDecoder(reflect.Int16, intDecoder). + RegisterDefaultDecoder(reflect.Int32, intDecoder). + RegisterDefaultDecoder(reflect.Int64, intDecoder). + RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Float32, floatDecoder). + RegisterDefaultDecoder(reflect.Float64, floatDecoder). + RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). + RegisterDefaultDecoder(reflect.Map, defaultMapCodec). + RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). + RegisterDefaultDecoder(reflect.String, defaultStringCodec). + RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). + RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). + RegisterTypeMapEntry(bsontype.Double, tFloat64). + RegisterTypeMapEntry(bsontype.String, tString). + RegisterTypeMapEntry(bsontype.Array, tA). 
+ RegisterTypeMapEntry(bsontype.Binary, tBinary). + RegisterTypeMapEntry(bsontype.Undefined, tUndefined). + RegisterTypeMapEntry(bsontype.ObjectID, tOID). + RegisterTypeMapEntry(bsontype.Boolean, tBool). + RegisterTypeMapEntry(bsontype.DateTime, tDateTime). + RegisterTypeMapEntry(bsontype.Regex, tRegex). + RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). + RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). + RegisterTypeMapEntry(bsontype.Symbol, tSymbol). + RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). + RegisterTypeMapEntry(bsontype.Int32, tInt32). + RegisterTypeMapEntry(bsontype.Int64, tInt64). + RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). + RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). + RegisterTypeMapEntry(bsontype.MinKey, tMinKey). + RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). + RegisterTypeMapEntry(bsontype.Type(0), tD). + RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). + RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). + RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) +} + +// DDecodeValue is the ValueDecoderFunc for primitive.D instances. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || !val.CanSet() || val.Type() != tD { + return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + dc.Ancestor = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a primitive.D", vrType) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return err + } + tEmptyTypeDecoder, _ := decoder.(typeDecoder) + + // Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance. + var elems primitive.D + if !val.IsNil() { + val.SetLen(0) + elems = val.Interface().(primitive.D) + } else { + elems = make(primitive.D, 0) + } + + for { + key, elemVr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } else if err != nil { + return err + } + + // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty. 
+ elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false) + if err != nil { + return err + } + + elems = append(elems, primitive.E{Key: key, Value: elem.Interface()}) + } + + val.Set(reflect.ValueOf(elems)) + return nil +} + +func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t.Kind() != reflect.Bool { + return emptyValue, ValueDecoderError{ + Name: "BooleanDecodeValue", + Kinds: []reflect.Kind{reflect.Bool}, + Received: reflect.Zero(t), + } + } + + var b bool + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + b = (i32 != 0) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + b = (i64 != 0) + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + b = (f64 != 0) + case bsontype.Boolean: + b, err = vr.ReadBoolean() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(b), nil +} + +// BooleanDecodeValue is the ValueDecoderFunc for bool types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { + return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + + elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) + if err != nil { + return err + } + + val.SetBool(elem.Bool()) + return nil +} + +func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return emptyValue, err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return emptyValue, errCannotTruncate + } + if f64 > float64(math.MaxInt64) { + return emptyValue, fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch t.Kind() { + case reflect.Int8: + if i64 < math.MinInt8 || i64 > math.MaxInt8 { + return emptyValue, fmt.Errorf("%d overflows int8", i64) + } + + return reflect.ValueOf(int8(i64)), nil + case reflect.Int16: + if i64 < math.MinInt16 || i64 > math.MaxInt16 { + return emptyValue, fmt.Errorf("%d overflows int16", i64) + } + + return reflect.ValueOf(int16(i64)), nil + case reflect.Int32: + if i64 < math.MinInt32 || i64 > math.MaxInt32 { + return 
emptyValue, fmt.Errorf("%d overflows int32", i64) + } + + return reflect.ValueOf(int32(i64)), nil + case reflect.Int64: + return reflect.ValueOf(i64), nil + case reflect.Int: + if i64 > math.MaxInt { // Can we fit this inside of an int + return emptyValue, fmt.Errorf("%d overflows int", i64) + } + + return reflect.ValueOf(int(i64)), nil + default: + return emptyValue, ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.Zero(t), + } + } +} + +// IntDecodeValue is the ValueDecoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } + } + + elem, err := dvd.intDecodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.SetInt(elem.Int()) + return nil +} + +// UintDecodeValue is the ValueDecoderFunc for uint types. +// +// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var i64 int64 + var err error + switch vr.Type() { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return err + } + if b { + i64 = 1 + } + default: + return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) + } + + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + switch val.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return fmt.Errorf("%d overflows uint8", i64) + } + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return fmt.Errorf("%d overflows uint16", i64) + } + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return fmt.Errorf("%d overflows uint32", i64) + } + case reflect.Uint64: + if i64 < 0 { + return fmt.Errorf("%d overflows uint64", i64) + } + case reflect.Uint: + if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint + return fmt.Errorf("%d overflows uint", i64) + } + default: + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + val.SetUint(uint64(i64)) + return nil +} + +func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var f float64 + var err error + switch vrType := vr.Type(); vrType { + case 
bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + f = float64(i32) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + f = float64(i64) + case bsontype.Double: + f, err = vr.ReadDouble() + if err != nil { + return emptyValue, err + } + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + f = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) + } + + switch t.Kind() { + case reflect.Float32: + if !dc.Truncate && float64(float32(f)) != f { + return emptyValue, errCannotTruncate + } + + return reflect.ValueOf(float32(f)), nil + case reflect.Float64: + return reflect.ValueOf(f), nil + default: + return emptyValue, ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: reflect.Zero(t), + } + } +} + +// FloatDecodeValue is the ValueDecoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: val, + } + } + + elem, err := dvd.floatDecodeType(ec, vr, val.Type()) + if err != nil { + return err + } + + val.SetFloat(elem.Float()) + return nil +} + +// StringDecodeValue is the ValueDecoderFunc for string types. +// +// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var str string + var err error + switch vr.Type() { + // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + val.SetString(str) + return nil +} + +func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tJavaScript { + return emptyValue, ValueDecoderError{ + Name: "JavaScriptDecodeValue", + Types: []reflect.Type{tJavaScript}, + Received: reflect.Zero(t), + } + } + + var js string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.JavaScript: + js, err = vr.ReadJavascript() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.JavaScript(js)), nil +} + +// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJavaScript { + return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} + +func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tSymbol { + return emptyValue, ValueDecoderError{ + Name: "SymbolDecodeValue", + Types: []reflect.Type{tSymbol}, + Received: reflect.Zero(t), + } + } + + var symbol string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + symbol, err = vr.ReadString() + case bsontype.Symbol: + symbol, err = vr.ReadSymbol() + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return emptyValue, err + } + + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} + } + symbol = string(data) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Symbol(symbol)), nil +} + +// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tSymbol { + return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} + +func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tBinary { + return emptyValue, ValueDecoderError{ + Name: "BinaryDecodeValue", + Types: []reflect.Type{tBinary}, + Received: reflect.Zero(t), + } + } + + var data []byte + var subtype byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Binary: + data, subtype, err = vr.ReadBinary() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil +} + +// BinaryDecodeValue is the ValueDecoderFunc for Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tBinary { + return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + + elem, err := dvd.binaryDecodeType(dc, vr, tBinary) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tUndefined { + return emptyValue, ValueDecoderError{ + Name: "UndefinedDecodeValue", + Types: []reflect.Type{tUndefined}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Undefined{}), nil +} + +// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tUndefined { + return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// Accept both 12-byte string and pretty-printed 24-byte hex string formats. +func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tOID { + return emptyValue, ValueDecoderError{ + Name: "ObjectIDDecodeValue", + Types: []reflect.Type{tOID}, + Received: reflect.Zero(t), + } + } + + var oid primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.ObjectID: + oid, err = vr.ReadObjectID() + if err != nil { + return emptyValue, err + } + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + if oid, err = primitive.ObjectIDFromHex(str); err == nil { + break + } + if len(str) != 12 { + return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) + } + byteArr := []byte(str) + copy(oid[:], byteArr) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) + } + + return reflect.ValueOf(oid), nil +} + +// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tOID { + return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} + } + + elem, err := dvd.objectIDDecodeType(dc, vr, tOID) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDateTime { + return emptyValue, ValueDecoderError{ + Name: "DateTimeDecodeValue", + Types: []reflect.Type{tDateTime}, + Received: reflect.Zero(t), + } + } + + var dt int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DateTime: + dt, err = vr.ReadDateTime() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.DateTime(dt)), nil +} + +// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDateTime { + return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tNull { + return emptyValue, ValueDecoderError{ + Name: "NullDecodeValue", + Types: []reflect.Type{tNull}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Null{}), nil +} + +// NullDecodeValue is the ValueDecoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tNull { + return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + elem, err := dvd.nullDecodeType(dc, vr, tNull) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tRegex { + return emptyValue, ValueDecoderError{ + Name: "RegexDecodeValue", + Types: []reflect.Type{tRegex}, + Received: reflect.Zero(t), + } + } + + var pattern, options string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Regex: + pattern, options, err = vr.ReadRegex() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil +} + +// RegexDecodeValue is the ValueDecoderFunc for Regex. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tRegex { + return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + elem, err := dvd.regexDecodeType(dc, vr, tRegex) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDBPointer { + return emptyValue, ValueDecoderError{ + Name: "DBPointerDecodeValue", + Types: []reflect.Type{tDBPointer}, + Received: reflect.Zero(t), + } + } + + var ns string + var pointer primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DBPointer: + ns, pointer, err = vr.ReadDBPointer() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil +} + +// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDBPointer { + return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { + if reflectType != tTimestamp { + return emptyValue, ValueDecoderError{ + Name: "TimestampDecodeValue", + Types: []reflect.Type{tTimestamp}, + Received: reflect.Zero(reflectType), + } + } + + var t, incr uint32 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Timestamp: + t, incr, err = vr.ReadTimestamp() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil +} + +// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tTimestamp { + return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tMinKey { + return emptyValue, ValueDecoderError{ + Name: "MinKeyDecodeValue", + Types: []reflect.Type{tMinKey}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MinKey: + err = vr.ReadMinKey() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.MinKey{}), nil +} + +// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMinKey { + return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tMaxKey { + return emptyValue, ValueDecoderError{ + Name: "MaxKeyDecodeValue", + Types: []reflect.Type{tMaxKey}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MaxKey: + err = vr.ReadMaxKey() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.MaxKey{}), nil +} + +// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMaxKey { + return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDecimal { + return emptyValue, ValueDecoderError{ + Name: "Decimal128DecodeValue", + Types: []reflect.Type{tDecimal}, + Received: reflect.Zero(t), + } + } + + var d128 primitive.Decimal128 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Decimal128: + d128, err = vr.ReadDecimal128() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(d128), nil +} + +// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDecimal { + return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + + elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tJSONNumber { + return emptyValue, ValueDecoderError{ + Name: "JSONNumberDecodeValue", + Types: []reflect.Type{tJSONNumber}, + Received: reflect.Zero(t), + } + } + + var jsonNum json.Number + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatInt(i64, 10)) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(jsonNum), nil +} + +// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJSONNumber { + return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} + } + + elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tURL { + return emptyValue, ValueDecoderError{ + Name: "URLDecodeValue", + Types: []reflect.Type{tURL}, + Received: reflect.Zero(t), + } + } + + urlPtr := &url.URL{} + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + var str string // Declare str here to avoid shadowing err during the ReadString call. + str, err = vr.ReadString() + if err != nil { + return emptyValue, err + } + + urlPtr, err = url.Parse(str) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(urlPtr).Elem(), nil +} + +// URLDecodeValue is the ValueDecoderFunc for url.URL. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tURL { + return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} + } + + elem, err := dvd.urlDecodeType(dc, vr, tURL) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// TimeDecodeValue is the ValueDecoderFunc for time.Time. +// +// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.DateTime { + return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) + } + + dt, err := vr.ReadDateTime() + if err != nil { + return err + } + + if !val.CanSet() || val.Type() != tTime { + return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} + } + + val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) + return nil +} + +// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. +// +// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { + return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) + } + + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + if vr.Type() == bsontype.Null { + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + } + + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != 0x00 { + return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) + } + + val.Set(reflect.ValueOf(data)) + return nil +} + +// MapDecodeValue is the ValueDecoderFunc for map[string]* types. +// +// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { + return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + eType := val.Type().Elem() + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return err + } + + if eType == tEmpty { + dc.Ancestor = val.Type() + } + + keyType := val.Type().Key() + for { + key, vr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } + if err != nil { + return err + } + + elem := reflect.New(eType).Elem() + + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) + } + return nil +} + +// ArrayDecodeValue is the ValueDecoderFunc for array types. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if len(data) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) + } + + for idx, elem := range data { + val.Index(idx).Set(reflect.ValueOf(elem)) + } + return nil + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + default: + return fmt.Errorf("cannot decode %v into an array", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if len(elems) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) + } + + for idx, elem := range elems { + val.Index(idx).Set(elem) + } + + return nil +} + +// SliceDecodeValue is the ValueDecoderFunc for slice types. +// +// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vr.Type() { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + default: + return fmt.Errorf("cannot decode %v into a slice", vr.Type()) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} + +// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + + // If BSON value is null and the go value is a pointer, then don't call + // UnmarshalBSONValue. Even if the Go pointer is already initialized (i.e., + // non-nil), encountering null in BSON will result in the pointer being + // directly set to nil here. Since the pointer is being replaced with nil, + // there is no opportunity (or reason) for the custom UnmarshalBSONValue logic + // to be called. + if vr.Type() == bsontype.Null && val.Kind() == reflect.Ptr { + val.Set(reflect.Zero(val.Type())) + + return vr.ReadNull() + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + if !val.Type().Implements(tValueUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + m, ok := val.Interface().(ValueUnmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + return m.UnmarshalBSONValue(t, src) +} + +// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + // If the target Go value is a pointer and the BSON field value is empty, set the value to the + // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to + // change the pointer value from within the function (only the value at the pointer address), + // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON + // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches + // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and + // the JSON field value is "null". 
+ if val.Kind() == reflect.Ptr && len(src) == 0 { + val.Set(reflect.Zero(val.Type())) + return nil + } + + if !val.Type().Implements(tUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + m, ok := val.Interface().(Unmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + return m.UnmarshalBSON(src) +} + +// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. +// +// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + rtype, err := dc.LookupTypeMapEntry(vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.EmbeddedDocument: + if dc.Ancestor != nil { + rtype = dc.Ancestor + break + } + rtype = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return err + } + + elem := reflect.New(rtype).Elem() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
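The pointer and nil handling described in the comments above is the only behavior the hook itself controls; everything else is delegated to the target type's UnmarshalBSON method. A minimal sketch of such a type, assuming only the public bson.Unmarshal helper and a hypothetical myDuration wrapper:

import (
	"time"

	"go.mongodb.org/mongo-driver/bson"
)

// myDuration stores a duration as the BSON document {"ms": <int64>}.
type myDuration struct{ D time.Duration }

// UnmarshalBSON satisfies the Unmarshaler hook: it receives the raw bytes of
// the BSON document held in the field and decodes them itself.
func (d *myDuration) UnmarshalBSON(data []byte) error {
	var tmp struct {
		MS int64 `bson:"ms"`
	}
	if err := bson.Unmarshal(data, &tmp); err != nil {
		return err
	}
	d.D = time.Duration(tmp.MS) * time.Millisecond
	return nil
}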
+func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreDocument { + return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + + cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) + val.Set(reflect.ValueOf(cdoc)) + return err +} + +func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { + elems := make([]reflect.Value, 0) + + ar, err := vr.ReadArray() + if err != nil { + return nil, err + } + + eType := val.Type().Elem() + + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return nil, err + } + eTypeDecoder, _ := decoder.(typeDecoder) + + idx := 0 + for { + vr, err := ar.ReadValue() + if errors.Is(err, bsonrw.ErrEOA) { + break + } + if err != nil { + return nil, err + } + + elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) + if err != nil { + return nil, newDecodeError(strconv.Itoa(idx), err) + } + elems = append(elems, elem) + idx++ + } + + return elems, nil +} + +func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { + var cws primitive.CodeWithScope + + code, dr, err := vr.ReadCodeWithScope() + if err != nil { + return cws, err + } + + scope := reflect.New(tD).Elem() + elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) + if err != nil { + return cws, err + } + + scope.Set(reflect.MakeSlice(tD, 0, len(elems))) + scope.Set(reflect.Append(scope, elems...)) + + cws = primitive.CodeWithScope{ + Code: primitive.JavaScript(code), + Scope: scope.Interface().(primitive.D), + } + return cws, nil +} + +func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tCodeWithScope { + return emptyValue, ValueDecoderError{ + Name: "CodeWithScopeDecodeValue", + Types: []reflect.Type{tCodeWithScope}, + Received: reflect.Zero(t), + } + } + + var cws primitive.CodeWithScope + var err error + switch vrType := vr.Type(); vrType { + case bsontype.CodeWithScope: + cws, err = dvd.readCodeWithScope(dc, vr) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(cws), nil +} + +// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCodeWithScope { + return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + default: + return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return nil, err + } + + return dvd.decodeElemsFromDocumentReader(dc, dr) +} + +func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return nil, err + } + + elems := make([]reflect.Value, 0) + for { + key, vr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } + if err != nil { + return nil, err + } + + val := reflect.New(tEmpty).Elem() + err = decoder.DecodeValue(dc, vr, val) + if err != nil { + return nil, newDecodeError(key, err) + } + + elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) + } + + return elems, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go new file mode 100644 index 000000000000..4751ae995e7e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -0,0 +1,856 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "sync" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var defaultValueEncoders DefaultValueEncoders + +var bvwPool = bsonrw.NewBSONValueWriterPool() + +var errInvalidValue = errors.New("cannot encode invalid element") + +var sliceWriterPool = sync.Pool{ + New: func() interface{} { + sw := make(bsonrw.SliceWriter, 0) + return &sw + }, +} + +func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { + vw, err := dw.WriteDocumentElement(e.Key) + if err != nil { + return err + } + + if e.Value == nil { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) + if err != nil { + return err + } + return nil +} + +// DefaultValueEncoders is a namespace type for the default ValueEncoders used +// when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +type DefaultValueEncoders struct{} + +// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with +// the provided RegistryBuilder. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) + } + rb. + RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). + RegisterTypeEncoder(tTime, defaultTimeCodec). + RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). + RegisterTypeEncoder(tCoreArray, defaultArrayCodec). + RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). + RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). + RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). + RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). + RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). + RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). + RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). + RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). + RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). + RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). + RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). + RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). + RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). + RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). + RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). + RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). + RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). + RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). + RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). + RegisterDefaultEncoder(reflect.Map, defaultMapCodec). + RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). + RegisterDefaultEncoder(reflect.String, defaultStringCodec). + RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). + RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). + RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). + RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). + RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) +} + +// BooleanEncodeValue is the ValueEncoderFunc for bool types. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Bool { + return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + return vw.WriteBoolean(val.Bool()) +} + +func fitsIn32Bits(i int64) bool { + return math.MinInt32 <= i && i <= math.MaxInt32 +} + +// IntEncodeValue is the ValueEncoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32: + return vw.WriteInt32(int32(val.Int())) + case reflect.Int: + i64 := val.Int() + if fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + case reflect.Int64: + i64 := val.Int() + if ec.MinSize && fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + } + + return ValueEncoderError{ + Name: "IntEncodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } +} + +// UintEncodeValue is the ValueEncoderFunc for uint types. +// +// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. +func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + if ec.MinSize && u64 <= math.MaxInt32 { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +// FloatEncodeValue is the ValueEncoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Float32, reflect.Float64: + return vw.WriteDouble(val.Float()) + } + + return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} +} + +// StringEncodeValue is the ValueEncoderFunc for string types. +// +// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. +func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tOID { + return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} + } + return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) +} + +// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDecimal { + return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) +} + +// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tJSONNumber { + return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} + } + jsnum := val.Interface().(json.Number) + + // Attempt int first, then float64 + if i64, err := jsnum.Int64(); err == nil { + return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64)) + } + + f64, err := jsnum.Float64() + if err != nil { + return err + } + + return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64)) +} + +// URLEncodeValue is the ValueEncoderFunc for url.URL. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tURL { + return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} + } + u := val.Interface().(url.URL) + return vw.WriteString(u.String()) +} + +// TimeEncodeValue is the ValueEncoderFunc for time.TIme. +// +// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. +func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTime { + return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} + } + tt := val.Interface().(time.Time) + dt := primitive.NewDateTimeFromTime(tt) + return vw.WriteDateTime(int64(dt)) +} + +// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. +// +// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. +func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tByteSlice { + return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + if val.IsNil() { + return vw.WriteNull() + } + return vw.WriteBinary(val.Interface().([]byte)) +} + +// MapEncodeValue is the ValueEncoderFunc for map[string]* types. +// +// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead. 
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { + return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + if val.IsNil() { + // If we have a nill map but we can't WriteNull, that means we're probably trying to encode + // to a TopLevel document. We can't currently tell if this is what actually happened, but if + // there's a deeper underlying problem, the error will also be returned from WriteDocument, + // so just continue. The operations on a map reflection value are valid, so we can call + // MapKeys within mapEncodeValue without a problem. + err := vw.WriteNull() + if err == nil { + return nil + } + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + return dve.mapEncodeValue(ec, dw, val, nil) +} + +// mapEncodeValue handles encoding of the values of a map. The collisionFn returns +// true if the provided key exists, this is mainly used for inline maps in the +// struct codec. +func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + keys := val.MapKeys() + for _, key := range keys { + if collisionFn != nil && collisionFn(key.String()) { + return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) + } + + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { + return lookupErr + } + + vw, err := dw.WriteDocumentElement(key.String()) + if err != nil { + return err + } + + if errors.Is(lookupErr, errInvalidValue) { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} + +// ArrayEncodeValue is the ValueEncoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().Elem() == tE { + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + e := val.Index(idx).Interface().(primitive.E) + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + // If we have a []byte we want to treat it as a binary instead of as an array. 
+ if val.Type().Elem() == tByte { + var byteSlice []byte + for idx := 0; idx < val.Len(); idx++ { + byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) + } + return vw.WriteBinary(byteSlice) + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if errors.Is(lookupErr, errInvalidValue) { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// SliceEncodeValue is the ValueEncoderFunc for slice types. +// +// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. +func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Slice { + return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().ConvertibleTo(tD) { + d := val.Convert(tD).Interface().(primitive.D) + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for _, e := range d { + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if errors.Is(lookupErr, errInvalidValue) { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { + if origEncoder != nil || (currVal.Kind() != reflect.Interface) { + return origEncoder, currVal, nil + } + currVal = currVal.Elem() + if !currVal.IsValid() { + return nil, currVal, errInvalidValue + } + currEncoder, err := ec.LookupEncoder(currVal.Type()) + + return currEncoder, currVal, err +} + +// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. +// +// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. 
+func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tEmpty { + return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(val.Elem().Type()) + if err != nil { + return err + } + + return encoder.EncodeValue(ec, vw, val.Elem()) +} + +// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement ValueMarshaler + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + case val.Type().Implements(tValueMarshaler): + // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tValueMarshaler) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + } + + m, ok := val.Interface().(ValueMarshaler) + if !ok { + return vw.WriteNull() + } + t, data, err := m.MarshalBSONValue() + if err != nil { + return err + } + return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) +} + +// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement Marshaler + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + case val.Type().Implements(tMarshaler): + // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tMarshaler) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + } + + m, ok := val.Interface().(Marshaler) + if !ok { + return vw.WriteNull() + } + data, err := m.MarshalBSON() + if err != nil { + return err + } + return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) +} + +// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
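The Marshaler hook handled above is the encoding-side counterpart: the value (or a pointer to it) only needs a MarshalBSON method that returns a complete BSON document. A matching sketch for the hypothetical myDuration type from the decoder example, again assuming only the public bson.Marshal helper:

// MarshalBSON satisfies the Marshaler hook by emitting {"ms": <int64>}, the
// shape the UnmarshalBSON sketch above expects when round-tripping.
func (d myDuration) MarshalBSON() ([]byte, error) {
	return bson.Marshal(bson.D{{Key: "ms", Value: d.D.Milliseconds()}})
}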
+func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement Proxy + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + case val.Type().Implements(tProxy): + // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tProxy) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + } + + m, ok := val.Interface().(Proxy) + if !ok { + return vw.WriteNull() + } + v, err := m.ProxyBSON() + if err != nil { + return err + } + if v == nil { + encoder, err := ec.LookupEncoder(nil) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) + } + vv := reflect.ValueOf(v) + switch vv.Kind() { + case reflect.Ptr, reflect.Interface: + vv = vv.Elem() + } + encoder, err := ec.LookupEncoder(vv.Type()) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, vv) +} + +// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tJavaScript { + return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + return vw.WriteJavascript(val.String()) +} + +// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tSymbol { + return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + return vw.WriteSymbol(val.String()) +} + +// BinaryEncodeValue is the ValueEncoderFunc for Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tBinary { + return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + b := val.Interface().(primitive.Binary) + + return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) +} + +// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tUndefined { + return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + return vw.WriteUndefined() +} + +// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDateTime { + return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + return vw.WriteDateTime(val.Int()) +} + +// NullEncodeValue is the ValueEncoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tNull { + return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + return vw.WriteNull() +} + +// RegexEncodeValue is the ValueEncoderFunc for Regex. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tRegex { + return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + regex := val.Interface().(primitive.Regex) + + return vw.WriteRegex(regex.Pattern, regex.Options) +} + +// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDBPointer { + return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + dbp := val.Interface().(primitive.DBPointer) + + return vw.WriteDBPointer(dbp.DB, dbp.Pointer) +} + +// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTimestamp { + return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + ts := val.Interface().(primitive.Timestamp) + + return vw.WriteTimestamp(ts.T, ts.I) +} + +// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMinKey { + return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + return vw.WriteMinKey() +} + +// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMaxKey { + return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + return vw.WriteMaxKey() +} + +// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCoreDocument { + return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + cdoc := val.Interface().(bsoncore.Document) + + return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) +} + +// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCodeWithScope { + return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + cws := val.Interface().(primitive.CodeWithScope) + + dw, err := vw.WriteCodeWithScope(string(cws.Code)) + if err != nil { + return err + } + + sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) + defer sliceWriterPool.Put(sw) + *sw = (*sw)[:0] + + scopeVW := bvwPool.Get(sw) + defer bvwPool.Put(scopeVW) + + encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) + if err != nil { + return err + } + + err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) + if err != nil { + return err + } + return dw.WriteDocumentEnd() +} + +// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type +func isImplementationNil(val reflect.Value, inter reflect.Type) bool { + vt := val.Type() + for vt.Kind() == reflect.Ptr { + vt = vt.Elem() + } + return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go new file mode 100644 index 000000000000..4613e5a1ec79 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -0,0 +1,95 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsoncodec provides a system for encoding values to BSON representations and decoding +// values from BSON representations. This package considers both binary BSON and ExtendedJSON as +// BSON representations. The types in this package enable a flexible system for handling this +// encoding and decoding. +// +// The codec system is composed of two parts: +// +// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON +// representations. +// +// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for +// retrieving them. 
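+//
+// For illustration only, a sketch of how the two parts fit together; MyDuration,
+// myDurationEncoder, and reg are names invented for this example:
+//
+//	type MyDuration int64
+//
+//	// myDurationEncoder writes MyDuration values as BSON int64 milliseconds.
+//	func myDurationEncoder(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+//		if !val.IsValid() || val.Type() != reflect.TypeOf(MyDuration(0)) {
+//			return bsoncodec.ValueEncoderError{Name: "myDurationEncoder", Received: val}
+//		}
+//		return vw.WriteInt64(val.Int())
+//	}
+//
+// The encoder is then stored in a Registry so that lookups for MyDuration find it:
+//
+//	reg := bsoncodec.NewRegistry()
+//	reg.RegisterTypeEncoder(reflect.TypeOf(MyDuration(0)), bsoncodec.ValueEncoderFunc(myDurationEncoder))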
+// +// # ValueEncoders and ValueDecoders +// +// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. +// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the +// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc +// is provided to allow use of a function with the correct signature as a ValueEncoder. An +// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and +// to provide configuration information. +// +// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that +// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to +// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext +// instance is provided and serves similar functionality to the EncodeContext. +// +// # Registry +// +// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type +// documentation for examples of registering various custom encoders and decoders. A Registry can +// have three main types of codecs: +// +// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and +// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value +// whose type matches the registered type exactly. +// If the registered type is an interface, the codec will be invoked when encoding or decoding +// values whose type is the interface, but not for values with concrete types that implement the +// interface. +// +// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and +// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs +// will be invoked when encoding or decoding values whose types implement the interface. An example +// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method +// for any value whose type implements bson.Marshaler, regardless of the value's concrete type. +// +// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type +// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}. +// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances, +// respectively, when decoding into a bson.D. The following code would change the behavior so these +// values decode as Go int instances instead: +// +// intType := reflect.TypeOf(int(0)) +// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// +// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and +// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding +// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't +// match a registered type or hook encoder/decoder first. These methods should be used to change the +// behavior for all values for a specific kind. +// +// # Registry Lookup Procedure +// +// When looking up an encoder in a Registry, the precedence rules are as follows: +// +// 1. A type encoder registered for the exact type of the value. +// +// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to +// the value. 
If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and +// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries +// constructed using bson.NewRegistry have driver-defined hooks registered for the +// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take +// precedence over any new hooks. +// +// 3. A kind encoder registered for the value's kind. +// +// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The +// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder +// will be returned if no decoder is found. +// +// # DefaultValueEncoders and DefaultValueDecoders +// +// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and +// ValueDecoders for handling a wide range of Go types, including all of the types within the +// primitive package. To make registering these codecs easier, a helper method on each type is +// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for +// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, this method also +// handles registering type map entries for each BSON type. +package bsoncodec diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go new file mode 100644 index 000000000000..098368f0711f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -0,0 +1,173 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// EmptyInterfaceCodec is the Codec used for interface{} values. +// +// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go +// Driver 2.0. To configure the empty interface encode and decode behavior, use +// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to unmarshal BSON binary field +// values as a Go byte slice, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// BinaryAsSlice: true, +// }) +// +// See the deprecation notice for each field in EmptyInterfaceCodec for the +// corresponding settings. +type EmptyInterfaceCodec struct { + // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the + // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. + // + // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead. 
+ DecodeBinaryAsSlice bool +} + +var ( + defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() + + // Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it + // to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. + _ typeDecoder = defaultEmptyInterfaceCodec +) + +// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. +// +// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See +// [EmptyInterfaceCodec] for more details. +func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { + interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) + + codec := EmptyInterfaceCodec{} + if interfaceOpt.DecodeBinaryAsSlice != nil { + codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice + } + return &codec +} + +// EncodeValue is the ValueEncoderFunc for interface{}. +func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tEmpty { + return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(val.Elem().Type()) + if err != nil { + return err + } + + return encoder.EncodeValue(ec, vw, val.Elem()) +} + +func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { + isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument + if isDocument { + if dc.defaultDocumentType != nil { + // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return + // that type. + return dc.defaultDocumentType, nil + } + if dc.Ancestor != nil { + // Using ancestor information rather than looking up the type map entry forces consistent decoding. + // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry + // has been registered. + return dc.Ancestor, nil + } + } + + rtype, err := dc.LookupTypeMapEntry(valueType) + if err == nil { + return rtype, nil + } + + if isDocument { + // For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument, + // depending on the original valueType. 
+ var lookupType bsontype.Type + switch valueType { + case bsontype.Type(0): + lookupType = bsontype.EmbeddedDocument + case bsontype.EmbeddedDocument: + lookupType = bsontype.Type(0) + } + + rtype, err = dc.LookupTypeMapEntry(lookupType) + if err == nil { + return rtype, nil + } + } + + return nil, err +} + +func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tEmpty { + return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} + } + + rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.Null: + return reflect.Zero(t), vr.ReadNull() + default: + return emptyValue, err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return emptyValue, err + } + + elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) + if err != nil { + return emptyValue, err + } + + if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { + binElem := elem.Interface().(primitive.Binary) + if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { + elem = reflect.ValueOf(binElem.Data) + } + } + + return elem, nil +} + +// DecodeValue is the ValueDecoderFunc for interface{}. +func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + elem, err := eic.decodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.Set(elem) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go new file mode 100644 index 000000000000..d7e00ffa8d16 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -0,0 +1,343 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var defaultMapCodec = NewMapCodec() + +// MapCodec is the Codec used for map values. +// +// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To +// configure the map encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON +// documents, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilMapAsEmpty: true, +// }) +// +// See the deprecation notice for each field in MapCodec for the corresponding +// settings. +type MapCodec struct { + // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination + // value passed to Decode before unmarshaling BSON documents into them. 
+ // + // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. + DecodeZerosMap bool + + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. + EncodeNilAsEmpty bool + + // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name + // strings using fmt.Sprintf() instead of the default string conversion logic. + // + // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or + // options.BSONOptions.StringifyMapKeysWithFmt instead. + EncodeKeysWithStringer bool +} + +// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. +// This applies to types used as map keys and is similar to encoding.TextMarshaler. +type KeyMarshaler interface { + MarshalKey() (key string, err error) +} + +// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation +// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler. +// +// UnmarshalKey must be able to decode the form generated by MarshalKey. +// UnmarshalKey must copy the text if it wishes to retain the text +// after returning. +type KeyUnmarshaler interface { + UnmarshalKey(key string) error +} + +// NewMapCodec returns a MapCodec with options opts. +// +// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See +// [MapCodec] for more details. +func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { + mapOpt := bsonoptions.MergeMapCodecOptions(opts...) + + codec := MapCodec{} + if mapOpt.DecodeZerosMap != nil { + codec.DecodeZerosMap = *mapOpt.DecodeZerosMap + } + if mapOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty + } + if mapOpt.EncodeKeysWithStringer != nil { + codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer + } + return &codec +} + +// EncodeValue is the ValueEncoder for map[*]* types. +func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Map { + return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty { + // If we have a nil map but we can't WriteNull, that means we're probably trying to encode + // to a TopLevel document. We can't currently tell if this is what actually happened, but if + // there's a deeper underlying problem, the error will also be returned from WriteDocument, + // so just continue. The operations on a map reflection value are valid, so we can call + // MapKeys within mapEncodeValue without a problem. + err := vw.WriteNull() + if err == nil { + return nil + } + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + return mc.mapEncodeValue(ec, dw, val, nil) +} + +// mapEncodeValue handles encoding of the values of a map. The collisionFn returns +// true if the provided key exists, this is mainly used for inline maps in the +// struct codec. 
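+//
+// Map keys themselves are converted to document field names by encodeKey. For
+// illustration only, a key type can control that conversion by implementing the
+// KeyMarshaler and KeyUnmarshaler interfaces; the userID type below is an
+// assumption invented for this example:
+//
+//	type userID int
+//
+//	// MarshalKey renders the key as "user-<n>".
+//	func (u userID) MarshalKey() (string, error) {
+//		return fmt.Sprintf("user-%d", int(u)), nil
+//	}
+//
+//	// UnmarshalKey parses the form produced by MarshalKey.
+//	func (u *userID) UnmarshalKey(key string) error {
+//		_, err := fmt.Sscanf(key, "user-%d", (*int)(u))
+//		return err
+//	}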
+func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + keys := val.MapKeys() + for _, key := range keys { + keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt) + if err != nil { + return err + } + + if collisionFn != nil && collisionFn(keyStr) { + return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) + } + + currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { + return lookupErr + } + + vw, err := dw.WriteDocumentElement(keyStr) + if err != nil { + return err + } + + if errors.Is(lookupErr, errInvalidValue) { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} + +// DecodeValue is the ValueDecoder for map[string/decimal]* types. +func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) { + return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + default: + return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) { + clearMap(val) + } + + eType := val.Type().Elem() + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return err + } + eTypeDecoder, _ := decoder.(typeDecoder) + + if eType == tEmpty { + dc.Ancestor = val.Type() + } + + keyType := val.Type().Key() + + for { + key, vr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } + if err != nil { + return err + } + + k, err := mc.decodeKey(key, keyType) + if err != nil { + return err + } + + elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) + if err != nil { + return newDecodeError(key, err) + } + + val.SetMapIndex(k, elem) + } + return nil +} + +func clearMap(m reflect.Value) { + var none reflect.Value + for _, k := range m.MapKeys() { + m.SetMapIndex(k, none) + } +} + +func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) { + if mc.EncodeKeysWithStringer || encodeKeysWithStringer { + return fmt.Sprint(val), nil + } + + // keys of any string type are used directly + if val.Kind() == reflect.String { + return val.String(), nil + } + // KeyMarshalers are marshaled + if km, ok := val.Interface().(KeyMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + buf, err := km.MarshalKey() + if err == nil { + return buf, nil + } + return "", err + } + // keys implement encoding.TextMarshaler are marshaled. 
+ if km, ok := val.Interface().(encoding.TextMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + + buf, err := km.MarshalText() + if err != nil { + return "", err + } + + return string(buf), nil + } + + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil + } + return "", fmt.Errorf("unsupported key type: %v", val.Type()) +} + +var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { + keyVal := reflect.ValueOf(key) + var err error + switch { + // First, if EncodeKeysWithStringer is not enabled, try to decode withKeyUnmarshaler + case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(KeyUnmarshaler) + err = v.UnmarshalKey(key) + keyVal = keyVal.Elem() + // Try to decode encoding.TextUnmarshalers. + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(encoding.TextUnmarshaler) + err = v.UnmarshalText([]byte(key)) + keyVal = keyVal.Elem() + // Otherwise, go to type specific behavior + default: + switch keyType.Kind() { + case reflect.String: + keyVal = reflect.ValueOf(key).Convert(keyType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, parseErr := strconv.ParseInt(key, 10, 64) + if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { + err = fmt.Errorf("failed to unmarshal number key %v", key) + } + keyVal = reflect.ValueOf(n).Convert(keyType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, parseErr := strconv.ParseUint(key, 10, 64) + if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { + err = fmt.Errorf("failed to unmarshal number key %v", key) + break + } + keyVal = reflect.ValueOf(n).Convert(keyType) + case reflect.Float32, reflect.Float64: + if mc.EncodeKeysWithStringer { + parsed, err := strconv.ParseFloat(key, 64) + if err != nil { + return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) + } + keyVal = reflect.ValueOf(parsed) + break + } + fallthrough + default: + return keyVal, fmt.Errorf("unsupported key type: %v", keyType) + } + } + return keyVal, err +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go new file mode 100644 index 000000000000..fbd9f0a9e972 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go @@ -0,0 +1,65 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import "fmt" + +type mode int + +const ( + _ mode = iota + mTopLevel + mDocument + mArray + mValue + mElement + mCodeWithScope + mSpacer +) + +func (m mode) String() string { + var str string + + switch m { + case mTopLevel: + str = "TopLevel" + case mDocument: + str = "DocumentMode" + case mArray: + str = "ArrayMode" + case mValue: + str = "ValueMode" + case mElement: + str = "ElementMode" + case mCodeWithScope: + str = "CodeWithScopeMode" + case mSpacer: + str = "CodeWithScopeSpacerFrame" + default: + str = "UnknownMode" + } + + return str +} + +// TransitionError is an error returned when an invalid progressing a +// ValueReader or ValueWriter state machine occurs. +type TransitionError struct { + parent mode + current mode + destination mode +} + +func (te TransitionError) Error() string { + if te.destination == mode(0) { + return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current) + } + if te.parent == mode(0) { + return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination) + } + return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go new file mode 100644 index 000000000000..ddfa4a33e180 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -0,0 +1,108 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var _ ValueEncoder = &PointerCodec{} +var _ ValueDecoder = &PointerCodec{} + +// PointerCodec is the Codec used for pointers. +// +// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To +// override the default pointer encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for pointers. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) +// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) +type PointerCodec struct { + ecache typeEncoderCache + dcache typeDecoderCache +} + +// NewPointerCodec returns a PointerCodec that has been initialized. +// +// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See +// [PointerCodec] for more details. +func NewPointerCodec() *PointerCodec { + return &PointerCodec{} +} + +// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil +// or looking up an encoder for the type of value the pointer points to. 
+func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.Ptr { + if !val.IsValid() { + return vw.WriteNull() + } + return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + + typ := val.Type() + if v, ok := pc.ecache.Load(typ); ok { + if v == nil { + return ErrNoEncoder{Type: typ} + } + return v.EncodeValue(ec, vw, val.Elem()) + } + // TODO(charlie): handle concurrent requests for the same type + enc, err := ec.LookupEncoder(typ.Elem()) + enc = pc.ecache.LoadOrStore(typ, enc) + if err != nil { + return err + } + return enc.EncodeValue(ec, vw, val.Elem()) +} + +// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and +// using that to decode. If the BSON value is Null, this method will set the pointer to nil. +func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Ptr { + return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} + } + + typ := val.Type() + if vr.Type() == bsontype.Null { + val.Set(reflect.Zero(typ)) + return vr.ReadNull() + } + if vr.Type() == bsontype.Undefined { + val.Set(reflect.Zero(typ)) + return vr.ReadUndefined() + } + + if val.IsNil() { + val.Set(reflect.New(typ.Elem())) + } + + if v, ok := pc.dcache.Load(typ); ok { + if v == nil { + return ErrNoDecoder{Type: typ} + } + return v.DecodeValue(dc, vr, val.Elem()) + } + // TODO(charlie): handle concurrent requests for the same type + dec, err := dc.LookupDecoder(typ.Elem()) + dec = pc.dcache.LoadOrStore(typ, dec) + if err != nil { + return err + } + return dec.DecodeValue(dc, vr, val.Elem()) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go new file mode 100644 index 000000000000..4cf2b01ab482 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types +// that implement this interface with have ProxyBSON called during the encoding process and that +// value will be encoded in place for the implementer. +type Proxy interface { + ProxyBSON() (interface{}, error) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go new file mode 100644 index 000000000000..196c491bbbf1 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -0,0 +1,524 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "errors" + "fmt" + "reflect" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. 
+// +// Deprecated: ErrNilType will not be supported in Go Driver 2.0. +var ErrNilType = errors.New("cannot perform a decoder lookup on ") + +// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. +// +// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0. +var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") + +// ErrNoEncoder is returned when there wasn't an encoder available for a type. +// +// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0. +type ErrNoEncoder struct { + Type reflect.Type +} + +func (ene ErrNoEncoder) Error() string { + if ene.Type == nil { + return "no encoder found for " + } + return "no encoder found for " + ene.Type.String() +} + +// ErrNoDecoder is returned when there wasn't a decoder available for a type. +// +// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0. +type ErrNoDecoder struct { + Type reflect.Type +} + +func (end ErrNoDecoder) Error() string { + return "no decoder found for " + end.Type.String() +} + +// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. +// +// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0. +type ErrNoTypeMapEntry struct { + Type bsontype.Type +} + +func (entme ErrNoTypeMapEntry) Error() string { + return "no type map entry found for " + entme.Type.String() +} + +// ErrNotInterface is returned when the provided type is not an interface. +// +// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0. +var ErrNotInterface = errors.New("The provided type is not an interface") + +// A RegistryBuilder is used to build a Registry. This type is not goroutine +// safe. +// +// Deprecated: Use Registry instead. +type RegistryBuilder struct { + registry *Registry +} + +// NewRegistryBuilder creates a new empty RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. +func NewRegistryBuilder() *RegistryBuilder { + return &RegistryBuilder{ + registry: NewRegistry(), + } +} + +// RegisterCodec will register the provided ValueCodec for the provided type. +// +// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead. +func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { + rb.RegisterTypeEncoder(t, codec) + rb.RegisterTypeDecoder(t, codec) + return rb +} + +// RegisterTypeEncoder will register the provided ValueEncoder for the provided type. +// +// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered +// for a pointer to that type. +// +// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It +// will not be called when marshaling a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeEncoder instead. +func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { + rb.registry.RegisterTypeEncoder(t, enc) + return rb +} + +// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when +// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not +// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +// +// Deprecated: Use Registry.RegisterInterfaceEncoder instead. 
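+//
+// For illustration only, the interface type is usually obtained with the
+// reflect.TypeOf((*Iface)(nil)).Elem() idiom; fooMarshaler and myEncoder are names
+// invented for this example:
+//
+//	ifaceType := reflect.TypeOf((*fooMarshaler)(nil)).Elem()
+//	rb.RegisterHookEncoder(ifaceType, myEncoder)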
+func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { + rb.registry.RegisterInterfaceEncoder(t, enc) + return rb +} + +// RegisterTypeDecoder will register the provided ValueDecoder for the provided type. +// +// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered +// for a pointer to that type. +// +// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface. +// It will not be called when unmarshaling into a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeDecoder instead. +func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { + rb.registry.RegisterTypeDecoder(t, dec) + return rb +} + +// RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when +// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not +// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +// +// Deprecated: Use Registry.RegisterInterfaceDecoder instead. +func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { + rb.registry.RegisterInterfaceDecoder(t, dec) + return rb +} + +// RegisterEncoder registers the provided type and encoder pair. +// +// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead. +func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { + if t == tEmpty { + rb.registry.RegisterTypeEncoder(t, enc) + return rb + } + switch t.Kind() { + case reflect.Interface: + rb.registry.RegisterInterfaceEncoder(t, enc) + default: + rb.registry.RegisterTypeEncoder(t, enc) + } + return rb +} + +// RegisterDecoder registers the provided type and decoder pair. +// +// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead. +func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { + if t == nil { + rb.registry.RegisterTypeDecoder(t, dec) + return rb + } + if t == tEmpty { + rb.registry.RegisterTypeDecoder(t, dec) + return rb + } + switch t.Kind() { + case reflect.Interface: + rb.registry.RegisterInterfaceDecoder(t, dec) + default: + rb.registry.RegisterTypeDecoder(t, dec) + } + return rb +} + +// RegisterDefaultEncoder will register the provided ValueEncoder to the provided +// kind. +// +// Deprecated: Use Registry.RegisterKindEncoder instead. +func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { + rb.registry.RegisterKindEncoder(kind, enc) + return rb +} + +// RegisterDefaultDecoder will register the provided ValueDecoder to the +// provided kind. +// +// Deprecated: Use Registry.RegisterKindDecoder instead. +func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { + rb.registry.RegisterKindDecoder(kind, dec) + return rb +} + +// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. +// +// By default, BSON documents will decode into interface{} values as bson.D. 
To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: +// +// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +// +// Deprecated: Use Registry.RegisterTypeMapEntry instead. +func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { + rb.registry.RegisterTypeMapEntry(bt, rt) + return rb +} + +// Build creates a Registry from the current state of this RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. +func (rb *RegistryBuilder) Build() *Registry { + r := &Registry{ + interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...), + interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...), + typeEncoders: rb.registry.typeEncoders.Clone(), + typeDecoders: rb.registry.typeDecoders.Clone(), + kindEncoders: rb.registry.kindEncoders.Clone(), + kindDecoders: rb.registry.kindDecoders.Clone(), + } + rb.registry.typeMap.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + r.typeMap.Store(k, v) + } + return true + }) + return r +} + +// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main +// typed passed around and Encoders and Decoders are constructed from it. +type Registry struct { + interfaceEncoders []interfaceValueEncoder + interfaceDecoders []interfaceValueDecoder + typeEncoders *typeEncoderCache + typeDecoders *typeDecoderCache + kindEncoders *kindEncoderCache + kindDecoders *kindDecoderCache + typeMap sync.Map // map[bsontype.Type]reflect.Type +} + +// NewRegistry creates a new empty Registry. +func NewRegistry() *Registry { + return &Registry{ + typeEncoders: new(typeEncoderCache), + typeDecoders: new(typeDecoderCache), + kindEncoders: new(kindEncoderCache), + kindDecoders: new(kindDecoderCache), + } +} + +// RegisterTypeEncoder registers the provided ValueEncoder for the provided type. +// +// The type will be used as provided, so an encoder can be registered for a type and a different +// encoder can be registered for a pointer to that type. +// +// If the given type is an interface, the encoder will be called when marshaling a type that is +// that interface. It will not be called when marshaling a non-interface type that implements the +// interface. To get the latter behavior, call RegisterHookEncoder instead. +// +// RegisterTypeEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { + r.typeEncoders.Store(valueType, enc) +} + +// RegisterTypeDecoder registers the provided ValueDecoder for the provided type. +// +// The type will be used as provided, so a decoder can be registered for a type and a different +// decoder can be registered for a pointer to that type. +// +// If the given type is an interface, the decoder will be called when unmarshaling into a type that +// is that interface. It will not be called when unmarshaling into a non-interface type that +// implements the interface. To get the latter behavior, call RegisterHookDecoder instead. +// +// RegisterTypeDecoder should not be called concurrently with any other Registry method. 
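+//
+// For illustration only, a type and a pointer to that type can be given independent
+// decoders; MyStruct, myStructDecoder, and myPtrDecoder are names invented for this
+// example:
+//
+//	t := reflect.TypeOf(MyStruct{})
+//	reg.RegisterTypeDecoder(t, myStructDecoder)             // used when decoding into MyStruct
+//	reg.RegisterTypeDecoder(reflect.PtrTo(t), myPtrDecoder) // used when decoding into *MyStruct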
+func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { + r.typeDecoders.Store(valueType, dec) +} + +// RegisterKindEncoder registers the provided ValueEncoder for the provided kind. +// +// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define an encoder for MyInt and int32, use RegisterKindEncoder like +// +// reg.RegisterKindEncoder(reflect.Int32, myEncoder) +// +// RegisterKindEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { + r.kindEncoders.Store(kind, enc) +} + +// RegisterKindDecoder registers the provided ValueDecoder for the provided kind. +// +// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define an decoder for MyInt and int32, use RegisterKindDecoder like +// +// reg.RegisterKindDecoder(reflect.Int32, myDecoder) +// +// RegisterKindDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { + r.kindDecoders.Store(kind, dec) +} + +// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will +// be called when marshaling a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface +// (i.e. iface.Kind() != reflect.Interface), this method will panic. +// +// RegisterInterfaceEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) + } + + for idx, encoder := range r.interfaceEncoders { + if encoder.i == iface { + r.interfaceEncoders[idx].ve = enc + return + } + } + + r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc}) +} + +// RegisterInterfaceDecoder registers an decoder for the provided interface type iface. This decoder will +// be called when unmarshaling into a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface), +// this method will panic. +// +// RegisterInterfaceDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) + } + + for idx, decoder := range r.interfaceDecoders { + if decoder.i == iface { + r.interfaceDecoders[idx].vd = dec + return + } + } + + r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec}) +} + +// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. 
+// +// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: +// +// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { + r.typeMap.Store(bt, rt) +} + +// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup +// order: +// +// 1. An encoder registered for the exact type. If the given type is an interface, an encoder +// registered using RegisterTypeEncoder for that interface will be selected. +// +// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type +// or by a pointer to the type. +// +// 3. An encoder registered using RegisterKindEncoder for the kind of value. +// +// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for +// concurrent use by multiple goroutines after all codecs and encoders are registered. +func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { + if valueType == nil { + return nil, ErrNoEncoder{Type: valueType} + } + enc, found := r.lookupTypeEncoder(valueType) + if found { + if enc == nil { + return nil, ErrNoEncoder{Type: valueType} + } + return enc, nil + } + + enc, found = r.lookupInterfaceEncoder(valueType, true) + if found { + return r.typeEncoders.LoadOrStore(valueType, enc), nil + } + + if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { + return r.storeTypeEncoder(valueType, v), nil + } + return nil, ErrNoEncoder{Type: valueType} +} + +func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { + return r.typeEncoders.LoadOrStore(rt, enc) +} + +func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { + return r.typeEncoders.Load(rt) +} + +func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { + if valueType == nil { + return nil, false + } + for _, ienc := range r.interfaceEncoders { + if valueType.Implements(ienc.i) { + return ienc.ve, true + } + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) + if !found { + defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) + } + return newCondAddrEncoder(ienc.ve, defaultEnc), true + } + } + return nil, false +} + +// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup +// order: +// +// 1. A decoder registered for the exact type. If the given type is an interface, a decoder +// registered using RegisterTypeDecoder for that interface will be selected. +// +// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by +// a pointer to the type. +// +// 3. A decoder registered using RegisterKindDecoder for the kind of value. +// +// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for +// concurrent use by multiple goroutines after all codecs and decoders are registered. 
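+//
+// For illustration only, a caller can distinguish the "no decoder" case with
+// errors.As; time.Duration is just an arbitrary example type here:
+//
+//	dec, err := reg.LookupDecoder(reflect.TypeOf(time.Duration(0)))
+//	var noDec ErrNoDecoder
+//	if errors.As(err, &noDec) {
+//		// no decoder is registered for time.Duration on this registry
+//	}
+//	_ = dec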
+func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { + if valueType == nil { + return nil, ErrNilType + } + dec, found := r.lookupTypeDecoder(valueType) + if found { + if dec == nil { + return nil, ErrNoDecoder{Type: valueType} + } + return dec, nil + } + + dec, found = r.lookupInterfaceDecoder(valueType, true) + if found { + return r.storeTypeDecoder(valueType, dec), nil + } + + if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { + return r.storeTypeDecoder(valueType, v), nil + } + return nil, ErrNoDecoder{Type: valueType} +} + +func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { + return r.typeDecoders.Load(valueType) +} + +func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { + return r.typeDecoders.LoadOrStore(typ, dec) +} + +func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { + for _, idec := range r.interfaceDecoders { + if valueType.Implements(idec.i) { + return idec.vd, true + } + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(valueType, false) + if !found { + defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) + } + return newCondAddrDecoder(idec.vd, defaultDec), true + } + } + return nil, false +} + +// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON +// type. If no type is found, ErrNoTypeMapEntry is returned. +// +// LookupTypeMapEntry should not be called concurrently with any other Registry method. +func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { + v, ok := r.typeMap.Load(bt) + if v == nil || !ok { + return nil, ErrNoTypeMapEntry{Type: bt} + } + return v.(reflect.Type), nil +} + +type interfaceValueEncoder struct { + i reflect.Type + ve ValueEncoder +} + +type interfaceValueDecoder struct { + i reflect.Type + vd ValueDecoder +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go new file mode 100644 index 000000000000..14c9fd25646e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -0,0 +1,214 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "errors" + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +var defaultSliceCodec = NewSliceCodec() + +// SliceCodec is the Codec used for slice values. +// +// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To +// configure the slice encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
+// +// For example, to configure a mongo.Client to marshal nil Go slices as empty +// BSON arrays, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in SliceCodec for the corresponding +// settings. +type SliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilSliceAsEmpty instead. + EncodeNilAsEmpty bool +} + +// NewSliceCodec returns a MapCodec with options opts. +// +// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See +// [SliceCodec] for more details. +func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { + sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) + + codec := SliceCodec{} + if sliceOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty + } + return &codec +} + +// EncodeValue is the ValueEncoder for slice types. +func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Slice { + return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty { + return vw.WriteNull() + } + + // If we have a []byte we want to treat it as a binary instead of as an array. + if val.Type().Elem() == tByte { + byteSlice := make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(byteSlice), val) + return vw.WriteBinary(byteSlice) + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type() == tD || val.Type().ConvertibleTo(tD) { + d := val.Convert(tD).Interface().(primitive.D) + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for _, e := range d { + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if errors.Is(lookupErr, errInvalidValue) { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// DecodeValue is the ValueDecoder for slice types. 
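+//
+// For illustration only: besides BSON arrays, this decoder can also read BSON
+// binary (subtype 0x00 or 0x02) and BSON string values into byte-slice
+// destinations. With a named byte-slice type (payload is a name invented for this
+// example), that looks roughly like:
+//
+//	type payload []byte
+//
+//	var out struct {
+//		Data payload `bson:"data"` // stored as a BSON binary or string element
+//	}
+//	err := bson.Unmarshal(doc, &out) // doc is an assumed raw BSON document ([]byte)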
+func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) + } + val.SetLen(0) + val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) + return nil + case bsontype.String: + if sliceType := val.Type().Elem(); sliceType != tByte { + return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) + } + str, err := vr.ReadString() + if err != nil { + return err + } + byteStr := []byte(str) + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) + } + val.SetLen(0) + val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) + return nil + default: + return fmt.Errorf("cannot decode %v into a slice", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = defaultValueDecoders.decodeD + default: + elemsFunc = defaultValueDecoders.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go new file mode 100644 index 000000000000..a8f885a854f4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -0,0 +1,140 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// StringCodec is the Codec used for string values. +// +// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To +// override the default string encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for strings. 
+// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.String, myStringEncoder) +// reg.RegisterKindDecoder(reflect.String, myStringDecoder) +type StringCodec struct { + // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. + // If false, a string made from the raw object ID bytes will be used. Defaults to true. + // + // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. + DecodeObjectIDAsHex bool +} + +var ( + defaultStringCodec = NewStringCodec() + + // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. + _ typeDecoder = defaultStringCodec +) + +// NewStringCodec returns a StringCodec with options opts. +// +// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See +// [StringCodec] for more details. +func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { + stringOpt := bsonoptions.MergeStringCodecOptions(opts...) + return &StringCodec{*stringOpt.DecodeObjectIDAsHex} +} + +// EncodeValue is the ValueEncoder for string types. +func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t.Kind() != reflect.String { + return emptyValue, ValueDecoderError{ + Name: "StringDecodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: reflect.Zero(t), + } + } + + var str string + var err error + switch vr.Type() { + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return emptyValue, err + } + case bsontype.ObjectID: + oid, err := vr.ReadObjectID() + if err != nil { + return emptyValue, err + } + if sc.DecodeObjectIDAsHex { + str = oid.Hex() + } else { + // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. + byteArray := [12]byte(oid) + str = string(byteArray[:]) + } + case bsontype.Symbol: + str, err = vr.ReadSymbol() + if err != nil { + return emptyValue, err + } + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return emptyValue, err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} + } + str = string(data) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + + return reflect.ValueOf(str), nil +} + +// DecodeValue is the ValueDecoder for string types. 
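+// It accepts BSON strings and symbols, object IDs (rendered as a hex string by
+// default), generic/old binary values, and null or undefined (both of which
+// decode to the empty string).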
+func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + elem, err := sc.decodeType(dctx, vr, val.Type()) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go new file mode 100644 index 000000000000..f8d9690c139e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -0,0 +1,736 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type. +type DecodeError struct { + keys []string + wrapped error +} + +// Unwrap returns the underlying error +func (de *DecodeError) Unwrap() error { + return de.wrapped +} + +// Error implements the error interface. +func (de *DecodeError) Error() string { + // The keys are stored in reverse order because the de.keys slice is builtup while propagating the error up the + // stack of BSON keys, so we call de.Keys(), which reverses them. + keyPath := strings.Join(de.Keys(), ".") + return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped) +} + +// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down +// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be +// a string, the keys slice will be ["a", "b", "c"]. +func (de *DecodeError) Keys() []string { + reversedKeys := make([]string, 0, len(de.keys)) + for idx := len(de.keys) - 1; idx >= 0; idx-- { + reversedKeys = append(reversedKeys, de.keys[idx]) + } + + return reversedKeys +} + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. +type Zeroer interface { + IsZero() bool +} + +// StructCodec is the Codec used for struct values. +// +// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0. +// To configure the struct encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to omit zero-value structs when +// using the "omitempty" struct tag, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// OmitZeroStruct: true, +// }) +// +// See the deprecation notice for each field in StructCodec for the corresponding +// settings. 
+type StructCodec struct { + cache sync.Map // map[reflect.Type]*structDescription + parser StructTagParser + + // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. + DecodeZeroStruct bool + + // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. + DecodeDeepZeroInline bool + + // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. + // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag + // option is set. + // + // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. + EncodeOmitDefaultStruct bool + + // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. + // + // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be + // supported in Go Driver 2.0. + AllowUnexportedFields bool + + // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is + // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The + // default value is true. + // + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or + // options.BSONOptions.ErrorOnInlineDuplicates instead. + OverwriteDuplicatedInlinedFields bool +} + +var _ ValueEncoder = &StructCodec{} +var _ ValueDecoder = &StructCodec{} + +// NewStructCodec returns a StructCodec that uses p for struct tag parsing. +// +// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See +// [StructCodec] for more details. +func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { + if p == nil { + return nil, errors.New("a StructTagParser must be provided to NewStructCodec") + } + + structOpt := bsonoptions.MergeStructCodecOptions(opts...) + + codec := &StructCodec{ + parser: p, + } + + if structOpt.DecodeZeroStruct != nil { + codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct + } + if structOpt.DecodeDeepZeroInline != nil { + codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline + } + if structOpt.EncodeOmitDefaultStruct != nil { + codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct + } + if structOpt.OverwriteDuplicatedInlinedFields != nil { + codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields + } + if structOpt.AllowUnexportedFields != nil { + codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields + } + + return codec, nil +} + +// EncodeValue handles encoding generic struct types. 
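+//
+// For example (an illustrative sketch), encoding
+//
+//	struct {
+//		Name string `bson:"name,omitempty"`
+//		Age  int    `bson:"age"`
+//	}{Age: 7}
+//
+// writes the document {"age": 7}: the zero-value Name field is dropped because
+// of the omitempty tag option.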
+func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Struct { + return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) + if err != nil { + return err + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + var rv reflect.Value + for _, desc := range sd.fl { + if desc.inline == nil { + rv = val.Field(desc.idx) + } else { + rv, err = fieldByIndexErr(val, desc.inline) + if err != nil { + continue + } + } + + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) + + if err != nil && !errors.Is(err, errInvalidValue) { + return err + } + + if errors.Is(err, errInvalidValue) { + if desc.omitEmpty { + continue + } + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + err = vw2.WriteNull() + if err != nil { + return err + } + continue + } + + if desc.encoder == nil { + return ErrNoEncoder{Type: rv.Type()} + } + + encoder := desc.encoder + + var empty bool + if cz, ok := encoder.(CodecZeroer); ok { + empty = cz.IsTypeZero(rv.Interface()) + } else if rv.Kind() == reflect.Interface { + // isEmpty will not treat an interface rv as an interface, so we need to check for the + // nil interface separately. + empty = rv.IsNil() + } else { + empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) + } + if desc.omitEmpty && empty { + continue + } + + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + + ectx := EncodeContext{ + Registry: ec.Registry, + MinSize: desc.minSize || ec.MinSize, + errorOnInlineDuplicates: ec.errorOnInlineDuplicates, + stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, + nilMapAsEmpty: ec.nilMapAsEmpty, + nilSliceAsEmpty: ec.nilSliceAsEmpty, + nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, + omitZeroStruct: ec.omitZeroStruct, + useJSONStructTags: ec.useJSONStructTags, + } + err = encoder.EncodeValue(ectx, vw2, rv) + if err != nil { + return err + } + } + + if sd.inlineMap >= 0 { + rv := val.Field(sd.inlineMap) + collisionFn := func(key string) bool { + _, exists := sd.fm[key] + return exists + } + + return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) + } + + return dw.WriteDocumentEnd() +} + +func newDecodeError(key string, original error) error { + var de *DecodeError + if !errors.As(original, &de) { + return &DecodeError{ + keys: []string{key}, + wrapped: original, + } + } + + de.keys = append(de.keys, key) + return de +} + +// DecodeValue implements the Codec interface. +// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. +// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
+func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Struct { + return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + default: + return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) + } + + sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) + if err != nil { + return err + } + + if sc.DecodeZeroStruct || dc.zeroStructs { + val.Set(reflect.Zero(val.Type())) + } + if sc.DecodeDeepZeroInline && sd.inline { + val.Set(deepZero(val.Type())) + } + + var decoder ValueDecoder + var inlineMap reflect.Value + if sd.inlineMap >= 0 { + inlineMap = val.Field(sd.inlineMap) + decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) + if err != nil { + return err + } + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + for { + name, vr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } + if err != nil { + return err + } + + fd, exists := sd.fm[name] + if !exists { + // if the original name isn't found in the struct description, try again with the name in lowercase + // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field + // names + fd, exists = sd.fm[strings.ToLower(name)] + } + + if !exists { + if sd.inlineMap < 0 { + // The encoding/json package requires a flag to return on error for non-existent fields. + // This functionality seems appropriate for the struct codec. + err = vr.Skip() + if err != nil { + return err + } + continue + } + + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + + elem := reflect.New(inlineMap.Type().Elem()).Elem() + dc.Ancestor = inlineMap.Type() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + inlineMap.SetMapIndex(reflect.ValueOf(name), elem) + continue + } + + var field reflect.Value + if fd.inline == nil { + field = val.Field(fd.idx) + } else { + field, err = getInlineField(val, fd.inline) + if err != nil { + return err + } + } + + if !field.CanSet() { // Being settable is a super set of being addressable. 
+ innerErr := fmt.Errorf("field %v is not settable", field) + return newDecodeError(fd.name, innerErr) + } + if field.Kind() == reflect.Ptr && field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + field = field.Addr() + + dctx := DecodeContext{ + Registry: dc.Registry, + Truncate: fd.truncate || dc.Truncate, + defaultDocumentType: dc.defaultDocumentType, + binaryAsSlice: dc.binaryAsSlice, + useJSONStructTags: dc.useJSONStructTags, + useLocalTimeZone: dc.useLocalTimeZone, + zeroMaps: dc.zeroMaps, + zeroStructs: dc.zeroStructs, + } + + if fd.decoder == nil { + return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) + } + + err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) + if err != nil { + return newDecodeError(fd.name, err) + } + } + + return nil +} + +func isEmpty(v reflect.Value, omitZeroStruct bool) bool { + kind := v.Kind() + if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { + return v.Interface().(Zeroer).IsZero() + } + switch kind { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Struct: + if !omitZeroStruct { + return false + } + vt := v.Type() + if vt == tTime { + return v.Interface().(time.Time).IsZero() + } + numField := vt.NumField() + for i := 0; i < numField; i++ { + ff := vt.Field(i) + if ff.PkgPath != "" && !ff.Anonymous { + continue // Private field + } + if !isEmpty(v.Field(i), omitZeroStruct) { + return false + } + } + return true + } + return !v.IsValid() || v.IsZero() +} + +type structDescription struct { + fm map[string]fieldDescription + fl []fieldDescription + inlineMap int + inline bool +} + +type fieldDescription struct { + name string // BSON key name + fieldName string // struct field name + idx int + omitEmpty bool + minSize bool + truncate bool + inline []int + encoder ValueEncoder + decoder ValueDecoder +} + +type byIndex []fieldDescription + +func (bi byIndex) Len() int { return len(bi) } + +func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } + +func (bi byIndex) Less(i, j int) bool { + // If a field is inlined, its index in the top level struct is stored at inline[0] + iIdx, jIdx := bi[i].idx, bi[j].idx + if len(bi[i].inline) > 0 { + iIdx = bi[i].inline[0] + } + if len(bi[j].inline) > 0 { + jIdx = bi[j].inline[0] + } + if iIdx != jIdx { + return iIdx < jIdx + } + for k, biik := range bi[i].inline { + if k >= len(bi[j].inline) { + return false + } + if biik != bi[j].inline[k] { + return biik < bi[j].inline[k] + } + } + return len(bi[i].inline) < len(bi[j].inline) +} + +func (sc *StructCodec) describeStruct( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { + // We need to analyze the struct, including getting the tags, collecting + // information about inlining, and create a map of the field name to the field. + if v, ok := sc.cache.Load(t); ok { + return v.(*structDescription), nil + } + // TODO(charlie): Only describe the struct once when called + // concurrently with the same type. 
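+	// Cache miss: build the description below and publish it with LoadOrStore;
+	// if a concurrent caller stored a description first, that copy is used instead.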
+ ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) + if err != nil { + return nil, err + } + if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { + ds = v.(*structDescription) + } + return ds, nil +} + +func (sc *StructCodec) describeStructSlow( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { + numFields := t.NumField() + sd := &structDescription{ + fm: make(map[string]fieldDescription, numFields), + fl: make([]fieldDescription, 0, numFields), + inlineMap: -1, + } + + var fields []fieldDescription + for i := 0; i < numFields; i++ { + sf := t.Field(i) + if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) { + // field is private or unexported fields aren't allowed, ignore + continue + } + + sfType := sf.Type + encoder, err := r.LookupEncoder(sfType) + if err != nil { + encoder = nil + } + decoder, err := r.LookupDecoder(sfType) + if err != nil { + decoder = nil + } + + description := fieldDescription{ + fieldName: sf.Name, + idx: i, + encoder: encoder, + decoder: decoder, + } + + var stags StructTags + // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser + // instead of the parser defined on the codec. + if useJSONStructTags { + stags, err = JSONFallbackStructTagParser.ParseStructTags(sf) + } else { + stags, err = sc.parser.ParseStructTags(sf) + } + if err != nil { + return nil, err + } + if stags.Skip { + continue + } + description.name = stags.Name + description.omitEmpty = stags.OmitEmpty + description.minSize = stags.MinSize + description.truncate = stags.Truncate + + if stags.Inline { + sd.inline = true + switch sfType.Kind() { + case reflect.Map: + if sd.inlineMap >= 0 { + return nil, errors.New("(struct " + t.String() + ") multiple inline maps") + } + if sfType.Key() != tString { + return nil, errors.New("(struct " + t.String() + ") inline map must have a string keys") + } + sd.inlineMap = description.idx + case reflect.Ptr: + sfType = sfType.Elem() + if sfType.Kind() != reflect.Struct { + return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) + } + fallthrough + case reflect.Struct: + inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates) + if err != nil { + return nil, err + } + for _, fd := range inlinesf.fl { + if fd.inline == nil { + fd.inline = []int{i, fd.idx} + } else { + fd.inline = append([]int{i}, fd.inline...) + } + fields = append(fields, fd) + + } + default: + return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) + } + continue + } + fields = append(fields, description) + } + + // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name + sort.Slice(fields, func(i, j int) bool { + x := fields + // sort field by name, breaking ties with depth, then + // breaking ties with index sequence. + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].inline) != len(x[j].inline) { + return len(x[i].inline) < len(x[j].inline) + } + return byIndex(x).Less(i, j) + }) + + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + sd.fl = append(sd.fl, fi) + sd.fm[name] = fi + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { + return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) + } + sd.fl = append(sd.fl, dominant) + sd.fm[name] = dominant + } + + sort.Sort(byIndex(sd.fl)) + + return sd, nil +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's inlining rules. If there are multiple top-level +// fields, the boolean will be false: This condition is an error in Go +// and we skip all the fields. +func dominantField(fields []fieldDescription) (fieldDescription, bool) { + // The fields are sorted in increasing index-length order, then by presence of tag. + // That means that the first field is the dominant one. We need only check + // for error cases: two fields at top level. + if len(fields) > 1 && + len(fields[0].inline) == len(fields[1].inline) { + return fieldDescription{}, false + } + return fields[0], true +} + +func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { + defer func() { + if recovered := recover(); recovered != nil { + switch r := recovered.(type) { + case string: + err = fmt.Errorf("%s", r) + case error: + err = r + } + } + }() + + result = v.FieldByIndex(index) + return +} + +func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { + field, err := fieldByIndexErr(val, index) + if err == nil { + return field, nil + } + + // if parent of this element doesn't exist, fix its parent + inlineParent := index[:len(index)-1] + var fParent reflect.Value + if fParent, err = fieldByIndexErr(val, inlineParent); err != nil { + fParent, err = getInlineField(val, inlineParent) + if err != nil { + return fParent, err + } + } + fParent.Set(reflect.New(fParent.Type().Elem())) + + return fieldByIndexErr(val, index) +} + +// DeepZero returns recursive zero object +func deepZero(st reflect.Type) (result reflect.Value) { + if st.Kind() == reflect.Struct { + numField := st.NumField() + for i := 0; i < numField; i++ { + if result == emptyValue { + result = reflect.Indirect(reflect.New(st)) + } + f := result.Field(i) + if f.CanInterface() { + if f.Type().Kind() == reflect.Struct { + result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) + } + } + } + } + return result +} + +// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside +func recursivePointerTo(v reflect.Value) reflect.Value { + v = reflect.Indirect(v) + result := reflect.New(v.Type()) + if v.Kind() == reflect.Struct { + for i := 0; i < v.NumField(); i++ { + if f := v.Field(i); f.Kind() == reflect.Ptr { + if f.Elem().Kind() == reflect.Struct { + result.Elem().Field(i).Set(recursivePointerTo(f)) + } + } + } + } + + return result +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go new file mode 100644 index 000000000000..18d85bfb0312 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -0,0 +1,148 @@ +// Copyright (C) MongoDB, Inc. 2017-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "strings" +) + +// StructTagParser returns the struct tags for a given struct field. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. +type StructTagParser interface { + ParseStructTags(reflect.StructField) (StructTags, error) +} + +// StructTagParserFunc is an adapter that allows a generic function to be used +// as a StructTagParser. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. +type StructTagParserFunc func(reflect.StructField) (StructTags, error) + +// ParseStructTags implements the StructTagParser interface. +func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) { + return stpf(sf) +} + +// StructTags represents the struct tag fields that the StructCodec uses during +// the encoding and decoding process. +// +// In the case of a struct, the lowercased field name is used as the key for each exported +// field but this behavior may be changed using a struct tag. The tag may also contain flags to +// adjust the marshalling behavior for the field. +// +// The properties are defined below: +// +// OmitEmpty Only include the field if it's not set to the zero value for the type or to +// empty slices or maps. +// +// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's +// feasible while preserving the numeric value. +// +// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within +// a float32. +// +// Inline Inline the field, which must be a struct or a map, causing all of its fields +// or keys to be processed as if they were part of the outer struct. For maps, +// keys must not conflict with the bson keys of other struct fields. +// +// Skip This struct field should be skipped. This is usually denoted by parsing a "-" +// for the name. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. +type StructTags struct { + Name string + OmitEmpty bool + MinSize bool + Truncate bool + Inline bool + Skip bool +} + +// DefaultStructTagParser is the StructTagParser used by the StructCodec by default. +// It will handle the bson struct tag. See the documentation for StructTags to see +// what each of the returned fields means. +// +// If there is no name in the struct tag fields, the struct field name is lowercased. +// The tag formats accepted are: +// +// "[][,[,]]" +// +// `(...) bson:"[][,[,]]" (...)` +// +// An example: +// +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } +// +// A struct tag either consisting entirely of '-' or with a bson key with a +// value consisting entirely of '-' will return a StructTags with Skip true and +// the remaining fields will be their default values. +// +// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0. 
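+//
+// The accepted tag format is "[<key>][,<flag1>[,<flag2>]]". For example
+// (illustrative), a field declared as
+//
+//	Name string `bson:"full_name,omitempty"`
+//
+// parses to StructTags{Name: "full_name", OmitEmpty: true}.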
+var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + return parseTags(key, tag) +} + +func parseTags(key string, tag string) (StructTags, error) { + var st StructTags + if tag == "-" { + st.Skip = true + return st, nil + } + + for idx, str := range strings.Split(tag, ",") { + if idx == 0 && str != "" { + key = str + } + switch str { + case "omitempty": + st.OmitEmpty = true + case "minsize": + st.MinSize = true + case "truncate": + st.Truncate = true + case "inline": + st.Inline = true + } + } + + st.Name = key + + return st, nil +} + +// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser +// but will also fallback to parsing the json tag instead on a field where the +// bson tag isn't available. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and +// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. +var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok { + tag, ok = sf.Tag.Lookup("json") + } + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + + return parseTags(key, tag) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go new file mode 100644 index 000000000000..22fb762c4151 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -0,0 +1,151 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +const ( + timeFormatString = "2006-01-02T15:04:05.999Z07:00" +) + +// TimeCodec is the Codec used for time.Time values. +// +// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. +// To configure the time.Time encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to ..., use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// UseLocalTimeZone: true, +// }) +// +// See the deprecation notice for each field in TimeCodec for the corresponding +// settings. +type TimeCodec struct { + // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. + // + // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone + // instead. 
+ UseLocalTimeZone bool +} + +var ( + defaultTimeCodec = NewTimeCodec() + + // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. + _ typeDecoder = defaultTimeCodec +) + +// NewTimeCodec returns a TimeCodec with options opts. +// +// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See +// [TimeCodec] for more details. +func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { + timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) + + codec := TimeCodec{} + if timeOpt.UseLocalTimeZone != nil { + codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone + } + return &codec +} + +func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tTime { + return emptyValue, ValueDecoderError{ + Name: "TimeDecodeValue", + Types: []reflect.Type{tTime}, + Received: reflect.Zero(t), + } + } + + var timeVal time.Time + switch vrType := vr.Type(); vrType { + case bsontype.DateTime: + dt, err := vr.ReadDateTime() + if err != nil { + return emptyValue, err + } + timeVal = time.Unix(dt/1000, dt%1000*1000000) + case bsontype.String: + // assume strings are in the isoTimeFormat + timeStr, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + timeVal, err = time.Parse(timeFormatString, timeStr) + if err != nil { + return emptyValue, err + } + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + timeVal = time.Unix(i64/1000, i64%1000*1000000) + case bsontype.Timestamp: + t, _, err := vr.ReadTimestamp() + if err != nil { + return emptyValue, err + } + timeVal = time.Unix(int64(t), 0) + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) + } + + if !tc.UseLocalTimeZone && !dc.useLocalTimeZone { + timeVal = timeVal.UTC() + } + return reflect.ValueOf(timeVal), nil +} + +// DecodeValue is the ValueDecoderFunc for time.Time. +func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tTime { + return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} + } + + elem, err := tc.decodeType(dc, vr, tTime) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// EncodeValue is the ValueEncoderFunc for time.TIme. +func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTime { + return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} + } + tt := val.Interface().(time.Time) + dt := primitive.NewDateTimeFromTime(tt) + return vw.WriteDateTime(int64(dt)) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go new file mode 100644 index 000000000000..6ade17b7d3fd --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -0,0 +1,58 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "net/url" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var tBool = reflect.TypeOf(false) +var tFloat64 = reflect.TypeOf(float64(0)) +var tInt32 = reflect.TypeOf(int32(0)) +var tInt64 = reflect.TypeOf(int64(0)) +var tString = reflect.TypeOf("") +var tTime = reflect.TypeOf(time.Time{}) + +var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() +var tByteSlice = reflect.TypeOf([]byte(nil)) +var tByte = reflect.TypeOf(byte(0x00)) +var tURL = reflect.TypeOf(url.URL{}) +var tJSONNumber = reflect.TypeOf(json.Number("")) + +var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() +var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() +var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() +var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() +var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() + +var tBinary = reflect.TypeOf(primitive.Binary{}) +var tUndefined = reflect.TypeOf(primitive.Undefined{}) +var tOID = reflect.TypeOf(primitive.ObjectID{}) +var tDateTime = reflect.TypeOf(primitive.DateTime(0)) +var tNull = reflect.TypeOf(primitive.Null{}) +var tRegex = reflect.TypeOf(primitive.Regex{}) +var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) +var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) +var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) +var tSymbol = reflect.TypeOf(primitive.Symbol("")) +var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) +var tDecimal = reflect.TypeOf(primitive.Decimal128{}) +var tMinKey = reflect.TypeOf(primitive.MinKey{}) +var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) +var tD = reflect.TypeOf(primitive.D{}) +var tA = reflect.TypeOf(primitive.A{}) +var tE = reflect.TypeOf(primitive.E{}) + +var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) +var tCoreArray = reflect.TypeOf(bsoncore.Array{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go new file mode 100644 index 000000000000..39b07135b182 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -0,0 +1,202 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "math" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// UIntCodec is the Codec used for uint values. +// +// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To +// configure the uint encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
+// +// For example, to configure a mongo.Client to marshal Go uint values as the +// minimum BSON int size that can represent the value, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// IntMinSize: true, +// }) +// +// See the deprecation notice for each field in UIntCodec for the corresponding +// settings. +type UIntCodec struct { + // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the + // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. + EncodeToMinSize bool +} + +var ( + defaultUIntCodec = NewUIntCodec() + + // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. + _ typeDecoder = defaultUIntCodec +) + +// NewUIntCodec returns a UIntCodec with options opts. +// +// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See +// [UIntCodec] for more details. +func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { + uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) + + codec := UIntCodec{} + if uintOpt.EncodeToMinSize != nil { + codec.EncodeToMinSize = *uintOpt.EncodeToMinSize + } + return &codec +} + +// EncodeValue is the ValueEncoder for uint types. +func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + + // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 + useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) + + if u64 <= math.MaxInt32 && useMinSize { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return emptyValue, err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return emptyValue, errCannotTruncate + } + if f64 > float64(math.MaxInt64) { + return emptyValue, fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch t.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return emptyValue, fmt.Errorf("%d overflows uint8", i64) + } 
+ + return reflect.ValueOf(uint8(i64)), nil + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return emptyValue, fmt.Errorf("%d overflows uint16", i64) + } + + return reflect.ValueOf(uint16(i64)), nil + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return emptyValue, fmt.Errorf("%d overflows uint32", i64) + } + + return reflect.ValueOf(uint32(i64)), nil + case reflect.Uint64: + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint64", i64) + } + + return reflect.ValueOf(uint64(i64)), nil + case reflect.Uint: + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + v := uint64(i64) + if v > math.MaxUint { // Can we fit this inside of an uint + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + + return reflect.ValueOf(uint(v)), nil + default: + return emptyValue, ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.Zero(t), + } + } +} + +// DecodeValue is the ValueDecoder for uint types. +func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + elem, err := uic.decodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.SetUint(elem.Uint()) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go new file mode 100644 index 000000000000..996bd17127a5 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type ByteSliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +} + +// ByteSliceCodec creates a new *ByteSliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func ByteSliceCodec() *ByteSliceCodecOptions { + return &ByteSliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { + bs.EncodeNilAsEmpty = &b + return bs +} + +// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
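+//
+// A minimal sketch of the deprecated options pattern, assuming the sibling
+// bsoncodec.NewByteSliceCodec constructor (which performs this merge):
+//
+//	opts := bsonoptions.ByteSliceCodec().SetEncodeNilAsEmpty(true)
+//	codec := bsoncodec.NewByteSliceCodec(opts)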
+func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { + bs := ByteSliceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeNilAsEmpty != nil { + bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return bs +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go new file mode 100644 index 000000000000..c40973c8d436 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go @@ -0,0 +1,8 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonoptions defines the optional configurations for the BSON codecs. +package bsonoptions diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go new file mode 100644 index 000000000000..f522c7e03fef --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type EmptyInterfaceCodecOptions struct { + DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +} + +// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { + return &EmptyInterfaceCodecOptions{} +} + +// SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. +func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { + e.DecodeBinaryAsSlice = &b + return e +} + +// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
+func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { + e := EmptyInterfaceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeBinaryAsSlice != nil { + e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice + } + } + + return e +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go new file mode 100644 index 000000000000..a7a7c1d9804b --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -0,0 +1,82 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// MapCodecOptions represents all possible options for map encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type MapCodecOptions struct { + DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. + EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. + // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must + // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a + // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the + // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override + // TextMarshaler/TextUnmarshaler. Defaults to false. + EncodeKeysWithStringer *bool +} + +// MapCodec creates a new *MapCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func MapCodec() *MapCodecOptions { + return &MapCodecOptions{} +} + +// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. +func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { + t.DecodeZerosMap = &b + return t +} + +// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { + t.EncodeNilAsEmpty = &b + return t +} + +// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the +// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key +// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with +// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer +// will override TextMarshaler/TextUnmarshaler. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. 
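+//
+// For example (illustrative), with this option set a map[float64]bool key 1.5 is
+// written as the document key "1.5" via fmt.Sprint; without it, float keys are
+// not a supported key type and encoding returns an error.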
+func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { + t.EncodeKeysWithStringer = &b + return t +} + +// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { + s := MapCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeZerosMap != nil { + s.DecodeZerosMap = opt.DecodeZerosMap + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + if opt.EncodeKeysWithStringer != nil { + s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go new file mode 100644 index 000000000000..3c1e4f35ba1d --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// SliceCodecOptions represents all possible options for slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type SliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +} + +// SliceCodec creates a new *SliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func SliceCodec() *SliceCodecOptions { + return &SliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. +func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { + s.EncodeNilAsEmpty = &b + return s +} + +// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { + s := SliceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go new file mode 100644 index 000000000000..f8b76f996e49 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go @@ -0,0 +1,52 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +var defaultDecodeOIDAsHex = true + +// StringCodecOptions represents all possible options for string encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type StringCodecOptions struct { + DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. +} + +// StringCodec creates a new *StringCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func StringCodec() *StringCodecOptions { + return &StringCodecOptions{} +} + +// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made +// from the raw object ID bytes will be used. Defaults to true. +// +// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. +func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { + t.DecodeObjectIDAsHex = &b + return t +} + +// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { + s := &StringCodecOptions{&defaultDecodeOIDAsHex} + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeObjectIDAsHex != nil { + s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go new file mode 100644 index 000000000000..1cbfa32e8b40 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go @@ -0,0 +1,107 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +var defaultOverwriteDuplicatedInlinedFields = true + +// StructCodecOptions represents all possible options for struct encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type StructCodecOptions struct { + DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. + DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. + EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. + AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. + OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true. 
+} + +// StructCodec creates a new *StructCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func StructCodec() *StructCodecOptions { + return &StructCodecOptions{} +} + +// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. +func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { + t.DecodeZeroStruct = &b + return t +} + +// SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. +func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { + t.DecodeDeepZeroInline = &b + return t +} + +// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all +// its values set to their default value. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. +func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { + t.EncodeOmitDefaultStruct = &b + return t +} + +// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the +// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when +// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if +// there are duplicate keys after the struct is inlined. Defaults to true. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. +func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { + t.OverwriteDuplicatedInlinedFields = &b + return t +} + +// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. +// +// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be +// supported in Go Driver 2.0. +func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { + t.AllowUnexportedFields = &b + return t +} + +// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
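One detail worth noting before the struct merge helper below: unlike the other Merge* functions in this package, it seeds a default value. A hypothetical illustration of calling it with no inputs at all:

// Illustrative only: MergeStructCodecOptions pre-seeds
// OverwriteDuplicatedInlinedFields from the package-level default (true);
// every other field stays nil until a caller sets it.
opts := MergeStructCodecOptions()
_ = *opts.OverwriteDuplicatedInlinedFields // true, from the package default
_ = opts.DecodeZeroStruct                  // still nil: nothing set it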
+func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { + s := &StructCodecOptions{ + OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, + } + for _, opt := range opts { + if opt == nil { + continue + } + + if opt.DecodeZeroStruct != nil { + s.DecodeZeroStruct = opt.DecodeZeroStruct + } + if opt.DecodeDeepZeroInline != nil { + s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline + } + if opt.EncodeOmitDefaultStruct != nil { + s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct + } + if opt.OverwriteDuplicatedInlinedFields != nil { + s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields + } + if opt.AllowUnexportedFields != nil { + s.AllowUnexportedFields = opt.AllowUnexportedFields + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go new file mode 100644 index 000000000000..3f38433d2265 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// TimeCodecOptions represents all possible options for time.Time encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type TimeCodecOptions struct { + UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. +} + +// TimeCodec creates a new *TimeCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func TimeCodec() *TimeCodecOptions { + return &TimeCodecOptions{} +} + +// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. +func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { + t.UseLocalTimeZone = &b + return t +} + +// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { + t := TimeCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.UseLocalTimeZone != nil { + t.UseLocalTimeZone = opt.UseLocalTimeZone + } + } + + return t +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go new file mode 100644 index 000000000000..5091e4d9633f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// UIntCodecOptions represents all possible options for uint encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type UIntCodecOptions struct { + EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. +} + +// UIntCodec creates a new *UIntCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func UIntCodec() *UIntCodecOptions { + return &UIntCodecOptions{} +} + +// SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. +func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { + u.EncodeToMinSize = &b + return u +} + +// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { + u := UIntCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeToMinSize != nil { + u.EncodeToMinSize = opt.EncodeToMinSize + } + } + + return u +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go new file mode 100644 index 000000000000..1e25570b8556 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -0,0 +1,489 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "errors" + "fmt" + "io" + + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +// Copier is a type that allows copying between ValueReaders, ValueWriters, and +// []byte values. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +type Copier struct{} + +// NewCopier creates a new copier with the given registry. If a nil registry is provided +// a default registry is used. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func NewCopier() Copier { + return Copier{} +} + +// CopyDocument handles copying a document from src to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func CopyDocument(dst ValueWriter, src ValueReader) error { + return Copier{}.CopyDocument(dst, src) +} + +// CopyDocument handles copying one document from the src to the dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
+func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { + dr, err := src.ReadDocument() + if err != nil { + return err + } + + dw, err := dst.WriteDocument() + if err != nil { + return err + } + + return c.copyDocumentCore(dw, dr) +} + +// CopyArrayFromBytes copies the values from a BSON array represented as a +// []byte to a ValueWriter. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { + aw, err := dst.WriteArray() + if err != nil { + return err + } + + err = c.CopyBytesToArrayWriter(aw, src) + if err != nil { + return err + } + + return aw.WriteArrayEnd() +} + +// CopyDocumentFromBytes copies the values from a BSON document represented as a +// []byte to a ValueWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { + dw, err := dst.WriteDocument() + if err != nil { + return err + } + + err = c.CopyBytesToDocumentWriter(dw, src) + if err != nil { + return err + } + + return dw.WriteDocumentEnd() +} + +type writeElementFn func(key string) (ValueWriter, error) + +// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an +// ArrayWriter. +// +// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go +// Driver 2.0. +func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { + wef := func(_ string) (ValueWriter, error) { + return dst.WriteArrayElement() + } + + return c.copyBytesToValueWriter(src, wef) +} + +// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a +// DocumentWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { + wef := func(key string) (ValueWriter, error) { + return dst.WriteDocumentElement(key) + } + + return c.copyBytesToValueWriter(src, wef) +} + +func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { + // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. + length, rem, ok := bsoncore.ReadLength(src) + if !ok { + return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) + } + if len(src) < int(length) { + return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) + } + rem = rem[:length-4] + + var t bsontype.Type + var key string + var val bsoncore.Value + for { + t, rem, ok = bsoncore.ReadType(rem) + if !ok { + return io.EOF + } + if t == bsontype.Type(0) { + if len(rem) != 0 { + return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) + } + break + } + + key, rem, ok = bsoncore.ReadKey(rem) + if !ok { + return fmt.Errorf("invalid key found. remaining bytes=%v", rem) + } + + // write as either array element or document element using writeElementFn + vw, err := wef(key) + if err != nil { + return err + } + + val, rem, ok = bsoncore.ReadValue(rem, t) + if !ok { + return fmt.Errorf("not enough bytes available to read type. 
bytes=%d type=%s", len(rem), t) + } + err = c.CopyValueFromBytes(vw, t, val.Data) + if err != nil { + return err + } + } + return nil +} + +// CopyDocumentToBytes copies an entire document from the ValueReader and +// returns it as bytes. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { + return c.AppendDocumentBytes(nil, src) +} + +// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will +// append the result to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { + if br, ok := src.(BytesReader); ok { + _, dst, err := br.ReadValueBytes(dst) + return dst, err + } + + vw := vwPool.Get().(*valueWriter) + defer putValueWriter(vw) + + vw.reset(dst) + + err := c.CopyDocument(vw, src) + dst = vw.buf + return dst, err +} + +// AppendArrayBytes copies an array from the ValueReader to dst. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { + if br, ok := src.(BytesReader); ok { + _, dst, err := br.ReadValueBytes(dst) + return dst, err + } + + vw := vwPool.Get().(*valueWriter) + defer putValueWriter(vw) + + vw.reset(dst) + + err := c.copyArray(vw, src) + dst = vw.buf + return dst, err +} + +// CopyValueFromBytes will write the value represtend by t and src to dst. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. +func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { + if wvb, ok := dst.(BytesWriter); ok { + return wvb.WriteValueBytes(t, src) + } + + vr := vrPool.Get().(*valueReader) + defer vrPool.Put(vr) + + vr.reset(src) + vr.pushElement(t) + + return c.CopyValue(dst, vr) +} + +// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a +// []byte. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. +func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { + return c.AppendValueBytes(nil, src) +} + +// AppendValueBytes functions the same as CopyValueToBytes, but will append the +// result to dst. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. +func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { + if br, ok := src.(BytesReader); ok { + return br.ReadValueBytes(dst) + } + + vw := vwPool.Get().(*valueWriter) + defer putValueWriter(vw) + + start := len(dst) + + vw.reset(dst) + vw.push(mElement) + + err := c.CopyValue(vw, src) + if err != nil { + return 0, dst, err + } + + return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil +} + +// CopyValue will copy a single value from src to dst. +// +// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
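As a hedged usage sketch of the deprecated Copier API together with the extended JSON reader added later in this patch (the function name extJSONToBSON and the strings import are illustrative assumptions, not part of the package):

// extJSONToBSON converts one canonical extended JSON document into raw BSON
// bytes by reading it through an extended JSON ValueReader and letting
// CopyDocumentToBytes drive the copy into the package's value writer.
func extJSONToBSON(doc string) ([]byte, error) {
    vr, err := NewExtJSONValueReader(strings.NewReader(doc), true)
    if err != nil {
        return nil, err
    }
    return NewCopier().CopyDocumentToBytes(vr)
}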
+func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { + var err error + switch src.Type() { + case bsontype.Double: + var f64 float64 + f64, err = src.ReadDouble() + if err != nil { + break + } + err = dst.WriteDouble(f64) + case bsontype.String: + var str string + str, err = src.ReadString() + if err != nil { + return err + } + err = dst.WriteString(str) + case bsontype.EmbeddedDocument: + err = c.CopyDocument(dst, src) + case bsontype.Array: + err = c.copyArray(dst, src) + case bsontype.Binary: + var data []byte + var subtype byte + data, subtype, err = src.ReadBinary() + if err != nil { + break + } + err = dst.WriteBinaryWithSubtype(data, subtype) + case bsontype.Undefined: + err = src.ReadUndefined() + if err != nil { + break + } + err = dst.WriteUndefined() + case bsontype.ObjectID: + var oid primitive.ObjectID + oid, err = src.ReadObjectID() + if err != nil { + break + } + err = dst.WriteObjectID(oid) + case bsontype.Boolean: + var b bool + b, err = src.ReadBoolean() + if err != nil { + break + } + err = dst.WriteBoolean(b) + case bsontype.DateTime: + var dt int64 + dt, err = src.ReadDateTime() + if err != nil { + break + } + err = dst.WriteDateTime(dt) + case bsontype.Null: + err = src.ReadNull() + if err != nil { + break + } + err = dst.WriteNull() + case bsontype.Regex: + var pattern, options string + pattern, options, err = src.ReadRegex() + if err != nil { + break + } + err = dst.WriteRegex(pattern, options) + case bsontype.DBPointer: + var ns string + var pointer primitive.ObjectID + ns, pointer, err = src.ReadDBPointer() + if err != nil { + break + } + err = dst.WriteDBPointer(ns, pointer) + case bsontype.JavaScript: + var js string + js, err = src.ReadJavascript() + if err != nil { + break + } + err = dst.WriteJavascript(js) + case bsontype.Symbol: + var symbol string + symbol, err = src.ReadSymbol() + if err != nil { + break + } + err = dst.WriteSymbol(symbol) + case bsontype.CodeWithScope: + var code string + var srcScope DocumentReader + code, srcScope, err = src.ReadCodeWithScope() + if err != nil { + break + } + + var dstScope DocumentWriter + dstScope, err = dst.WriteCodeWithScope(code) + if err != nil { + break + } + err = c.copyDocumentCore(dstScope, srcScope) + case bsontype.Int32: + var i32 int32 + i32, err = src.ReadInt32() + if err != nil { + break + } + err = dst.WriteInt32(i32) + case bsontype.Timestamp: + var t, i uint32 + t, i, err = src.ReadTimestamp() + if err != nil { + break + } + err = dst.WriteTimestamp(t, i) + case bsontype.Int64: + var i64 int64 + i64, err = src.ReadInt64() + if err != nil { + break + } + err = dst.WriteInt64(i64) + case bsontype.Decimal128: + var d128 primitive.Decimal128 + d128, err = src.ReadDecimal128() + if err != nil { + break + } + err = dst.WriteDecimal128(d128) + case bsontype.MinKey: + err = src.ReadMinKey() + if err != nil { + break + } + err = dst.WriteMinKey() + case bsontype.MaxKey: + err = src.ReadMaxKey() + if err != nil { + break + } + err = dst.WriteMaxKey() + default: + err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) + } + + return err +} + +func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { + ar, err := src.ReadArray() + if err != nil { + return err + } + + aw, err := dst.WriteArray() + if err != nil { + return err + } + + for { + vr, err := ar.ReadValue() + if errors.Is(err, ErrEOA) { + break + } + if err != nil { + return err + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + err = c.CopyValue(vw, vr) + if err != nil { + return err + } 
+ } + + return aw.WriteArrayEnd() +} + +func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { + for { + key, vr, err := dr.ReadElement() + if errors.Is(err, ErrEOD) { + break + } + if err != nil { + return err + } + + vw, err := dw.WriteDocumentElement(key) + if err != nil { + return err + } + + err = c.CopyValue(vw, vr) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go new file mode 100644 index 000000000000..750b0d2af51e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go @@ -0,0 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonrw contains abstractions for reading and writing +// BSON and BSON like types from sources. +package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go new file mode 100644 index 000000000000..f0702d9d3025 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -0,0 +1,806 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +const maxNestingDepth = 200 + +// ErrInvalidJSON indicates the JSON input is invalid +var ErrInvalidJSON = errors.New("invalid JSON input") + +type jsonParseState byte + +const ( + jpsStartState jsonParseState = iota + jpsSawBeginObject + jpsSawEndObject + jpsSawBeginArray + jpsSawEndArray + jpsSawColon + jpsSawComma + jpsSawKey + jpsSawValue + jpsDoneState + jpsInvalidState +) + +type jsonParseMode byte + +const ( + jpmInvalidMode jsonParseMode = iota + jpmObjectMode + jpmArrayMode +) + +type extJSONValue struct { + t bsontype.Type + v interface{} +} + +type extJSONObject struct { + keys []string + values []*extJSONValue +} + +type extJSONParser struct { + js *jsonScanner + s jsonParseState + m []jsonParseMode + k string + v *extJSONValue + + err error + canonical bool + depth int + maxDepth int + + emptyObject bool + relaxedUUID bool +} + +// newExtJSONParser returns a new extended JSON parser, ready to to begin +// parsing from the first character of the argued json input. It will not +// perform any read-ahead and will therefore not report any errors about +// malformed JSON at this point. 
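Before the parser internals, a rough, hypothetical sketch of how code inside this package drives the parser for a flat document of scalar (or wrapped-scalar) values; walkOneDocument is an invented name, and nested documents or arrays would go through the reader layer defined further below rather than this loop:

// walkOneDocument alternates readKey (which also peeks the upcoming value's
// BSON type) and readValue until the document or the input ends.
func walkOneDocument(doc string) error {
    p := newExtJSONParser(strings.NewReader(doc), true)
    for {
        key, t, err := p.readKey()
        if errors.Is(err, ErrEOD) || errors.Is(err, io.EOF) {
            return nil // end of document / end of input
        }
        if err != nil {
            return err
        }
        if _, err := p.readValue(t); err != nil { // consume the value readKey identified
            return err
        }
        _ = key
    }
}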
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { + return &extJSONParser{ + js: &jsonScanner{r: r}, + s: jpsStartState, + m: []jsonParseMode{}, + canonical: canonical, + maxDepth: maxNestingDepth, + } +} + +// peekType examines the next value and returns its BSON Type +func (ejp *extJSONParser) peekType() (bsontype.Type, error) { + var t bsontype.Type + var err error + initialState := ejp.s + + ejp.advanceState() + switch ejp.s { + case jpsSawValue: + t = ejp.v.t + case jpsSawBeginArray: + t = bsontype.Array + case jpsInvalidState: + err = ejp.err + case jpsSawComma: + // in array mode, seeing a comma means we need to progress again to actually observe a type + if ejp.peekMode() == jpmArrayMode { + return ejp.peekType() + } + case jpsSawEndArray: + // this would only be a valid state if we were in array mode, so return end-of-array error + err = ErrEOA + case jpsSawBeginObject: + // peek key to determine type + ejp.advanceState() + switch ejp.s { + case jpsSawEndObject: // empty embedded document + t = bsontype.EmbeddedDocument + ejp.emptyObject = true + case jpsInvalidState: + err = ejp.err + case jpsSawKey: + if initialState == jpsStartState { + return bsontype.EmbeddedDocument, nil + } + t = wrapperKeyBSONType(ejp.k) + + // if $uuid is encountered, parse as binary subtype 4 + if ejp.k == "$uuid" { + ejp.relaxedUUID = true + t = bsontype.Binary + } + + switch t { + case bsontype.JavaScript: + // just saw $code, need to check for $scope at same level + _, err = ejp.readValue(bsontype.JavaScript) + if err != nil { + break + } + + switch ejp.s { + case jpsSawEndObject: // type is TypeJavaScript + case jpsSawComma: + ejp.advanceState() + + if ejp.s == jpsSawKey && ejp.k == "$scope" { + t = bsontype.CodeWithScope + } else { + err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) + } + case jpsInvalidState: + err = ejp.err + default: + err = ErrInvalidJSON + } + case bsontype.CodeWithScope: + err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope") + } + } + } + + return t, err +} + +// readKey parses the next key and its type and returns them +func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { + if ejp.emptyObject { + ejp.emptyObject = false + return "", 0, ErrEOD + } + + // advance to key (or return with error) + switch ejp.s { + case jpsStartState: + ejp.advanceState() + if ejp.s == jpsSawBeginObject { + ejp.advanceState() + } + case jpsSawBeginObject: + ejp.advanceState() + case jpsSawValue, jpsSawEndObject, jpsSawEndArray: + ejp.advanceState() + switch ejp.s { + case jpsSawBeginObject, jpsSawComma: + ejp.advanceState() + case jpsSawEndObject: + return "", 0, ErrEOD + case jpsDoneState: + return "", 0, io.EOF + case jpsInvalidState: + return "", 0, ejp.err + default: + return "", 0, ErrInvalidJSON + } + case jpsSawKey: // do nothing (key was peeked before) + default: + return "", 0, invalidRequestError("key") + } + + // read key + var key string + + switch ejp.s { + case jpsSawKey: + key = ejp.k + case jpsSawEndObject: + return "", 0, ErrEOD + case jpsInvalidState: + return "", 0, ejp.err + default: + return "", 0, invalidRequestError("key") + } + + // check for colon + ejp.advanceState() + if err := ensureColon(ejp.s, key); err != nil { + return "", 0, err + } + + // peek at the value to determine type + t, err := ejp.peekType() + if err != nil { + return "", 0, err + } + + return key, t, nil +} + +// readValue returns the value corresponding to the Type returned by peekType 
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { + if ejp.s == jpsInvalidState { + return nil, ejp.err + } + + var v *extJSONValue + + switch t { + case bsontype.Null, bsontype.Boolean, bsontype.String: + if ejp.s != jpsSawValue { + return nil, invalidRequestError(t.String()) + } + v = ejp.v + case bsontype.Int32, bsontype.Int64, bsontype.Double: + // relaxed version allows these to be literal number values + if ejp.s == jpsSawValue { + v = ejp.v + break + } + fallthrough + case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: + switch ejp.s { + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read value + ejp.advanceState() + if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { + return nil, invalidJSONErrorForType("value", t) + } + + v = ejp.v + + // read end object + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("} after value", t) + } + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: + if ejp.s != jpsSawKey { + return nil, invalidRequestError(t.String()) + } + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + ejp.advanceState() + if t == bsontype.Binary && ejp.s == jpsSawValue { + // convert relaxed $uuid format + if ejp.relaxedUUID { + defer func() { ejp.relaxedUUID = false }() + uuid, err := ejp.v.parseSymbol() + if err != nil { + return nil, err + } + + // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing + // in the 8th, 13th, 18th, and 23rd characters. 
+ // + // See https://tools.ietf.org/html/rfc4122#section-3 + valid := len(uuid) == 36 && + string(uuid[8]) == "-" && + string(uuid[13]) == "-" && + string(uuid[18]) == "-" && + string(uuid[23]) == "-" + if !valid { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") + } + + // remove hyphens + uuidNoHyphens := strings.ReplaceAll(uuid, "-", "") + if len(uuidNoHyphens) != 32 { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") + } + + // convert hex to bytes + bytes, err := hex.DecodeString(uuidNoHyphens) + if err != nil { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) + } + + base64 := &extJSONValue{ + t: bsontype.String, + v: base64.StdEncoding.EncodeToString(bytes), + } + subType := &extJSONValue{ + t: bsontype.String, + v: "04", + } + + v = &extJSONValue{ + t: bsontype.EmbeddedDocument, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{base64, subType}, + }, + } + + break + } + + // convert legacy $binary format + base64 := ejp.v + + ejp.advanceState() + if ejp.s != jpsSawComma { + return nil, invalidJSONErrorForType(",", bsontype.Binary) + } + + ejp.advanceState() + key, t, err := ejp.readKey() + if err != nil { + return nil, err + } + if key != "$type" { + return nil, invalidJSONErrorForType("$type", bsontype.Binary) + } + + subType, err := ejp.readValue(t) + if err != nil { + return nil, err + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) + } + + v = &extJSONValue{ + t: bsontype.EmbeddedDocument, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{base64, subType}, + }, + } + break + } + + // read KV pairs + if ejp.s != jpsSawBeginObject { + return nil, invalidJSONErrorForType("{", t) + } + + keys, vals, err := ejp.readObject(2, true) + if err != nil { + return nil, err + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) + } + + v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} + + case bsontype.DateTime: + switch ejp.s { + case jpsSawValue: + v = ejp.v + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + ejp.advanceState() + switch ejp.s { + case jpsSawBeginObject: + keys, vals, err := ejp.readObject(1, true) + if err != nil { + return nil, err + } + v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} + case jpsSawValue: + if ejp.canonical { + return nil, invalidJSONError("{") + } + v = ejp.v + default: + if ejp.canonical { + return nil, invalidJSONErrorForType("object", t) + } + return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("value and then }", t) + } + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.JavaScript: + switch ejp.s { + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read value + ejp.advanceState() + if ejp.s != 
jpsSawValue { + return nil, invalidJSONErrorForType("value", t) + } + v = ejp.v + + // read end object or comma and just return + ejp.advanceState() + case jpsSawEndObject: + v = ejp.v + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.CodeWithScope: + if ejp.s == jpsSawKey && ejp.k == "$scope" { + v = ejp.v // this is the $code string from earlier + + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read { + ejp.advanceState() + if ejp.s != jpsSawBeginObject { + return nil, invalidJSONError("$scope to be embedded document") + } + } else { + return nil, invalidRequestError(t.String()) + } + case bsontype.EmbeddedDocument, bsontype.Array: + return nil, invalidRequestError(t.String()) + } + + return v, nil +} + +// readObject is a utility method for reading full objects of known (or expected) size +// it is useful for extended JSON types such as binary, datetime, regex, and timestamp +func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { + keys := make([]string, numKeys) + vals := make([]*extJSONValue, numKeys) + + if !started { + ejp.advanceState() + if ejp.s != jpsSawBeginObject { + return nil, nil, invalidJSONError("{") + } + } + + for i := 0; i < numKeys; i++ { + key, t, err := ejp.readKey() + if err != nil { + return nil, nil, err + } + + switch ejp.s { + case jpsSawKey: + v, err := ejp.readValue(t) + if err != nil { + return nil, nil, err + } + + keys[i] = key + vals[i] = v + case jpsSawValue: + keys[i] = key + vals[i] = ejp.v + default: + return nil, nil, invalidJSONError("value") + } + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, nil, invalidJSONError("}") + } + + return keys, vals, nil +} + +// advanceState reads the next JSON token from the scanner and transitions +// from the current state based on that token's type +func (ejp *extJSONParser) advanceState() { + if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { + return + } + + jt, err := ejp.js.nextToken() + + if err != nil { + ejp.err = err + ejp.s = jpsInvalidState + return + } + + valid := ejp.validateToken(jt.t) + if !valid { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + return + } + + switch jt.t { + case jttBeginObject: + ejp.s = jpsSawBeginObject + ejp.pushMode(jpmObjectMode) + ejp.depth++ + + if ejp.depth > ejp.maxDepth { + ejp.err = nestingDepthError(jt.p, ejp.depth) + ejp.s = jpsInvalidState + } + case jttEndObject: + ejp.s = jpsSawEndObject + ejp.depth-- + + if ejp.popMode() != jpmObjectMode { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttBeginArray: + ejp.s = jpsSawBeginArray + ejp.pushMode(jpmArrayMode) + case jttEndArray: + ejp.s = jpsSawEndArray + + if ejp.popMode() != jpmArrayMode { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttColon: + ejp.s = jpsSawColon + case jttComma: + ejp.s = jpsSawComma + case jttEOF: + ejp.s = jpsDoneState + if len(ejp.m) != 0 { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttString: + switch ejp.s { + case jpsSawComma: + if ejp.peekMode() == jpmArrayMode { + ejp.s = jpsSawValue + ejp.v = extendJSONToken(jt) + return + } + fallthrough + case jpsSawBeginObject: + ejp.s = jpsSawKey + ejp.k = jt.v.(string) + return + } + fallthrough + default: + ejp.s = jpsSawValue + ejp.v = extendJSONToken(jt) + } +} + +var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ + jpsStartState: { + jttBeginObject: 
true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + jttEOF: true, + }, + jpsSawBeginObject: { + jttEndObject: true, + jttString: true, + }, + jpsSawEndObject: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsSawBeginArray: { + jttBeginObject: true, + jttBeginArray: true, + jttEndArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawEndArray: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsSawColon: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawComma: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawKey: { + jttColon: true, + }, + jpsSawValue: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsDoneState: {}, + jpsInvalidState: {}, +} + +func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { + switch ejp.s { + case jpsSawEndObject: + // if we are at depth zero and the next token is a '{', + // we can consider it valid only if we are not in array mode. + if jtt == jttBeginObject && ejp.depth == 0 { + return ejp.peekMode() != jpmArrayMode + } + case jpsSawComma: + switch ejp.peekMode() { + // the only valid next token after a comma inside a document is a string (a key) + case jpmObjectMode: + return jtt == jttString + case jpmInvalidMode: + return false + } + } + + _, ok := jpsValidTransitionTokens[ejp.s][jtt] + return ok +} + +// ensureExtValueType returns true if the current value has the expected +// value type for single-key extended JSON types. 
For example, +// {"$numberInt": v} v must be TypeString +func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { + switch t { + case bsontype.MinKey, bsontype.MaxKey: + return ejp.v.t == bsontype.Int32 + case bsontype.Undefined: + return ejp.v.t == bsontype.Boolean + case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: + return ejp.v.t == bsontype.String + default: + return false + } +} + +func (ejp *extJSONParser) pushMode(m jsonParseMode) { + ejp.m = append(ejp.m, m) +} + +func (ejp *extJSONParser) popMode() jsonParseMode { + l := len(ejp.m) + if l == 0 { + return jpmInvalidMode + } + + m := ejp.m[l-1] + ejp.m = ejp.m[:l-1] + + return m +} + +func (ejp *extJSONParser) peekMode() jsonParseMode { + l := len(ejp.m) + if l == 0 { + return jpmInvalidMode + } + + return ejp.m[l-1] +} + +func extendJSONToken(jt *jsonToken) *extJSONValue { + var t bsontype.Type + + switch jt.t { + case jttInt32: + t = bsontype.Int32 + case jttInt64: + t = bsontype.Int64 + case jttDouble: + t = bsontype.Double + case jttString: + t = bsontype.String + case jttBool: + t = bsontype.Boolean + case jttNull: + t = bsontype.Null + default: + return nil + } + + return &extJSONValue{t: t, v: jt.v} +} + +func ensureColon(s jsonParseState, key string) error { + if s != jpsSawColon { + return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) + } + + return nil +} + +func invalidRequestError(s string) error { + return fmt.Errorf("invalid request to read %s", s) +} + +func invalidJSONError(expected string) error { + return fmt.Errorf("invalid JSON input; expected %s", expected) +} + +func invalidJSONErrorForType(expected string, t bsontype.Type) error { + return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) +} + +func unexpectedTokenError(jt *jsonToken) error { + switch jt.t { + case jttInt32, jttInt64, jttDouble: + return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p) + case jttString: + return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) + case jttBool: + return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) + case jttNull: + return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) + case jttEOF: + return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) + default: + return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) + } +} + +func nestingDepthError(p, depth int) error { + return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go new file mode 100644 index 000000000000..59ddfc448583 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -0,0 +1,653 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "errors" + "fmt" + "io" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. 
+// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. +type ExtJSONValueReaderPool struct { + pool sync.Pool +} + +// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. +func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { + return &ExtJSONValueReaderPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(extJSONValueReader) + }, + }, + } +} + +// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. +func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { + vr := bvrp.pool.Get().(*extJSONValueReader) + return vr.reset(r, canonical) +} + +// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing +// is inserted into the pool and ok will be false. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. +func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { + bvr, ok := vr.(*extJSONValueReader) + if !ok { + return false + } + + bvr, _ = bvr.reset(nil, false) + bvrp.pool.Put(bvr) + return true +} + +type ejvrState struct { + mode mode + vType bsontype.Type + depth int +} + +// extJSONValueReader is for reading extended JSON. +type extJSONValueReader struct { + p *extJSONParser + + stack []ejvrState + frame int +} + +// NewExtJSONValueReader creates a new ValueReader from a given io.Reader +// It will interpret the JSON of r as canonical or relaxed according to the +// given canonical flag +func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { + return newExtJSONValueReader(r, canonical) +} + +func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { + ejvr := new(extJSONValueReader) + return ejvr.reset(r, canonical) +} + +func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { + p := newExtJSONParser(r, canonical) + typ, err := p.peekType() + + if err != nil { + return nil, ErrInvalidJSON + } + + var m mode + switch typ { + case bsontype.EmbeddedDocument: + m = mTopLevel + case bsontype.Array: + m = mArray + default: + m = mValue + } + + stack := make([]ejvrState, 1, 5) + stack[0] = ejvrState{ + mode: m, + vType: typ, + } + return &extJSONValueReader{ + p: p, + stack: stack, + }, nil +} + +func (ejvr *extJSONValueReader) advanceFrame() { + if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack + length := len(ejvr.stack) + if length+1 >= cap(ejvr.stack) { + // double it + buf := make([]ejvrState, 2*cap(ejvr.stack)+1) + copy(buf, ejvr.stack) + ejvr.stack = buf + } + ejvr.stack = ejvr.stack[:length+1] + } + ejvr.frame++ + + // Clean the stack + ejvr.stack[ejvr.frame].mode = 0 + ejvr.stack[ejvr.frame].vType = 0 + ejvr.stack[ejvr.frame].depth = 0 +} + +func (ejvr *extJSONValueReader) pushDocument() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mDocument + ejvr.stack[ejvr.frame].depth = ejvr.p.depth +} + +func (ejvr *extJSONValueReader) pushCodeWithScope() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mCodeWithScope +} + +func (ejvr *extJSONValueReader) pushArray() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mArray +} + +func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = m + ejvr.stack[ejvr.frame].vType = t +} + +func 
(ejvr *extJSONValueReader) pop() { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + ejvr.frame-- + case mDocument, mArray, mCodeWithScope: + ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... + } +} + +func (ejvr *extJSONValueReader) skipObject() { + // read entire object until depth returns to 0 (last ending } or ] seen) + depth := 1 + for depth > 0 { + ejvr.p.advanceState() + + // If object is empty, raise depth and continue. When emptyObject is true, the + // parser has already read both the opening and closing brackets of an empty + // object ("{}"), so the next valid token will be part of the parent document, + // not part of the nested document. + // + // If there is a comma, there are remaining fields, emptyObject must be set back + // to false, and comma must be skipped with advanceState(). + if ejvr.p.emptyObject { + if ejvr.p.s == jpsSawComma { + ejvr.p.emptyObject = false + ejvr.p.advanceState() + } + depth-- + continue + } + + switch ejvr.p.s { + case jpsSawBeginObject, jpsSawBeginArray: + depth++ + case jpsSawEndObject, jpsSawEndArray: + depth-- + } + } +} + +func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { + te := TransitionError{ + name: name, + current: ejvr.stack[ejvr.frame].mode, + destination: destination, + modes: modes, + action: "read", + } + if ejvr.frame != 0 { + te.parent = ejvr.stack[ejvr.frame-1].mode + } + return te +} + +func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { + return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) +} + +func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != t { + return ejvr.typeError(t) + } + default: + modes := []mode{mElement, mValue} + if addModes != nil { + modes = append(modes, addModes...) 
+ } + return ejvr.invalidTransitionErr(destination, callerName, modes) + } + + return nil +} + +func (ejvr *extJSONValueReader) Type() bsontype.Type { + return ejvr.stack[ejvr.frame].vType +} + +func (ejvr *extJSONValueReader) Skip() error { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + default: + return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) + } + + defer ejvr.pop() + + t := ejvr.stack[ejvr.frame].vType + switch t { + case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: + // read entire array, doc or CodeWithScope + ejvr.skipObject() + default: + _, err := ejvr.p.readValue(t) + if err != nil { + return err + } + } + + return nil +} + +func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: // allow reading array from top level + case mArray: + return ejvr, nil + default: + if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { + return nil, err + } + } + + ejvr.pushArray() + + return ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { + if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { + return nil, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Binary) + if err != nil { + return nil, 0, err + } + + b, btype, err = v.parseBinary() + + ejvr.pop() + return b, btype, err +} + +func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { + if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { + return false, err + } + + v, err := ejvr.p.readValue(bsontype.Boolean) + if err != nil { + return false, err + } + + if v.t != bsontype.Boolean { + return false, fmt.Errorf("expected type bool, but got type %s", v.t) + } + + ejvr.pop() + return v.v.(bool), nil +} + +func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: + return ejvr, nil + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { + return nil, ejvr.typeError(bsontype.EmbeddedDocument) + } + + ejvr.pushDocument() + return ejvr, nil + default: + return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) + } +} + +func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { + if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { + return "", nil, err + } + + v, err := ejvr.p.readValue(bsontype.CodeWithScope) + if err != nil { + return "", nil, err + } + + code, err = v.parseJavascript() + + ejvr.pushCodeWithScope() + return code, ejvr, err +} + +func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { + if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { + return "", primitive.NilObjectID, err + } + + v, err := ejvr.p.readValue(bsontype.DBPointer) + if err != nil { + return "", primitive.NilObjectID, err + } + + ns, oid, err = v.parseDBPointer() + + ejvr.pop() + return ns, oid, err +} + +func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.DateTime) + if err != nil { + return 0, err + } + + d, err := v.parseDateTime() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) 
ReadDecimal128() (primitive.Decimal128, error) { + if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { + return primitive.Decimal128{}, err + } + + v, err := ejvr.p.readValue(bsontype.Decimal128) + if err != nil { + return primitive.Decimal128{}, err + } + + d, err := v.parseDecimal128() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { + if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Double) + if err != nil { + return 0, err + } + + d, err := v.parseDouble() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { + if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int32) + if err != nil { + return 0, err + } + + i, err := v.parseInt32() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int64) + if err != nil { + return 0, err + } + + i, err := v.parseInt64() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { + if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.JavaScript) + if err != nil { + return "", err + } + + code, err = v.parseJavascript() + + ejvr.pop() + return code, err +} + +func (ejvr *extJSONValueReader) ReadMaxKey() error { + if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MaxKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("max") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadMinKey() error { + if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MinKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("min") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadNull() error { + if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Null) + if err != nil { + return err + } + + if v.t != bsontype.Null { + return fmt.Errorf("expected type null but got type %s", v.t) + } + + ejvr.pop() + return nil +} + +func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { + if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { + return primitive.ObjectID{}, err + } + + v, err := ejvr.p.readValue(bsontype.ObjectID) + if err != nil { + return primitive.ObjectID{}, err + } + + oid, err := v.parseObjectID() + + ejvr.pop() + return oid, err +} + +func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { + if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { + return "", "", err + } + + v, err := ejvr.p.readValue(bsontype.Regex) + if err != nil { + return "", "", err + } + + pattern, options, err = v.parseRegex() + + ejvr.pop() + return pattern, options, err +} + +func (ejvr *extJSONValueReader) ReadString() (string, error) { + if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { + 
return "", err + } + + v, err := ejvr.p.readValue(bsontype.String) + if err != nil { + return "", err + } + + if v.t != bsontype.String { + return "", fmt.Errorf("expected type string but got type %s", v.t) + } + + ejvr.pop() + return v.v.(string), nil +} + +func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { + if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.Symbol) + if err != nil { + return "", err + } + + symbol, err = v.parseSymbol() + + ejvr.pop() + return symbol, err +} + +func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { + if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { + return 0, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Timestamp) + if err != nil { + return 0, 0, err + } + + t, i, err = v.parseTimestamp() + + ejvr.pop() + return t, i, err +} + +func (ejvr *extJSONValueReader) ReadUndefined() error { + if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Undefined) + if err != nil { + return err + } + + err = v.parseUndefined() + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel, mDocument, mCodeWithScope: + default: + return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) + } + + name, t, err := ejvr.p.readKey() + + if err != nil { + if errors.Is(err, ErrEOD) { + if ejvr.stack[ejvr.frame].mode == mCodeWithScope { + _, err := ejvr.p.peekType() + if err != nil { + return "", nil, err + } + } + + ejvr.pop() + } + + return "", nil, err + } + + ejvr.push(mElement, t) + return name, ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mArray: + default: + return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) + } + + t, err := ejvr.p.peekType() + if err != nil { + if errors.Is(err, ErrEOA) { + ejvr.pop() + } + + return nil, err + } + + ejvr.push(mValue, t) + return ejvr, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go new file mode 100644 index 000000000000..ba39c9601fb9 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go @@ -0,0 +1,223 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/golang/go by The Go Authors +// See THIRD-PARTY-NOTICES for original license terms. + +package bsonrw + +import "unicode/utf8" + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). 
+var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML