From 9fa14575f4220595695dd1e9a09790441e56f950 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Mar 2026 12:46:06 +0000 Subject: [PATCH] chore(deps): bump github.com/modelcontextprotocol/go-sdk Bumps [github.com/modelcontextprotocol/go-sdk](https://github.com/modelcontextprotocol/go-sdk) from 1.3.0 to 1.4.1. - [Release notes](https://github.com/modelcontextprotocol/go-sdk/releases) - [Commits](https://github.com/modelcontextprotocol/go-sdk/compare/v1.3.0...v1.4.1) --- updated-dependencies: - dependency-name: github.com/modelcontextprotocol/go-sdk dependency-version: 1.4.1 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- go.mod | 4 +- go.sum | 8 +- .../modelcontextprotocol/go-sdk/LICENSE | 197 ++- .../modelcontextprotocol/go-sdk/auth/auth.go | 6 +- .../go-sdk/auth/authorization_code.go | 565 ++++++ .../go-sdk/auth/client.go | 139 +- .../go-sdk/auth/client_private.go | 135 ++ .../go-sdk/internal/json/json.go | 19 + .../go-sdk/internal/jsonrpc2/conn.go | 3 +- .../go-sdk/internal/jsonrpc2/frame.go | 4 +- .../go-sdk/internal/jsonrpc2/messages.go | 46 +- .../go-sdk/internal/mcpgodebug/mcpgodebug.go | 52 + .../go-sdk/internal/util/net.go | 26 + .../modelcontextprotocol/go-sdk/mcp/client.go | 129 +- .../go-sdk/mcp/content.go | 219 ++- .../modelcontextprotocol/go-sdk/mcp/event.go | 65 +- .../go-sdk/mcp/logging.go | 11 +- .../go-sdk/mcp/protocol.go | 301 +++- .../go-sdk/mcp/requests.go | 1 + .../go-sdk/mcp/resource.go | 17 + .../go-sdk/mcp/resource_go124.go | 29 - .../go-sdk/mcp/resource_pre_go124.go | 25 - .../modelcontextprotocol/go-sdk/mcp/server.go | 55 +- .../modelcontextprotocol/go-sdk/mcp/shared.go | 3 +- .../modelcontextprotocol/go-sdk/mcp/sse.go | 3 +- .../go-sdk/mcp/streamable.go | 231 ++- .../go-sdk/mcp/streamable_client.go | 6 +- .../modelcontextprotocol/go-sdk/mcp/tool.go | 3 +- .../go-sdk/mcp/transport.go | 9 +- .../modelcontextprotocol/go-sdk/mcp/util.go | 19 +- 
.../go-sdk/oauthex/auth_meta.go | 93 +- .../go-sdk/oauthex/dcr.go | 8 +- .../go-sdk/oauthex/oauth2.go | 39 +- .../go-sdk/oauthex/oauthex.go | 86 - .../go-sdk/oauthex/resource_meta.go | 100 +- .../go-sdk/oauthex/resource_meta_public.go | 105 ++ vendor/github.com/segmentio/asm/LICENSE | 21 + .../github.com/segmentio/asm/ascii/ascii.go | 53 + .../segmentio/asm/ascii/equal_fold.go | 30 + .../segmentio/asm/ascii/equal_fold_amd64.go | 13 + .../segmentio/asm/ascii/equal_fold_amd64.s | 304 ++++ .../segmentio/asm/ascii/equal_fold_default.go | 60 + .../github.com/segmentio/asm/ascii/valid.go | 18 + .../segmentio/asm/ascii/valid_amd64.go | 9 + .../segmentio/asm/ascii/valid_amd64.s | 132 ++ .../segmentio/asm/ascii/valid_default.go | 48 + .../segmentio/asm/ascii/valid_print.go | 18 + .../segmentio/asm/ascii/valid_print_amd64.go | 9 + .../segmentio/asm/ascii/valid_print_amd64.s | 185 ++ .../asm/ascii/valid_print_default.go | 46 + .../github.com/segmentio/asm/base64/base64.go | 67 + .../segmentio/asm/base64/base64_amd64.go | 160 ++ .../segmentio/asm/base64/base64_default.go | 14 + .../segmentio/asm/base64/decode_amd64.go | 10 + .../segmentio/asm/base64/decode_amd64.s | 144 ++ .../segmentio/asm/base64/encode_amd64.go | 8 + .../segmentio/asm/base64/encode_amd64.s | 88 + .../github.com/segmentio/asm/cpu/arm/arm.go | 80 + .../segmentio/asm/cpu/arm64/arm64.go | 74 + vendor/github.com/segmentio/asm/cpu/cpu.go | 22 + .../segmentio/asm/cpu/cpuid/cpuid.go | 32 + .../github.com/segmentio/asm/cpu/x86/x86.go | 76 + .../asm/internal/unsafebytes/unsafebytes.go | 20 + .../github.com/segmentio/asm/keyset/keyset.go | 40 + .../segmentio/asm/keyset/keyset_amd64.go | 10 + .../segmentio/asm/keyset/keyset_amd64.s | 108 ++ .../segmentio/asm/keyset/keyset_arm64.go | 8 + .../segmentio/asm/keyset/keyset_arm64.s | 143 ++ .../segmentio/asm/keyset/keyset_default.go | 19 + vendor/github.com/segmentio/encoding/LICENSE | 21 + .../segmentio/encoding/ascii/equal_fold.go | 40 + 
.../segmentio/encoding/ascii/valid.go | 26 + .../segmentio/encoding/ascii/valid_print.go | 26 + .../segmentio/encoding/iso8601/parse.go | 185 ++ .../segmentio/encoding/iso8601/valid.go | 179 ++ .../segmentio/encoding/json/README.md | 76 + .../segmentio/encoding/json/codec.go | 1240 +++++++++++++ .../segmentio/encoding/json/decode.go | 1534 +++++++++++++++++ .../segmentio/encoding/json/encode.go | 970 +++++++++++ .../github.com/segmentio/encoding/json/int.go | 98 ++ .../segmentio/encoding/json/json.go | 594 +++++++ .../segmentio/encoding/json/parse.go | 781 +++++++++ .../segmentio/encoding/json/reflect.go | 20 + .../encoding/json/reflect_optimize.go | 30 + .../segmentio/encoding/json/string.go | 89 + .../segmentio/encoding/json/token.go | 426 +++++ vendor/modules.txt | 22 +- 87 files changed, 10613 insertions(+), 574 deletions(-) create mode 100644 vendor/github.com/modelcontextprotocol/go-sdk/auth/authorization_code.go create mode 100644 vendor/github.com/modelcontextprotocol/go-sdk/auth/client_private.go create mode 100644 vendor/github.com/modelcontextprotocol/go-sdk/internal/json/json.go create mode 100644 vendor/github.com/modelcontextprotocol/go-sdk/internal/mcpgodebug/mcpgodebug.go create mode 100644 vendor/github.com/modelcontextprotocol/go-sdk/internal/util/net.go delete mode 100644 vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource_go124.go delete mode 100644 vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource_pre_go124.go create mode 100644 vendor/github.com/modelcontextprotocol/go-sdk/oauthex/resource_meta_public.go create mode 100644 vendor/github.com/segmentio/asm/LICENSE create mode 100644 vendor/github.com/segmentio/asm/ascii/ascii.go create mode 100644 vendor/github.com/segmentio/asm/ascii/equal_fold.go create mode 100644 vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go create mode 100644 vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s create mode 100644 vendor/github.com/segmentio/asm/ascii/equal_fold_default.go 
create mode 100644 vendor/github.com/segmentio/asm/ascii/valid.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_amd64.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_amd64.s create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_default.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_print.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_print_default.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64_default.go create mode 100644 vendor/github.com/segmentio/asm/base64/decode_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/decode_amd64.s create mode 100644 vendor/github.com/segmentio/asm/base64/encode_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/encode_amd64.s create mode 100644 vendor/github.com/segmentio/asm/cpu/arm/arm.go create mode 100644 vendor/github.com/segmentio/asm/cpu/arm64/arm64.go create mode 100644 vendor/github.com/segmentio/asm/cpu/cpu.go create mode 100644 vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go create mode 100644 vendor/github.com/segmentio/asm/cpu/x86/x86.go create mode 100644 vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset.go create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_amd64.go create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_amd64.s create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_arm64.go create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_arm64.s create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_default.go create mode 100644 
vendor/github.com/segmentio/encoding/LICENSE create mode 100644 vendor/github.com/segmentio/encoding/ascii/equal_fold.go create mode 100644 vendor/github.com/segmentio/encoding/ascii/valid.go create mode 100644 vendor/github.com/segmentio/encoding/ascii/valid_print.go create mode 100644 vendor/github.com/segmentio/encoding/iso8601/parse.go create mode 100644 vendor/github.com/segmentio/encoding/iso8601/valid.go create mode 100644 vendor/github.com/segmentio/encoding/json/README.md create mode 100644 vendor/github.com/segmentio/encoding/json/codec.go create mode 100644 vendor/github.com/segmentio/encoding/json/decode.go create mode 100644 vendor/github.com/segmentio/encoding/json/encode.go create mode 100644 vendor/github.com/segmentio/encoding/json/int.go create mode 100644 vendor/github.com/segmentio/encoding/json/json.go create mode 100644 vendor/github.com/segmentio/encoding/json/parse.go create mode 100644 vendor/github.com/segmentio/encoding/json/reflect.go create mode 100644 vendor/github.com/segmentio/encoding/json/reflect_optimize.go create mode 100644 vendor/github.com/segmentio/encoding/json/string.go create mode 100644 vendor/github.com/segmentio/encoding/json/token.go diff --git a/go.mod b/go.mod index 14ffa42e..28d89594 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/coder/coder/v2 v2.30.0 github.com/google/uuid v1.6.0 github.com/lib/pq v1.10.9 - github.com/modelcontextprotocol/go-sdk v1.3.0 + github.com/modelcontextprotocol/go-sdk v1.4.1 github.com/stretchr/testify v1.11.1 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da k8s.io/api v0.35.0 @@ -296,6 +296,8 @@ require ( github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect github.com/securego/gosec/v2 v2.22.11 // indirect + github.com/segmentio/asm v1.1.3 // indirect + github.com/segmentio/encoding v0.5.4 // indirect github.com/shirou/gopsutil/v4 v4.25.12 // indirect github.com/shopspring/decimal 
v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect diff --git a/go.sum b/go.sum index e704cbcc..b88451f5 100644 --- a/go.sum +++ b/go.sum @@ -577,8 +577,8 @@ github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modelcontextprotocol/go-sdk v1.3.0 h1:gMfZkv3DzQF5q/DcQePo5rahEY+sguyPfXDfNBcT0Zs= -github.com/modelcontextprotocol/go-sdk v1.3.0/go.mod h1:AnQ//Qc6+4nIyyrB4cxBU7UW9VibK4iOZBeyP/rF1IE= +github.com/modelcontextprotocol/go-sdk v1.4.1 h1:M4x9GyIPj+HoIlHNGpK2hq5o3BFhC+78PkEaldQRphc= +github.com/modelcontextprotocol/go-sdk v1.4.1/go.mod h1:Bo/mS87hPQqHSRkMv4dQq1XCu6zv4INdXnFZabkNU6s= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -698,6 +698,10 @@ github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3 github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= github.com/securego/gosec/v2 v2.22.11 h1:tW+weM/hCM/GX3iaCV91d5I6hqaRT2TPsFM1+USPXwg= github.com/securego/gosec/v2 v2.22.11/go.mod h1:KE4MW/eH0GLWztkbt4/7XpyH0zJBBnu7sYB4l6Wn7Mw= +github.com/segmentio/asm v1.1.3 h1:WM03sfUOENvvKexOLp+pCqgb/WDjsi7EK8gIsICtzhc= +github.com/segmentio/asm v1.1.3/go.mod h1:Ld3L4ZXGNcSLRg4JBsZ3//1+f/TjYl0Mzen/DQy1EJg= +github.com/segmentio/encoding v0.5.4 h1:OW1VRern8Nw6ITAtwSZ7Idrl3MXCFwXHPgqESYfvNt0= +github.com/segmentio/encoding 
v0.5.4/go.mod h1:HS1ZKa3kSN32ZHVZ7ZLPLXWvOVIiZtyJnO1gPH1sKt0= github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shirou/gopsutil/v4 v4.25.12 h1:e7PvW/0RmJ8p8vPGJH4jvNkOyLmbkXgXW4m6ZPic6CY= diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/LICENSE b/vendor/github.com/modelcontextprotocol/go-sdk/LICENSE index 508be926..5791499c 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/LICENSE +++ b/vendor/github.com/modelcontextprotocol/go-sdk/LICENSE @@ -1,6 +1,193 @@ +The MCP project is undergoing a licensing transition from the MIT License to the Apache License, Version 2.0 ("Apache-2.0"). All new code and specification contributions to the project are licensed under Apache-2.0. Documentation contributions (excluding specifications) are licensed under CC-BY-4.0. + +Contributions for which relicensing consent has been obtained are licensed under Apache-2.0. Contributions made by authors who originally licensed their work under the MIT License and who have not yet granted explicit permission to relicense remain licensed under the MIT License. + +No rights beyond those granted by the applicable original license are conveyed for such contributions. + +--- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright + owner or by an individual or Legal Entity authorized to submit on behalf + of the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +--- + MIT License -Copyright (c) 2025 Go MCP SDK Authors +Copyright (c) 2024-2025 Model Context Protocol a Series of LF Projects, LLC. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,3 +206,11 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +--- + +Creative Commons Attribution 4.0 International (CC-BY-4.0) + +Documentation in this project (excluding specifications) is licensed under +CC-BY-4.0. See https://creativecommons.org/licenses/by/4.0/legalcode for +the full license text. diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/auth/auth.go b/vendor/github.com/modelcontextprotocol/go-sdk/auth/auth.go index 87665121..36ff259e 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/auth/auth.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/auth/auth.go @@ -25,8 +25,7 @@ type TokenInfo struct { // session hijacking by ensuring that all requests for a given session // come from the same user. UserID string - // TODO: add standard JWT fields - Extra map[string]any + Extra map[string]any } // The error that a TokenVerifier should return if the token cannot be verified. @@ -106,6 +105,9 @@ func verify(req *http.Request, verifier TokenVerifier, opts *RequireBearerTokenO } return nil, err.Error(), http.StatusInternalServerError } + if tokenInfo == nil { + return nil, "token validation failed", http.StatusInternalServerError + } // Check scopes. All must be present. if opts != nil { diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/auth/authorization_code.go b/vendor/github.com/modelcontextprotocol/go-sdk/auth/authorization_code.go new file mode 100644 index 00000000..1190836e --- /dev/null +++ b/vendor/github.com/modelcontextprotocol/go-sdk/auth/authorization_code.go @@ -0,0 +1,565 @@ +// Copyright 2026 The Go MCP SDK Authors. All rights reserved. +// Use of this source code is governed by the license +// that can be found in the LICENSE file. + +//go:build mcp_go_client_oauth + +package auth + +import ( + "context" + "crypto/rand" + "errors" + "fmt" + "net/http" + "net/url" + "slices" + "strings" + + "github.com/modelcontextprotocol/go-sdk/oauthex" + "golang.org/x/oauth2" +) + +// ClientSecretAuthConfig is used to configure client authentication using client_secret. 
+// Authentication method will be selected based on the authorization server's supported methods, +// according to the following preference order: +// 1. client_secret_post +// 2. client_secret_basic +type ClientSecretAuthConfig struct { + // ClientID is the client ID to be used for client authentication. + ClientID string + // ClientSecret is the client secret to be used for client authentication. + ClientSecret string +} + +// ClientIDMetadataDocumentConfig is used to configure the Client ID Metadata Document +// based client registration per +// https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#client-id-metadata-documents. +// See https://client.dev/ for more information. +type ClientIDMetadataDocumentConfig struct { + // URL is the client identifier URL as per + // https://datatracker.ietf.org/doc/html/draft-ietf-oauth-client-id-metadata-document-00#section-3. + URL string +} + +// PreregisteredClientConfig is used to configure a pre-registered client per +// https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#preregistration. +// Currently only "client_secret_basic" and "client_secret_post" authentication methods are supported. +type PreregisteredClientConfig struct { + // ClientSecretAuthConfig is the client_secret based configuration to be used for client authentication. + ClientSecretAuthConfig *ClientSecretAuthConfig +} + +// DynamicClientRegistrationConfig is used to configure dynamic client registration per +// https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#dynamic-client-registration. +type DynamicClientRegistrationConfig struct { + // Metadata to be used in dynamic client registration request as per + // https://datatracker.ietf.org/doc/html/rfc7591#section-2. + Metadata *oauthex.ClientRegistrationMetadata +} + +// AuthorizationResult is the result of an authorization flow. +// It is returned by [AuthorizationCodeHandler].AuthorizationCodeFetcher implementations. 
+type AuthorizationResult struct { + // Code is the authorization code obtained from the authorization server. + Code string + // State string returned by the authorization server. + State string +} + +// AuthorizationArgs is the input to [AuthorizationCodeHandlerConfig].AuthorizationCodeFetcher. +type AuthorizationArgs struct { + // Authorization URL to be opened in a browser for the user to start the authorization process. + URL string +} + +// AuthorizationCodeHandlerConfig is the configuration for [AuthorizationCodeHandler]. +type AuthorizationCodeHandlerConfig struct { + // Client registration configuration. + // It is attempted in the following order: + // 1. Client ID Metadata Document + // 2. Preregistration + // 3. Dynamic Client Registration + // At least one method must be configured. + ClientIDMetadataDocumentConfig *ClientIDMetadataDocumentConfig + PreregisteredClientConfig *PreregisteredClientConfig + DynamicClientRegistrationConfig *DynamicClientRegistrationConfig + + // RedirectURL is a required URL to redirect to after authorization. + // The caller is responsible for handling the redirect out of band. + // + // If Dynamic Client Registration is used: + // - this field is permitted to be empty, in which case it will be set + // to the first redirect URI from + // DynamicClientRegistrationConfig.Metadata.RedirectURIs. + // - if the field is not empty, it must be one of the redirect URIs in + // DynamicClientRegistrationConfig.Metadata.RedirectURIs. + RedirectURL string + + // AuthorizationCodeFetcher is a required function called to initiate the authorization flow. + // It is responsible for opening the URL in a browser for the user to start the authorization process. + // It should return the authorization code and state once the Authorization Server + // redirects back to the RedirectURL. 
+ AuthorizationCodeFetcher func(ctx context.Context, args *AuthorizationArgs) (*AuthorizationResult, error) + + // Client is an optional HTTP client to use for HTTP requests. + // It is used for the following requests: + // - Fetching Protected Resource Metadata + // - Fetching Authorization Server Metadata + // - Registering a client dynamically + // - Exchanging an authorization code for an access token + // - Refreshing an access token + // Custom clients can include additional security configurations, + // such as SSRF protections, see + // https://modelcontextprotocol.io/docs/tutorials/security/security_best_practices#server-side-request-forgery-ssrf + // If not provided, http.DefaultClient will be used. + Client *http.Client +} + +// AuthorizationCodeHandler is an implementation of [OAuthHandler] that uses +// the authorization code flow to obtain access tokens. +type AuthorizationCodeHandler struct { + config *AuthorizationCodeHandlerConfig + + // tokenSource is the token source to use for authorization. + tokenSource oauth2.TokenSource +} + +var _ OAuthHandler = (*AuthorizationCodeHandler)(nil) + +func (h *AuthorizationCodeHandler) isOAuthHandler() {} + +func (h *AuthorizationCodeHandler) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + return h.tokenSource, nil +} + +// NewAuthorizationCodeHandler creates a new AuthorizationCodeHandler. +// It performs validation of the configuration and returns an error if it is invalid. +// The passed config is consumed by the handler and should not be modified after. 
+func NewAuthorizationCodeHandler(config *AuthorizationCodeHandlerConfig) (*AuthorizationCodeHandler, error) { + if config == nil { + return nil, errors.New("config must be provided") + } + if config.ClientIDMetadataDocumentConfig == nil && + config.PreregisteredClientConfig == nil && + config.DynamicClientRegistrationConfig == nil { + return nil, errors.New("at least one client registration configuration must be provided") + } + if config.AuthorizationCodeFetcher == nil { + return nil, errors.New("AuthorizationCodeFetcher is required") + } + if config.ClientIDMetadataDocumentConfig != nil && !isNonRootHTTPSURL(config.ClientIDMetadataDocumentConfig.URL) { + return nil, fmt.Errorf("client ID metadata document URL must be a non-root HTTPS URL") + } + preCfg := config.PreregisteredClientConfig + if preCfg != nil { + if preCfg.ClientSecretAuthConfig == nil { + return nil, errors.New("ClientSecretAuthConfig is required for pre-registered client") + } + if preCfg.ClientSecretAuthConfig.ClientID == "" || preCfg.ClientSecretAuthConfig.ClientSecret == "" { + return nil, fmt.Errorf("pre-registered client ID or secret is empty") + } + } + dCfg := config.DynamicClientRegistrationConfig + if dCfg != nil { + if dCfg.Metadata == nil { + return nil, errors.New("Metadata is required for dynamic client registration") + } + if len(dCfg.Metadata.RedirectURIs) == 0 { + return nil, errors.New("Metadata.RedirectURIs is required for dynamic client registration") + } + if config.RedirectURL == "" { + config.RedirectURL = dCfg.Metadata.RedirectURIs[0] + } else if !slices.Contains(dCfg.Metadata.RedirectURIs, config.RedirectURL) { + return nil, fmt.Errorf("RedirectURL %q is not in the list of allowed redirect URIs for dynamic client registration", config.RedirectURL) + } + } + if config.RedirectURL == "" { + // If the RedirectURL was supposed to be set by the dynamic client registration, + // it should have been set by now. Otherwise, it is required. 
+ return nil, errors.New("RedirectURL is required") + } + if config.Client == nil { + config.Client = http.DefaultClient + } + return &AuthorizationCodeHandler{config: config}, nil +} + +func isNonRootHTTPSURL(u string) bool { + pu, err := url.Parse(u) + if err != nil { + return false + } + return pu.Scheme == "https" && pu.Path != "" +} + +// Authorize performs the authorization flow. +// It is designed to perform the whole Authorization Code Grant flow. +// On success, [AuthorizationCodeHandler.TokenSource] will return a token source with the fetched token. +func (h *AuthorizationCodeHandler) Authorize(ctx context.Context, req *http.Request, resp *http.Response) error { + defer resp.Body.Close() + + wwwChallenges, err := oauthex.ParseWWWAuthenticate(resp.Header[http.CanonicalHeaderKey("WWW-Authenticate")]) + if err != nil { + return fmt.Errorf("failed to parse WWW-Authenticate header: %v", err) + } + + if resp.StatusCode == http.StatusForbidden && errorFromChallenges(wwwChallenges) != "insufficient_scope" { + // We only want to perform step-up authorization for insufficient_scope errors. + // Returning nil, so that the call is retried immediately and the response + // is handled appropriately by the connection. 
+ // Step-up authorization is defined at + // https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#step-up-authorization-flow + return nil + } + + prm, err := h.getProtectedResourceMetadata(ctx, wwwChallenges, req.URL.String()) + if err != nil { + return err + } + + asm, err := h.getAuthServerMetadata(ctx, prm) + if err != nil { + return err + } + + resolvedClientConfig, err := h.handleRegistration(ctx, asm) + if err != nil { + return err + } + + scps := scopesFromChallenges(wwwChallenges) + if len(scps) == 0 && len(prm.ScopesSupported) > 0 { + scps = prm.ScopesSupported + } + + cfg := &oauth2.Config{ + ClientID: resolvedClientConfig.clientID, + ClientSecret: resolvedClientConfig.clientSecret, + + Endpoint: oauth2.Endpoint{ + AuthURL: asm.AuthorizationEndpoint, + TokenURL: asm.TokenEndpoint, + AuthStyle: resolvedClientConfig.authStyle, + }, + RedirectURL: h.config.RedirectURL, + Scopes: scps, + } + + authRes, err := h.getAuthorizationCode(ctx, cfg, req.URL.String()) + if err != nil { + // Purposefully leaving the error unwrappable so it can be handled by the caller. + return err + } + + return h.exchangeAuthorizationCode(ctx, cfg, authRes, prm.Resource) +} + +// resourceMetadataURLFromChallenges returns a resource metadata URL from the given "WWW-Authenticate" header challenges, +// or the empty string if there is none. +func resourceMetadataURLFromChallenges(cs []oauthex.Challenge) string { + for _, c := range cs { + if u := c.Params["resource_metadata"]; u != "" { + return u + } + } + return "" +} + +// scopesFromChallenges returns the scopes from the given "WWW-Authenticate" header challenges. +// It only looks at challenges with the "Bearer" scheme. 
+func scopesFromChallenges(cs []oauthex.Challenge) []string { + for _, c := range cs { + if c.Scheme == "bearer" && c.Params["scope"] != "" { + return strings.Fields(c.Params["scope"]) + } + } + return nil +} + +// errorFromChallenges returns the error from the given "WWW-Authenticate" header challenges. +// It only looks at challenges with the "Bearer" scheme. +func errorFromChallenges(cs []oauthex.Challenge) string { + for _, c := range cs { + if c.Scheme == "bearer" && c.Params["error"] != "" { + return c.Params["error"] + } + } + return "" +} + +// getProtectedResourceMetadata returns the protected resource metadata. +// If no metadata was found or the fetched metadata fails security checks, +// it returns an error. +func (h *AuthorizationCodeHandler) getProtectedResourceMetadata(ctx context.Context, wwwChallenges []oauthex.Challenge, mcpServerURL string) (*oauthex.ProtectedResourceMetadata, error) { + var errs []error + // Use MCP server URL as the resource URI per + // https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#canonical-server-uri. + for _, url := range protectedResourceMetadataURLs(resourceMetadataURLFromChallenges(wwwChallenges), mcpServerURL) { + prm, err := oauthex.GetProtectedResourceMetadata(ctx, url.URL, url.Resource, h.config.Client) + if err != nil { + errs = append(errs, err) + continue + } + if prm == nil { + errs = append(errs, fmt.Errorf("protected resource metadata is nil")) + continue + } + return prm, nil + } + return nil, fmt.Errorf("failed to get protected resource metadata: %v", errors.Join(errs...)) +} + +type prmURL struct { + // URL represents a URL where Protected Resource Metadata may be retrieved. + URL string + // Resource represents the corresponding resource URL for [URL]. + // It is required to perform validation described in RFC 9728, section 3.3. 
+ Resource string +} + +// protectedResourceMetadataURLs returns a list of URLs to try when looking for +// protected resource metadata as mandated by the MCP specification: +// https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#protected-resource-metadata-discovery-requirements +func protectedResourceMetadataURLs(metadataURL, resourceURL string) []prmURL { + var urls []prmURL + if metadataURL != "" { + urls = append(urls, prmURL{ + URL: metadataURL, + Resource: resourceURL, + }) + } + ru, err := url.Parse(resourceURL) + if err != nil { + return urls + } + mu := *ru + // "At the path of the server's MCP endpoint". + mu.Path = "/.well-known/oauth-protected-resource/" + strings.TrimLeft(ru.Path, "/") + urls = append(urls, prmURL{ + URL: mu.String(), + Resource: resourceURL, + }) + // "At the root". + mu.Path = "/.well-known/oauth-protected-resource" + ru.Path = "" + urls = append(urls, prmURL{ + URL: mu.String(), + Resource: ru.String(), + }) + return urls +} + +// getAuthServerMetadata returns the authorization server metadata. +// The provided Protected Resource Metadata must not be nil. +// It returns an error if the metadata request fails with non-4xx HTTP status code +// or the fetched metadata fails security checks. +// If no metadata was found, it returns a minimal set of endpoints +// as a fallback to 2025-03-26 spec. +func (h *AuthorizationCodeHandler) getAuthServerMetadata(ctx context.Context, prm *oauthex.ProtectedResourceMetadata) (*oauthex.AuthServerMeta, error) { + var authServerURL string + if len(prm.AuthorizationServers) > 0 { + // Use the first authorization server, similarly to other SDKs. + authServerURL = prm.AuthorizationServers[0] + } else { + // Fallback to 2025-03-26 spec: MCP server base URL acts as Authorization Server. 
+ authURL, err := url.Parse(prm.Resource) + if err != nil { + return nil, fmt.Errorf("failed to parse resource URL: %v", err) + } + authURL.Path = "" + authServerURL = authURL.String() + } + + for _, u := range authorizationServerMetadataURLs(authServerURL) { + asm, err := oauthex.GetAuthServerMeta(ctx, u, authServerURL, h.config.Client) + if err != nil { + return nil, fmt.Errorf("failed to get authorization server metadata: %w", err) + } + if asm != nil { + return asm, nil + } + } + + // Fallback to 2025-03-26 spec: predefined endpoints. + // https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization#fallbacks-for-servers-without-metadata-discovery + asm := &oauthex.AuthServerMeta{ + Issuer: authServerURL, + AuthorizationEndpoint: authServerURL + "/authorize", + TokenEndpoint: authServerURL + "/token", + RegistrationEndpoint: authServerURL + "/register", + } + return asm, nil +} + +// authorizationServerMetadataURLs returns a list of URLs to try when looking for +// authorization server metadata as mandated by the MCP specification: +// https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#authorization-server-metadata-discovery. +func authorizationServerMetadataURLs(issuerURL string) []string { + var urls []string + + baseURL, err := url.Parse(issuerURL) + if err != nil { + return nil + } + + if baseURL.Path == "" { + // "OAuth 2.0 Authorization Server Metadata". + baseURL.Path = "/.well-known/oauth-authorization-server" + urls = append(urls, baseURL.String()) + // "OpenID Connect Discovery 1.0". + baseURL.Path = "/.well-known/openid-configuration" + urls = append(urls, baseURL.String()) + return urls + } + + originalPath := baseURL.Path + // "OAuth 2.0 Authorization Server Metadata with path insertion". + baseURL.Path = "/.well-known/oauth-authorization-server/" + strings.TrimLeft(originalPath, "/") + urls = append(urls, baseURL.String()) + // "OpenID Connect Discovery 1.0 with path insertion". 
+ baseURL.Path = "/.well-known/openid-configuration/" + strings.TrimLeft(originalPath, "/") + urls = append(urls, baseURL.String()) + // "OpenID Connect Discovery 1.0 with path appending". + baseURL.Path = "/" + strings.Trim(originalPath, "/") + "/.well-known/openid-configuration" + urls = append(urls, baseURL.String()) + return urls +} + +type registrationType int + +const ( + registrationTypeClientIDMetadataDocument registrationType = iota + registrationTypePreregistered + registrationTypeDynamic +) + +type resolvedClientConfig struct { + registrationType registrationType + clientID string + clientSecret string + authStyle oauth2.AuthStyle +} + +func selectTokenAuthMethod(supported []string) oauth2.AuthStyle { + prefOrder := []string{ + // Preferred in OAuth 2.1 draft: https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-14.html#name-client-secret. + "client_secret_post", + "client_secret_basic", + } + for _, method := range prefOrder { + if slices.Contains(supported, method) { + return authMethodToStyle(method) + } + } + return oauth2.AuthStyleAutoDetect +} + +func authMethodToStyle(method string) oauth2.AuthStyle { + switch method { + case "client_secret_post": + return oauth2.AuthStyleInParams + case "client_secret_basic": + return oauth2.AuthStyleInHeader + case "none": + // "none" is equivalent to "client_secret_post" but without sending client secret. + return oauth2.AuthStyleInParams + default: + // "client_secret_basic" is the default per https://datatracker.ietf.org/doc/html/rfc7591#section-2. + return oauth2.AuthStyleInHeader + } +} + +// handleRegistration handles client registration. +// The provided authorization server metadata must be non-nil. +// Support for different registration methods is defined as follows: +// - Client ID Metadata Document: metadata must have +// `ClientIDMetadataDocumentSupported` set to true. +// - Pre-registered client: assumed to be supported. 
+// - Dynamic client registration: metadata must have +// `RegistrationEndpoint` set to a non-empty value. +func (h *AuthorizationCodeHandler) handleRegistration(ctx context.Context, asm *oauthex.AuthServerMeta) (*resolvedClientConfig, error) { + // 1. Attempt to use Client ID Metadata Document (SEP-991). + cimdCfg := h.config.ClientIDMetadataDocumentConfig + if cimdCfg != nil && asm.ClientIDMetadataDocumentSupported { + return &resolvedClientConfig{ + registrationType: registrationTypeClientIDMetadataDocument, + clientID: cimdCfg.URL, + }, nil + } + // 2. Attempt to use pre-registered client configuration. + pCfg := h.config.PreregisteredClientConfig + if pCfg != nil { + authStyle := selectTokenAuthMethod(asm.TokenEndpointAuthMethodsSupported) + return &resolvedClientConfig{ + registrationType: registrationTypePreregistered, + clientID: pCfg.ClientSecretAuthConfig.ClientID, + clientSecret: pCfg.ClientSecretAuthConfig.ClientSecret, + authStyle: authStyle, + }, nil + } + // 3. Attempt to use dynamic client registration. + dcrCfg := h.config.DynamicClientRegistrationConfig + if dcrCfg != nil && asm.RegistrationEndpoint != "" { + regResp, err := oauthex.RegisterClient(ctx, asm.RegistrationEndpoint, dcrCfg.Metadata, h.config.Client) + if err != nil { + return nil, fmt.Errorf("failed to register client: %w", err) + } + cfg := &resolvedClientConfig{ + registrationType: registrationTypeDynamic, + clientID: regResp.ClientID, + clientSecret: regResp.ClientSecret, + authStyle: authMethodToStyle(regResp.TokenEndpointAuthMethod), + } + return cfg, nil + } + return nil, fmt.Errorf("no configured client registration methods are supported by the authorization server") +} + +type authResult struct { + *AuthorizationResult + // usedCodeVerifier is the PKCE code verifier used to obtain the authorization code. + // It is preserved for the token exchange step. 
+ usedCodeVerifier string +} + +// getAuthorizationCode uses the [AuthorizationCodeHandler.AuthorizationCodeFetcher] +// to obtain an authorization code. +func (h *AuthorizationCodeHandler) getAuthorizationCode(ctx context.Context, cfg *oauth2.Config, resourceURL string) (*authResult, error) { + codeVerifier := oauth2.GenerateVerifier() + state := rand.Text() + + authURL := cfg.AuthCodeURL(state, + oauth2.S256ChallengeOption(codeVerifier), + oauth2.SetAuthURLParam("resource", resourceURL), + ) + + authRes, err := h.config.AuthorizationCodeFetcher(ctx, &AuthorizationArgs{URL: authURL}) + if err != nil { + // Purposefully leaving the error unwrappable so it can be handled by the caller. + return nil, err + } + if authRes.State != state { + return nil, fmt.Errorf("state mismatch") + } + return &authResult{ + AuthorizationResult: authRes, + usedCodeVerifier: codeVerifier, + }, nil +} + +// exchangeAuthorizationCode exchanges the authorization code for a token +// and stores it in a token source. +func (h *AuthorizationCodeHandler) exchangeAuthorizationCode(ctx context.Context, cfg *oauth2.Config, authResult *authResult, resourceURL string) error { + opts := []oauth2.AuthCodeOption{ + oauth2.VerifierOption(authResult.usedCodeVerifier), + oauth2.SetAuthURLParam("resource", resourceURL), + } + clientCtx := context.WithValue(ctx, oauth2.HTTPClient, h.config.Client) + token, err := cfg.Exchange(clientCtx, authResult.Code, opts...) 
+ if err != nil { + return fmt.Errorf("token exchange failed: %w", err) + } + h.tokenSource = cfg.TokenSource(clientCtx, token) + return nil +} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/auth/client.go b/vendor/github.com/modelcontextprotocol/go-sdk/auth/client.go index acadc51b..0af6963f 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/auth/client.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/auth/client.go @@ -2,122 +2,41 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. -//go:build mcp_go_client_oauth - package auth import ( - "bytes" - "errors" - "io" + "context" "net/http" - "sync" "golang.org/x/oauth2" ) -// An OAuthHandler conducts an OAuth flow and returns a [oauth2.TokenSource] if the authorization -// is approved, or an error if not. -// The handler receives the HTTP request and response that triggered the authentication flow. -// To obtain the protected resource metadata, call [oauthex.GetProtectedResourceMetadataFromHeader]. -type OAuthHandler func(req *http.Request, res *http.Response) (oauth2.TokenSource, error) - -// HTTPTransport is an [http.RoundTripper] that follows the MCP -// OAuth protocol when it encounters a 401 Unauthorized response. -type HTTPTransport struct { - handler OAuthHandler - mu sync.Mutex // protects opts.Base - opts HTTPTransportOptions -} - -// NewHTTPTransport returns a new [*HTTPTransport]. -// The handler is invoked when an HTTP request results in a 401 Unauthorized status. -// It is called only once per transport. Once a TokenSource is obtained, it is used -// for the lifetime of the transport; subsequent 401s are not processed. 
-func NewHTTPTransport(handler OAuthHandler, opts *HTTPTransportOptions) (*HTTPTransport, error) { - if handler == nil { - return nil, errors.New("handler cannot be nil") - } - t := &HTTPTransport{ - handler: handler, - } - if opts != nil { - t.opts = *opts - } - if t.opts.Base == nil { - t.opts.Base = http.DefaultTransport - } - return t, nil -} - -// HTTPTransportOptions are options to [NewHTTPTransport]. -type HTTPTransportOptions struct { - // Base is the [http.RoundTripper] to use. - // If nil, [http.DefaultTransport] is used. - Base http.RoundTripper -} - -func (t *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) { - t.mu.Lock() - base := t.opts.Base - t.mu.Unlock() - - var ( - // If haveBody is set, the request has a nontrivial body, and we need avoid - // reading (or closing) it multiple times. In that case, bodyBytes is its - // content. - haveBody bool - bodyBytes []byte - ) - if req.Body != nil && req.Body != http.NoBody { - // if we're setting Body, we must mutate first. - req = req.Clone(req.Context()) - haveBody = true - var err error - bodyBytes, err = io.ReadAll(req.Body) - if err != nil { - return nil, err - } - // Now that we've read the request body, http.RoundTripper requires that we - // close it. - req.Body.Close() // ignore error - req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) - } - - resp, err := base.RoundTrip(req) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusUnauthorized { - return resp, nil - } - if _, ok := base.(*oauth2.Transport); ok { - // We failed to authorize even with a token source; give up. - return resp, nil - } - - resp.Body.Close() - // Try to authorize. - t.mu.Lock() - defer t.mu.Unlock() - // If we don't have a token source, get one by following the OAuth flow. - // (We may have obtained one while t.mu was not held above.) - // TODO: We hold the lock for the entire OAuth flow. This could be a long - // time. Is there a better way? 
- if _, ok := t.opts.Base.(*oauth2.Transport); !ok { - ts, err := t.handler(req, resp) - if err != nil { - return nil, err - } - t.opts.Base = &oauth2.Transport{Base: t.opts.Base, Source: ts} - } - - // If we don't have a body, the request is reusable, though it will be cloned - // by the base. However, if we've had to read the body, we must clone. - if haveBody { - req = req.Clone(req.Context()) - req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) - } - - return t.opts.Base.RoundTrip(req) +// OAuthHandler is an interface for handling OAuth flows. +// +// If a transport wishes to support OAuth 2 authorization, it should support +// being configured with an OAuthHandler. It should call the handler's +// TokenSource method whenever it sends an HTTP request to set the +// Authorization header. If a request fails with a 401 or 403, it should call +// Authorize, and if that returns nil, it should retry the request. It should +// not call Authorize after the second failure. See +// [github.com/modelcontextprotocol/go-sdk/mcp.StreamableClientTransport] +// for an example. +type OAuthHandler interface { + isOAuthHandler() + + // TokenSource returns a token source to be used for outgoing requests. + // Returned token source might be nil. In that case, the transport will not + // add any authorization headers to the request. + TokenSource(context.Context) (oauth2.TokenSource, error) + + // Authorize is called when an HTTP request results in an error that may + // be addressed by the authorization flow (currently 401 Unauthorized and 403 Forbidden). + // It is responsible for performing the OAuth flow to obtain an access token. + // The arguments are the request that failed and the response that was received for it. + // The headers of the request are available, but the body will have already been consumed + // when Authorize is called. + // If the returned error is nil, TokenSource is expected to return a non-nil token source. 
+ // After a successful call to Authorize, the HTTP request will be retried by the transport. + // The function is responsible for closing the response body. + Authorize(context.Context, *http.Request, *http.Response) error } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/auth/client_private.go b/vendor/github.com/modelcontextprotocol/go-sdk/auth/client_private.go new file mode 100644 index 00000000..767c59ee --- /dev/null +++ b/vendor/github.com/modelcontextprotocol/go-sdk/auth/client_private.go @@ -0,0 +1,135 @@ +// Copyright 2025 The Go MCP SDK Authors. All rights reserved. +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +//go:build mcp_go_client_oauth + +package auth + +import ( + "bytes" + "errors" + "io" + "net/http" + "sync" + + "golang.org/x/oauth2" +) + +// An OAuthHandlerLegacy conducts an OAuth flow and returns a [oauth2.TokenSource] if the authorization +// is approved, or an error if not. +// The handler receives the HTTP request and response that triggered the authentication flow. +// To obtain the protected resource metadata, call [oauthex.GetProtectedResourceMetadataFromHeader]. +// +// Deprecated: Please use the new [OAuthHandler] abstraction that is built +// into the streamable transport. This struct will be removed in v1.5.0. +type OAuthHandlerLegacy func(req *http.Request, res *http.Response) (oauth2.TokenSource, error) + +// HTTPTransport is an [http.RoundTripper] that follows the MCP +// OAuth protocol when it encounters a 401 Unauthorized response. +// +// Deprecated: Please use the new [OAuthHandler] abstraction that is built +// into the streamable transport. This struct will be removed in v1.5.0. +type HTTPTransport struct { + handler OAuthHandlerLegacy + mu sync.Mutex // protects opts.Base + opts HTTPTransportOptions +} + +// NewHTTPTransport returns a new [*HTTPTransport]. +// The handler is invoked when an HTTP request results in a 401 Unauthorized status. 
+// It is called only once per transport. Once a TokenSource is obtained, it is used +// for the lifetime of the transport; subsequent 401s are not processed. +// +// Deprecated: Please use the new [OAuthHandler] abstraction that is built +// into the streamable transport. This struct will be removed in v1.5.0. +func NewHTTPTransport(handler OAuthHandlerLegacy, opts *HTTPTransportOptions) (*HTTPTransport, error) { + if handler == nil { + return nil, errors.New("handler cannot be nil") + } + t := &HTTPTransport{ + handler: handler, + } + if opts != nil { + t.opts = *opts + } + if t.opts.Base == nil { + t.opts.Base = http.DefaultTransport + } + return t, nil +} + +// HTTPTransportOptions are options to [NewHTTPTransport]. +// +// Deprecated: Please use the new [OAuthHandler] abstraction that is built +// into the streamable transport. This struct will be removed in v1.5.0. +type HTTPTransportOptions struct { + // Base is the [http.RoundTripper] to use. + // If nil, [http.DefaultTransport] is used. + Base http.RoundTripper +} + +func (t *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) { + t.mu.Lock() + base := t.opts.Base + t.mu.Unlock() + + var ( + // If haveBody is set, the request has a nontrivial body, and we need avoid + // reading (or closing) it multiple times. In that case, bodyBytes is its + // content. + haveBody bool + bodyBytes []byte + ) + if req.Body != nil && req.Body != http.NoBody { + // if we're setting Body, we must mutate first. + req = req.Clone(req.Context()) + haveBody = true + var err error + bodyBytes, err = io.ReadAll(req.Body) + if err != nil { + return nil, err + } + // Now that we've read the request body, http.RoundTripper requires that we + // close it. 
+ req.Body.Close() // ignore error + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + } + + resp, err := base.RoundTrip(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusUnauthorized { + return resp, nil + } + if _, ok := base.(*oauth2.Transport); ok { + // We failed to authorize even with a token source; give up. + return resp, nil + } + + resp.Body.Close() + // Try to authorize. + t.mu.Lock() + defer t.mu.Unlock() + // If we don't have a token source, get one by following the OAuth flow. + // (We may have obtained one while t.mu was not held above.) + // TODO: We hold the lock for the entire OAuth flow. This could be a long + // time. Is there a better way? + if _, ok := t.opts.Base.(*oauth2.Transport); !ok { + ts, err := t.handler(req, resp) + if err != nil { + return nil, err + } + t.opts.Base = &oauth2.Transport{Base: t.opts.Base, Source: ts} + } + + // If we don't have a body, the request is reusable, though it will be cloned + // by the base. However, if we've had to read the body, we must clone. + if haveBody { + req = req.Clone(req.Context()) + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + } + + return t.opts.Base.RoundTrip(req) +} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/internal/json/json.go b/vendor/github.com/modelcontextprotocol/go-sdk/internal/json/json.go new file mode 100644 index 00000000..1148770e --- /dev/null +++ b/vendor/github.com/modelcontextprotocol/go-sdk/internal/json/json.go @@ -0,0 +1,19 @@ +// Copyright 2025 The Go MCP SDK Authors. All rights reserved. +// Use of this source code is governed by the license +// that can be found in the LICENSE file. + +// Package json provides internal JSON utilities. 
+ +package json + +import ( + "bytes" + + "github.com/segmentio/encoding/json" +) + +func Unmarshal(data []byte, v any) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.DontMatchCaseInsensitiveStructFields() + return dec.Decode(v) +} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/conn.go b/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/conn.go index 627ffe7b..571df63a 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/conn.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/conn.go @@ -6,13 +6,14 @@ package jsonrpc2 import ( "context" - "encoding/json" "errors" "fmt" "io" "sync" "sync/atomic" "time" + + "github.com/modelcontextprotocol/go-sdk/internal/json" ) // Binder builds a connection configuration. diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/frame.go b/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/frame.go index 46fcc9db..72527cb9 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/frame.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/frame.go @@ -163,8 +163,8 @@ func (r *headerReader) Read(ctx context.Context) (Message, error) { return nil, fmt.Errorf("invalid header line %q", line) } name, value := line[:colon], strings.TrimSpace(line[colon+1:]) - switch name { - case "Content-Length": + switch { + case strings.EqualFold(name, "Content-Length"): if contentLength, err = strconv.ParseInt(value, 10, 32); err != nil { return nil, fmt.Errorf("failed parsing Content-Length: %v", value) } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/messages.go b/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/messages.go index 791e698d..b424780e 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/messages.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2/messages.go @@ -5,9 +5,14 @@ 
package jsonrpc2 import ( + "bytes" "encoding/json" "errors" "fmt" + + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" + + "github.com/modelcontextprotocol/go-sdk/internal/mcpgodebug" ) // ID is a Request identifier, which is defined by the spec to be a string, integer, or null. @@ -145,9 +150,9 @@ func toWireError(err error) *WireError { func EncodeMessage(msg Message) ([]byte, error) { wire := wireCombined{VersionTag: wireVersion} msg.marshal(&wire) - data, err := json.Marshal(&wire) + data, err := jsonMarshal(&wire) if err != nil { - return data, fmt.Errorf("marshaling jsonrpc message: %w", err) + return nil, fmt.Errorf("marshaling jsonrpc message: %w", err) } return data, nil } @@ -158,16 +163,19 @@ func EncodeMessage(msg Message) ([]byte, error) { func EncodeIndent(msg Message, prefix, indent string) ([]byte, error) { wire := wireCombined{VersionTag: wireVersion} msg.marshal(&wire) - data, err := json.MarshalIndent(&wire, prefix, indent) - if err != nil { - return data, fmt.Errorf("marshaling jsonrpc message: %w", err) + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + enc.SetEscapeHTML(false) + enc.SetIndent(prefix, indent) + if err := enc.Encode(&wire); err != nil { + return nil, fmt.Errorf("marshaling jsonrpc message: %w", err) } - return data, nil + return bytes.TrimRight(buf.Bytes(), "\n"), nil } func DecodeMessage(data []byte) (Message, error) { msg := wireCombined{} - if err := json.Unmarshal(data, &msg); err != nil { + if err := internaljson.Unmarshal(data, &msg); err != nil { return nil, fmt.Errorf("unmarshaling jsonrpc message: %w", err) } if msg.VersionTag != wireVersion { @@ -204,9 +212,31 @@ func marshalToRaw(obj any) (json.RawMessage, error) { if obj == nil { return nil, nil } - data, err := json.Marshal(obj) + data, err := jsonMarshal(obj) if err != nil { return nil, err } return json.RawMessage(data), nil } + +// jsonescaping is a compatibility parameter that allows to restore +// JSON escaping in the JSON marshaling, 
which stopped being the default +// in the 1.4.0 version of the SDK. See the documentation for the +// mcpgodebug package for instructions how to enable it. +// The option will be removed in the 1.6.0 version of the SDK. +var jsonescaping = mcpgodebug.Value("jsonescaping") + +// jsonMarshal marshals obj to JSON like json.Marshal but without HTML escaping. +func jsonMarshal(obj any) ([]byte, error) { + if jsonescaping == "1" { + return json.Marshal(obj) + } + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + enc.SetEscapeHTML(false) + if err := enc.Encode(obj); err != nil { + return nil, err + } + // json.Encoder.Encode adds a trailing newline. Trim it to be consistent with json.Marshal. + return bytes.TrimRight(buf.Bytes(), "\n"), nil +} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/internal/mcpgodebug/mcpgodebug.go b/vendor/github.com/modelcontextprotocol/go-sdk/internal/mcpgodebug/mcpgodebug.go new file mode 100644 index 00000000..7f8f7ca3 --- /dev/null +++ b/vendor/github.com/modelcontextprotocol/go-sdk/internal/mcpgodebug/mcpgodebug.go @@ -0,0 +1,52 @@ +// Copyright 2025 The Go MCP SDK Authors. All rights reserved. +// Use of this source code is governed by the license +// that can be found in the LICENSE file. + +// Package mcpgodebug provides a mechanism to configure compatibility parameters +// via the MCPGODEBUG environment variable. +// +// The value of MCPGODEBUG is a comma-separated list of key=value pairs. +// For example: +// +// MCPGODEBUG=someoption=1,otheroption=value +package mcpgodebug + +import ( + "fmt" + "os" + "strings" +) + +const compatibilityEnvKey = "MCPGODEBUG" + +var compatibilityParams map[string]string + +func init() { + var err error + compatibilityParams, err = parseCompatibility(os.Getenv(compatibilityEnvKey)) + if err != nil { + panic(err) + } +} + +// Value returns the value of the compatibility parameter with the given key. +// It returns an empty string if the key is not set. 
+func Value(key string) string { + return compatibilityParams[key] +} + +func parseCompatibility(envValue string) (map[string]string, error) { + if envValue == "" { + return nil, nil + } + + params := make(map[string]string) + for part := range strings.SplitSeq(envValue, ",") { + k, v, ok := strings.Cut(part, "=") + if !ok { + return nil, fmt.Errorf("MCPGODEBUG: invalid format: %q", part) + } + params[strings.TrimSpace(k)] = strings.TrimSpace(v) + } + return params, nil +} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/internal/util/net.go b/vendor/github.com/modelcontextprotocol/go-sdk/internal/util/net.go new file mode 100644 index 00000000..6858614e --- /dev/null +++ b/vendor/github.com/modelcontextprotocol/go-sdk/internal/util/net.go @@ -0,0 +1,26 @@ +// Copyright 2025 The Go MCP SDK Authors. All rights reserved. +// Use of this source code is governed by the license +// that can be found in the LICENSE file. +package util + +import ( + "net" + "net/netip" + "strings" +) + +func IsLoopback(addr string) bool { + host, _, err := net.SplitHostPort(addr) + if err != nil { + // If SplitHostPort fails, it might be just a host without a port. 
+ host = strings.Trim(addr, "[]") + } + if host == "localhost" { + return true + } + ip, err := netip.ParseAddr(host) + if err != nil { + return false + } + return ip.IsLoopback() +} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/client.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/client.go index 57c30fb3..74900b1c 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/client.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/client.go @@ -6,7 +6,6 @@ package mcp import ( "context" - "encoding/json" "errors" "fmt" "iter" @@ -18,7 +17,7 @@ import ( "time" "github.com/google/jsonschema-go/jsonschema" - + "github.com/modelcontextprotocol/go-sdk/internal/json" "github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2" "github.com/modelcontextprotocol/go-sdk/jsonrpc" ) @@ -52,6 +51,9 @@ func NewClient(impl *Implementation, options *ClientOptions) *Client { } options = nil // prevent reuse + if opts.CreateMessageHandler != nil && opts.CreateMessageWithToolsHandler != nil { + panic("cannot set both CreateMessageHandler and CreateMessageWithToolsHandler; use CreateMessageWithToolsHandler for tool support, or CreateMessageHandler for basic sampling") + } if opts.Logger == nil { // ensure we have a logger opts.Logger = ensureLogger(nil) } @@ -77,6 +79,19 @@ type ClientOptions struct { // non nil value for [ClientCapabilities.Sampling], that value overrides the // inferred capability. CreateMessageHandler func(context.Context, *CreateMessageRequest) (*CreateMessageResult, error) + // CreateMessageWithToolsHandler handles incoming sampling/createMessage + // requests that may involve tool use. It returns + // [CreateMessageWithToolsResult], which supports array content for parallel + // tool calls. + // + // Setting this handler causes the client to advertise the sampling + // capability with tools support (sampling.tools). 
As with + // [CreateMessageHandler], [ClientOptions.Capabilities].Sampling overrides + // the inferred capability. + // + // It is a panic to set both CreateMessageHandler and + // CreateMessageWithToolsHandler. + CreateMessageWithToolsHandler func(context.Context, *CreateMessageWithToolsRequest) (*CreateMessageWithToolsResult, error) // ElicitationHandler handles incoming requests for elicitation/create. // // Setting ElicitationHandler to a non-nil value automatically causes the @@ -109,7 +124,16 @@ type ClientOptions struct { // are set in the Capabilities field, their values override the inferred // value. // - // For example, to to configure elicitation modes: + // For example, to advertise sampling with tools and context support: + // + // Capabilities: &ClientCapabilities{ + // Sampling: &SamplingCapabilities{ + // Tools: &SamplingToolsCapabilities{}, + // Context: &SamplingContextCapabilities{}, + // }, + // } + // + // Or to configure elicitation modes: // // Capabilities: &ClientCapabilities{ // Elicitation: &ElicitationCapabilities{ @@ -119,8 +143,7 @@ type ClientOptions struct { // } // // Conversely, if Capabilities does not set a field (for example, if the - // Elicitation field is nil), the inferred elicitation capability will be - // used. + // Elicitation field is nil), the inferred capability will be used. Capabilities *ClientCapabilities // ElicitationCompleteHandler handles incoming notifications for notifications/elicitation/complete. ElicitationCompleteHandler func(context.Context, *ElicitationCompleteNotificationRequest) @@ -198,10 +221,13 @@ func (c *Client) capabilities(protocolVersion string) *ClientCapabilities { caps.Roots = *caps.RootsV2 } - // Augment with sampling capability if handler is set. - if c.opts.CreateMessageHandler != nil { + // Augment with sampling capability if a handler is set. 
+ if c.opts.CreateMessageHandler != nil || c.opts.CreateMessageWithToolsHandler != nil { if caps.Sampling == nil { caps.Sampling = &SamplingCapabilities{} + if c.opts.CreateMessageWithToolsHandler != nil { + caps.Sampling.Tools = &SamplingToolsCapabilities{} + } } } @@ -453,12 +479,27 @@ func (c *Client) listRoots(_ context.Context, req *ListRootsRequest) (*ListRoots }, nil } -func (c *Client) createMessage(ctx context.Context, req *CreateMessageRequest) (*CreateMessageResult, error) { - if c.opts.CreateMessageHandler == nil { - // TODO: wrap or annotate this error? Pick a standard code? - return nil, &jsonrpc.Error{Code: codeUnsupportedMethod, Message: "client does not support CreateMessage"} +func (c *Client) createMessage(ctx context.Context, req *CreateMessageWithToolsRequest) (*CreateMessageWithToolsResult, error) { + if c.opts.CreateMessageWithToolsHandler != nil { + return c.opts.CreateMessageWithToolsHandler(ctx, req) } - return c.opts.CreateMessageHandler(ctx, req) + if c.opts.CreateMessageHandler != nil { + // Downconvert the request for the basic handler. + baseParams, err := req.Params.toBase() + if err != nil { + return nil, err + } + baseReq := &CreateMessageRequest{ + Session: req.Session, + Params: baseParams, + } + res, err := c.opts.CreateMessageHandler(ctx, baseReq) + if err != nil { + return nil, err + } + return res.toWithTools(), nil + } + return nil, &jsonrpc.Error{Code: codeUnsupportedMethod, Message: "client does not support CreateMessage"} } // urlElicitationMiddleware returns middleware that automatically handles URL elicitation @@ -590,7 +631,7 @@ func (c *Client) elicit(ctx context.Context, req *ElicitRequest) (*ElicitResult, return nil, err } // Validate elicitation result content against requested schema. 
- if schema != nil && res.Content != nil { + if res.Action == "accept" && schema != nil && res.Content != nil { resolved, err := schema.Resolve(nil) if err != nil { return nil, &jsonrpc.Error{Code: jsonrpc.CodeInvalidParams, Message: fmt.Sprintf("failed to resolve requested schema: %v", err)} @@ -668,8 +709,10 @@ func validateElicitProperty(propName string, propSchema *jsonschema.Schema) erro return validateElicitNumberProperty(propName, propSchema) case "boolean": return validateElicitBooleanProperty(propName, propSchema) + case "array": + return validateElicitArrayProperty(propName, propSchema) default: - return fmt.Errorf("elicit schema property %q has unsupported type %q, only string, number, integer, and boolean are allowed", propName, propSchema.Type) + return fmt.Errorf("elicit schema property %q has unsupported type %q, only string, number, integer, boolean, and array are allowed", propName, propSchema.Type) } } @@ -682,7 +725,7 @@ func validateElicitStringProperty(propName string, propSchema *jsonschema.Schema return fmt.Errorf("elicit schema property %q has enum values but type is %q, enums are only supported for string type", propName, propSchema.Type) } // Enum values themselves are validated by the JSON schema library - // Validate enumNames if present - must match enum length + // Validate legacy enumNames if present - must match enum length. if propSchema.Extra != nil { if enumNamesRaw, exists := propSchema.Extra["enumNames"]; exists { // Type check enumNames - should be a slice @@ -697,6 +740,15 @@ func validateElicitStringProperty(propName string, propSchema *jsonschema.Schema } return nil } + // Handle new style of titled enums. 
+ if propSchema.OneOf != nil { + for _, entry := range propSchema.OneOf { + if err := validateTitledEnumEntry(entry); err != nil { + return fmt.Errorf("elicit schema property %q oneOf has invalid entry: %v", propName, err) + } + } + return nil + } // Validate format if specified - only specific formats are allowed if propSchema.Format != "" { @@ -749,6 +801,53 @@ func validateElicitNumberProperty(propName string, propSchema *jsonschema.Schema return nil } +// validateElicitArrayProperty validates multi-select enum properties. +func validateElicitArrayProperty(propName string, propSchema *jsonschema.Schema) error { + if propSchema.Items == nil { + return fmt.Errorf("elicit schema property %q is array but missing 'items' definition", propName) + } + + items := propSchema.Items + switch items.Type { + case "string": + // Untitled enums. + if items.Enum == nil { + return fmt.Errorf("elicit schema property %q items must specify enum for untitled enums", propName) + } + return nil + case "": + // Titled enums. 
+ if len(items.AnyOf) == 0 { + return fmt.Errorf("elicit schema property %q items must specify anyOf for titled enums", propName) + } + for _, entry := range items.AnyOf { + if err := validateTitledEnumEntry(entry); err != nil { + return fmt.Errorf("elicit schema property %q items has invalid entry: %v", propName, err) + } + } + return nil + default: + return fmt.Errorf("elicit schema property %q items have unsupported type %q", propName, items.Type) + } +} + +func validateTitledEnumEntry(entry *jsonschema.Schema) error { + if entry.Const == nil { + return fmt.Errorf("const is required for titled enum entries") + } + constVal, ok := (*entry.Const).(string) + if !ok { + return fmt.Errorf("const must be a string for titled enum entries") + } + if constVal == "" { + return fmt.Errorf("const cannot be empty for titled enum entries") + } + if entry.Title == "" { + return fmt.Errorf("title is required for titled enum entries") + } + return nil +} + // validateElicitBooleanProperty validates boolean-type properties. func validateElicitBooleanProperty(propName string, propSchema *jsonschema.Schema) error { return validateDefaultProperty[bool](propName, propSchema) diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/content.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/content.go index fb1a0d1e..95ea40d8 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/content.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/content.go @@ -9,12 +9,16 @@ package mcp import ( "encoding/json" - "errors" "fmt" + + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" ) // A Content is a [TextContent], [ImageContent], [AudioContent], -// [ResourceLink], or [EmbeddedResource]. +// [ResourceLink], [EmbeddedResource], [ToolUseContent], or [ToolResultContent]. +// +// Note: [ToolUseContent] and [ToolResultContent] are only valid in sampling +// message contexts (CreateMessageParams/CreateMessageResult). 
type Content interface { MarshalJSON() ([]byte, error) fromWire(*wireContent) @@ -183,69 +187,165 @@ func (c *EmbeddedResource) fromWire(wire *wireContent) { c.Annotations = wire.Annotations } -// ResourceContents contains the contents of a specific resource or -// sub-resource. -type ResourceContents struct { - URI string `json:"uri"` - MIMEType string `json:"mimeType,omitempty"` - Text string `json:"text,omitempty"` - Blob []byte `json:"blob,omitempty"` - Meta Meta `json:"_meta,omitempty"` +// ToolUseContent represents a request from the assistant to invoke a tool. +// This content type is only valid in sampling messages. +type ToolUseContent struct { + // ID is a unique identifier for this tool use, used to match with ToolResultContent. + ID string + // Name is the name of the tool to invoke. + Name string + // Input contains the tool arguments as a JSON object. + Input map[string]any + Meta Meta } -func (r *ResourceContents) MarshalJSON() ([]byte, error) { - // If we could assume Go 1.24, we could use omitzero for Blob and avoid this method. - if r.URI == "" { - return nil, errors.New("ResourceContents missing URI") +func (c *ToolUseContent) MarshalJSON() ([]byte, error) { + input := c.Input + if input == nil { + input = map[string]any{} + } + wire := struct { + Type string `json:"type"` + ID string `json:"id"` + Name string `json:"name"` + Input map[string]any `json:"input"` + Meta Meta `json:"_meta,omitempty"` + }{ + Type: "tool_use", + ID: c.ID, + Name: c.Name, + Input: input, + Meta: c.Meta, } - if r.Blob == nil { - // Text. Marshal normally. - type wireResourceContents ResourceContents // (lacks MarshalJSON method) - return json.Marshal((wireResourceContents)(*r)) + return json.Marshal(wire) +} + +func (c *ToolUseContent) fromWire(wire *wireContent) { + c.ID = wire.ID + c.Name = wire.Name + c.Input = wire.Input + c.Meta = wire.Meta +} + +// ToolResultContent represents the result of a tool invocation. 
+// This content type is only valid in sampling messages with role "user". +type ToolResultContent struct { + // ToolUseID references the ID from the corresponding ToolUseContent. + ToolUseID string + // Content holds the unstructured result of the tool call. + Content []Content + // StructuredContent holds an optional structured result as a JSON object. + StructuredContent any + // IsError indicates whether the tool call ended in an error. + IsError bool + Meta Meta +} + +func (c *ToolResultContent) MarshalJSON() ([]byte, error) { + // Marshal nested content + var contentWire []*wireContent + for _, content := range c.Content { + data, err := content.MarshalJSON() + if err != nil { + return nil, err + } + var w wireContent + if err := internaljson.Unmarshal(data, &w); err != nil { + return nil, err + } + contentWire = append(contentWire, &w) } - // Blob. - if r.Text != "" { - return nil, errors.New("ResourceContents has non-zero Text and Blob fields") + if contentWire == nil { + contentWire = []*wireContent{} // avoid JSON null } - // r.Blob may be the empty slice, so marshal with an alternative definition. 
- br := struct { - URI string `json:"uri,omitempty"` - MIMEType string `json:"mimeType,omitempty"` - Blob []byte `json:"blob"` - Meta Meta `json:"_meta,omitempty"` + + wire := struct { + Type string `json:"type"` + ToolUseID string `json:"toolUseId"` + Content []*wireContent `json:"content"` + StructuredContent any `json:"structuredContent,omitempty"` + IsError bool `json:"isError,omitempty"` + Meta Meta `json:"_meta,omitempty"` }{ - URI: r.URI, - MIMEType: r.MIMEType, - Blob: r.Blob, - Meta: r.Meta, + Type: "tool_result", + ToolUseID: c.ToolUseID, + Content: contentWire, + StructuredContent: c.StructuredContent, + IsError: c.IsError, + Meta: c.Meta, } - return json.Marshal(br) + return json.Marshal(wire) +} + +func (c *ToolResultContent) fromWire(wire *wireContent) { + c.ToolUseID = wire.ToolUseID + c.StructuredContent = wire.StructuredContent + c.IsError = wire.IsError + c.Meta = wire.Meta + // Content is handled separately in contentFromWire due to nested content +} + +// ResourceContents contains the contents of a specific resource or +// sub-resource. +type ResourceContents struct { + URI string `json:"uri"` + MIMEType string `json:"mimeType,omitempty"` + Text string `json:"text,omitempty"` + Blob []byte `json:"blob,omitzero"` + Meta Meta `json:"_meta,omitempty"` } // wireContent is the wire format for content. // It represents the protocol types TextContent, ImageContent, AudioContent, -// ResourceLink, and EmbeddedResource. +// ResourceLink, EmbeddedResource, ToolUseContent, and ToolResultContent. // The Type field distinguishes them. In the protocol, each type has a constant // value for the field. -// At most one of Text, Data, Resource, and URI is non-zero. 
type wireContent struct { - Type string `json:"type"` - Text string `json:"text,omitempty"` - MIMEType string `json:"mimeType,omitempty"` - Data []byte `json:"data,omitempty"` - Resource *ResourceContents `json:"resource,omitempty"` - URI string `json:"uri,omitempty"` - Name string `json:"name,omitempty"` - Title string `json:"title,omitempty"` - Description string `json:"description,omitempty"` - Size *int64 `json:"size,omitempty"` - Meta Meta `json:"_meta,omitempty"` - Annotations *Annotations `json:"annotations,omitempty"` - Icons []Icon `json:"icons,omitempty"` + Type string `json:"type"` + Text string `json:"text,omitempty"` // TextContent + MIMEType string `json:"mimeType,omitempty"` // ImageContent, AudioContent, ResourceLink + Data []byte `json:"data,omitempty"` // ImageContent, AudioContent + Resource *ResourceContents `json:"resource,omitempty"` // EmbeddedResource + URI string `json:"uri,omitempty"` // ResourceLink + Name string `json:"name,omitempty"` // ResourceLink, ToolUseContent + Title string `json:"title,omitempty"` // ResourceLink + Description string `json:"description,omitempty"` // ResourceLink + Size *int64 `json:"size,omitempty"` // ResourceLink + Meta Meta `json:"_meta,omitempty"` // all types + Annotations *Annotations `json:"annotations,omitempty"` // all types except ToolUseContent, ToolResultContent + Icons []Icon `json:"icons,omitempty"` // ResourceLink + ID string `json:"id,omitempty"` // ToolUseContent + Input map[string]any `json:"input,omitempty"` // ToolUseContent + ToolUseID string `json:"toolUseId,omitempty"` // ToolResultContent + NestedContent []*wireContent `json:"content,omitempty"` // ToolResultContent + StructuredContent any `json:"structuredContent,omitempty"` // ToolResultContent + IsError bool `json:"isError,omitempty"` // ToolResultContent +} + +// unmarshalContent unmarshals JSON that is either a single content object or +// an array of content objects. A single object is wrapped in a one-element slice. 
+func unmarshalContent(raw json.RawMessage, allow map[string]bool) ([]Content, error) { + if len(raw) == 0 || string(raw) == "null" { + return nil, fmt.Errorf("nil content") + } + // Try array first, then fall back to single object. + var wires []*wireContent + if err := internaljson.Unmarshal(raw, &wires); err == nil { + return contentsFromWire(wires, allow) + } + var wire wireContent + if err := internaljson.Unmarshal(raw, &wire); err != nil { + return nil, err + } + c, err := contentFromWire(&wire, allow) + if err != nil { + return nil, err + } + return []Content{c}, nil } func contentsFromWire(wires []*wireContent, allow map[string]bool) ([]Content, error) { - var blocks []Content + blocks := make([]Content, 0, len(wires)) for _, wire := range wires { block, err := contentFromWire(wire, allow) if err != nil { @@ -284,6 +384,27 @@ func contentFromWire(wire *wireContent, allow map[string]bool) (Content, error) v := new(EmbeddedResource) v.fromWire(wire) return v, nil + case "tool_use": + v := new(ToolUseContent) + v.fromWire(wire) + return v, nil + case "tool_result": + v := new(ToolResultContent) + v.fromWire(wire) + // Handle nested content - tool_result content can contain text, image, audio, + // resource_link, and resource (same as CallToolResult.content) + if wire.NestedContent != nil { + toolResultContentAllow := map[string]bool{ + "text": true, "image": true, "audio": true, + "resource_link": true, "resource": true, + } + nestedContent, err := contentsFromWire(wire.NestedContent, toolResultContentAllow) + if err != nil { + return nil, fmt.Errorf("tool_result nested content: %w", err) + } + v.Content = nestedContent + } + return v, nil } - return nil, fmt.Errorf("internal error: unrecognized content type %s", wire.Type) + return nil, fmt.Errorf("unrecognized content type %q", wire.Type) } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/event.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/event.go index 5c322c4a..62dd2ad2 100644 --- 
a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/event.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/event.go @@ -67,9 +67,7 @@ func writeEvent(w io.Writer, evt Event) (int, error) { // TODO(rfindley): consider a different API here that makes failure modes more // apparent. func scanEvents(r io.Reader) iter.Seq2[Event, error] { - scanner := bufio.NewScanner(r) - const maxTokenSize = 1 * 1024 * 1024 // 1 MiB max line size - scanner.Buffer(nil, maxTokenSize) + reader := bufio.NewReader(r) // TODO: investigate proper behavior when events are out of order, or have // non-standard names. @@ -94,31 +92,43 @@ func scanEvents(r io.Reader) iter.Seq2[Event, error] { evt Event dataBuf *bytes.Buffer // if non-nil, preceding field was also data ) - flushData := func() { + yieldEvent := func() bool { if dataBuf != nil { evt.Data = dataBuf.Bytes() dataBuf = nil } + if evt.Empty() { + return true + } + if !yield(evt, nil) { + return false + } + evt = Event{} + return true } - for scanner.Scan() { - line := scanner.Bytes() + for { + line, err := reader.ReadBytes('\n') + if err != nil && !errors.Is(err, io.EOF) { + yield(Event{}, fmt.Errorf("error reading event: %v", err)) + return + } + line = bytes.TrimRight(line, "\r\n") + isEOF := errors.Is(err, io.EOF) + if len(line) == 0 { - flushData() - // \n\n is the record delimiter - if !evt.Empty() && !yield(evt, nil) { + if !yieldEvent() { + return + } + if isEOF { return } - evt = Event{} continue } before, after, found := bytes.Cut(line, []byte{':'}) if !found { - yield(Event{}, fmt.Errorf("malformed line in SSE stream: %q", string(line))) + yield(Event{}, fmt.Errorf("%w: malformed line in SSE stream: %q", errMalformedEvent, string(line))) return } - if !bytes.Equal(before, dataKey) { - flushData() - } switch { case bytes.Equal(before, eventKey): evt.Name = strings.TrimSpace(string(after)) @@ -128,27 +138,19 @@ func scanEvents(r io.Reader) iter.Seq2[Event, error] { evt.Retry = strings.TrimSpace(string(after)) case 
bytes.Equal(before, dataKey): data := bytes.TrimSpace(after) - if dataBuf != nil { - dataBuf.WriteByte('\n') - dataBuf.Write(data) - } else { + if dataBuf == nil { dataBuf = new(bytes.Buffer) - dataBuf.Write(data) + } else { + dataBuf.WriteByte('\n') } + dataBuf.Write(data) } - } - if err := scanner.Err(); err != nil { - if errors.Is(err, bufio.ErrTooLong) { - err = fmt.Errorf("event exceeded max line length of %d", maxTokenSize) - } - if !yield(Event{}, err) { + + if isEOF { + yieldEvent() return } } - flushData() - if !evt.Empty() { - yield(evt, nil) - } } } @@ -310,6 +312,11 @@ func (s *MemoryEventStore) Append(_ context.Context, sessionID, streamID string, // index is no longer available. var ErrEventsPurged = errors.New("data purged") +// errMalformedEvent is returned when an SSE event cannot be parsed due to format violations. +// This is a hard error indicating corrupted data or protocol violations, as opposed to +// transient I/O errors which may be retryable. +var errMalformedEvent = errors.New("malformed event") + // After implements [EventStore.After]. func (s *MemoryEventStore) After(_ context.Context, sessionID, streamID string, index int) iter.Seq2[[]byte, error] { // Return the data items to yield. diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/logging.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/logging.go index 96a96b82..b1bd82b1 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/logging.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/logging.go @@ -89,21 +89,12 @@ type LoggingHandler struct { handler slog.Handler } -// discardHandler is a slog.Handler that drops all logs. -// TODO: use slog.DiscardHandler when we require Go 1.24+. 
-type discardHandler struct{} - -func (discardHandler) Enabled(context.Context, slog.Level) bool { return false } -func (discardHandler) Handle(context.Context, slog.Record) error { return nil } -func (discardHandler) WithAttrs([]slog.Attr) slog.Handler { return discardHandler{} } -func (discardHandler) WithGroup(string) slog.Handler { return discardHandler{} } - // ensureLogger returns l if non-nil, otherwise a discard logger. func ensureLogger(l *slog.Logger) *slog.Logger { if l != nil { return l } - return slog.New(discardHandler{}) + return slog.New(slog.DiscardHandler) } // NewLoggingHandler creates a [LoggingHandler] that logs to the given [ServerSession] using a diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/protocol.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/protocol.go index bea776f9..837ce784 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/protocol.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/protocol.go @@ -13,6 +13,9 @@ package mcp import ( "encoding/json" "fmt" + "maps" + + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" ) // Optional annotations for the client. The client can use annotations to inform @@ -140,7 +143,7 @@ func (x *CallToolResult) UnmarshalJSON(data []byte) error { res Content []*wireContent `json:"content"` } - if err := json.Unmarshal(data, &wire); err != nil { + if err := internaljson.Unmarshal(data, &wire); err != nil { return err } var err error @@ -188,12 +191,18 @@ type RootCapabilities struct { // this schema, but this is not a closed set: any client can define its own, // additional capabilities. type ClientCapabilities struct { - // NOTE: any addition to ClientCapabilities must also be reflected in // [ClientCapabilities.clone]. // Experimental reports non-standard capabilities that the client supports. + // The caller should not modify the map after assigning it. 
Experimental map[string]any `json:"experimental,omitempty"` + // Extensions reports extensions that the client supports. + // Keys are extension identifiers in "{vendor-prefix}/{extension-name}" format. + // Values are per-extension settings objects; use [ClientCapabilities.AddExtension] + // to ensure nil settings are normalized to empty objects. + // The caller should not modify the map or its values after assigning it. + Extensions map[string]any `json:"extensions,omitempty"` // Roots describes the client's support for roots. // // Deprecated: use RootsV2. As described in #607, Roots should have been a @@ -212,11 +221,33 @@ type ClientCapabilities struct { Elicitation *ElicitationCapabilities `json:"elicitation,omitempty"` } -// clone returns a deep copy of the ClientCapabilities. +// AddExtension adds an extension with the given name and settings. +// If settings is nil, an empty map is used to ensure valid JSON serialization +// (the spec requires an object, not null). +// The settings map should not be modified after the call. +func (c *ClientCapabilities) AddExtension(name string, settings map[string]any) { + if c.Extensions == nil { + c.Extensions = make(map[string]any) + } + if settings == nil { + settings = map[string]any{} + } + c.Extensions[name] = settings +} + +// clone returns a copy of the ClientCapabilities. +// Values in the Extensions and Experimental maps are shallow-copied. 
func (c *ClientCapabilities) clone() *ClientCapabilities { cp := *c + cp.Experimental = maps.Clone(c.Experimental) + cp.Extensions = maps.Clone(c.Extensions) cp.RootsV2 = shallowClone(c.RootsV2) - cp.Sampling = shallowClone(c.Sampling) + if c.Sampling != nil { + x := *c.Sampling + x.Tools = shallowClone(c.Sampling.Tools) + x.Context = shallowClone(c.Sampling.Context) + cp.Sampling = &x + } if c.Elicitation != nil { x := *c.Elicitation x.Form = shallowClone(c.Elicitation.Form) @@ -286,7 +317,7 @@ type CompleteReference struct { func (r *CompleteReference) UnmarshalJSON(data []byte) error { type wireCompleteReference CompleteReference // for naive unmarshaling var r2 wireCompleteReference - if err := json.Unmarshal(data, &r2); err != nil { + if err := internaljson.Unmarshal(data, &r2); err != nil { return err } switch r2.Type { @@ -357,6 +388,11 @@ type CreateMessageParams struct { Meta `json:"_meta,omitempty"` // A request to include context from one or more MCP servers (including the // caller), to be attached to the prompt. The client may ignore this request. + // + // The default is "none". Values "thisServer" and + // "allServers" are soft-deprecated. Servers SHOULD only use these values if + // the client declares ClientCapabilities.sampling.context. These values may + // be removed in future spec releases. IncludeContext string `json:"includeContext,omitempty"` // The maximum number of tokens to sample, as requested by the server. The // client may choose to sample fewer tokens than requested. @@ -379,6 +415,106 @@ func (x *CreateMessageParams) isParams() {} func (x *CreateMessageParams) GetProgressToken() any { return getProgressToken(x) } func (x *CreateMessageParams) SetProgressToken(t any) { setProgressToken(x, t) } +// CreateMessageWithToolsParams is a sampling request that includes tools. +// It extends the basic [CreateMessageParams] fields with tools, tool choice, +// and messages that support array content (for parallel tool calls). 
+// +// Use with [ServerSession.CreateMessageWithTools]. +type CreateMessageWithToolsParams struct { + Meta `json:"_meta,omitempty"` + IncludeContext string `json:"includeContext,omitempty"` + MaxTokens int64 `json:"maxTokens"` + // Messages supports array content for tool_use and tool_result blocks. + Messages []*SamplingMessageV2 `json:"messages"` + Metadata any `json:"metadata,omitempty"` + ModelPreferences *ModelPreferences `json:"modelPreferences,omitempty"` + StopSequences []string `json:"stopSequences,omitempty"` + SystemPrompt string `json:"systemPrompt,omitempty"` + Temperature float64 `json:"temperature,omitempty"` + // Tools is the list of tools available for the model to use. + Tools []*Tool `json:"tools,omitempty"` + // ToolChoice controls how the model should use tools. + ToolChoice *ToolChoice `json:"toolChoice,omitempty"` +} + +func (x *CreateMessageWithToolsParams) isParams() {} +func (x *CreateMessageWithToolsParams) GetProgressToken() any { return getProgressToken(x) } +func (x *CreateMessageWithToolsParams) SetProgressToken(t any) { setProgressToken(x, t) } + +// toBase converts to CreateMessageParams by taking the content block from each +// message. Tools and ToolChoice are dropped. Returns an error if any message +// has multiple content blocks, since SamplingMessage only supports one. 
+func (p *CreateMessageWithToolsParams) toBase() (*CreateMessageParams, error) { + var msgs []*SamplingMessage + for _, m := range p.Messages { + if len(m.Content) > 1 { + return nil, fmt.Errorf("message has %d content blocks; use CreateMessageWithToolsHandler to support multiple content", len(m.Content)) + } + var content Content + if len(m.Content) > 0 { + content = m.Content[0] + } + msgs = append(msgs, &SamplingMessage{Content: content, Role: m.Role}) + } + return &CreateMessageParams{ + Meta: p.Meta, + IncludeContext: p.IncludeContext, + MaxTokens: p.MaxTokens, + Messages: msgs, + Metadata: p.Metadata, + ModelPreferences: p.ModelPreferences, + StopSequences: p.StopSequences, + SystemPrompt: p.SystemPrompt, + Temperature: p.Temperature, + }, nil +} + +// SamplingMessageV2 describes a message issued to or received from an +// LLM API, supporting array content for parallel tool calls. The "V2" refers +// to the 2025-11-25 spec, which changed content from a single block to +// single-or-array. In v2 of the SDK, this will replace [SamplingMessage]. +// +// When marshaling, a single-element Content slice is marshaled as a single +// object for compatibility with pre-2025-11-25 implementations. When +// unmarshaling, a single JSON content object is accepted and wrapped in a +// one-element slice. +type SamplingMessageV2 struct { + Content []Content `json:"content"` + Role Role `json:"role"` +} + +var samplingWithToolsAllow = map[string]bool{ + "text": true, "image": true, "audio": true, + "tool_use": true, "tool_result": true, +} + +// MarshalJSON marshals the message. A single-element Content slice is marshaled +// as a single object for backward compatibility. 
+func (m *SamplingMessageV2) MarshalJSON() ([]byte, error) { + if len(m.Content) == 1 { + return json.Marshal(&SamplingMessage{Content: m.Content[0], Role: m.Role}) + } + type msg SamplingMessageV2 // avoid recursion + return json.Marshal((*msg)(m)) +} + +func (m *SamplingMessageV2) UnmarshalJSON(data []byte) error { + type msg SamplingMessageV2 // avoid recursion + var wire struct { + msg + Content json.RawMessage `json:"content"` + } + if err := internaljson.Unmarshal(data, &wire); err != nil { + return err + } + var err error + if wire.msg.Content, err = unmarshalContent(wire.Content, samplingWithToolsAllow); err != nil { + return err + } + *m = SamplingMessageV2(wire.msg) + return nil +} + // The client's response to a sampling/create_message request from the server. // The client should inform the user before returning the sampled message, to // allow them to inspect the response (human in the loop) and decide whether to @@ -392,6 +528,12 @@ type CreateMessageResult struct { Model string `json:"model"` Role Role `json:"role"` // The reason why sampling stopped, if known. + // + // Standard values: + // - "endTurn": natural end of the assistant's turn + // - "stopSequence": a stop sequence was encountered + // - "maxTokens": reached the maximum token limit + // - "toolUse": the model wants to use one or more tools StopReason string `json:"stopReason,omitempty"` } @@ -402,7 +544,7 @@ func (r *CreateMessageResult) UnmarshalJSON(data []byte) error { result Content *wireContent `json:"content"` } - if err := json.Unmarshal(data, &wire); err != nil { + if err := internaljson.Unmarshal(data, &wire); err != nil { return err } var err error @@ -413,6 +555,84 @@ func (r *CreateMessageResult) UnmarshalJSON(data []byte) error { return nil } +// CreateMessageWithToolsResult is the client's response to a +// sampling/create_message request that included tools. Content is a slice to +// support parallel tool calls (multiple tool_use blocks in one response). 
+// +// Use [ServerSession.CreateMessageWithTools] to send a sampling request with +// tools and receive this result type. +// +// When unmarshaling, a single JSON content object is accepted and wrapped in a +// one-element slice, for compatibility with clients that return a single block. +type CreateMessageWithToolsResult struct { + Meta `json:"_meta,omitempty"` + Content []Content `json:"content"` + Model string `json:"model"` + Role Role `json:"role"` + // The reason why sampling stopped. + // + // Standard values: "endTurn", "stopSequence", "maxTokens", "toolUse". + StopReason string `json:"stopReason,omitempty"` +} + +// createMessageWithToolsResultAllow lists content types valid in assistant responses. +// tool_result is excluded: it only appears in user messages. +var createMessageWithToolsResultAllow = map[string]bool{ + "text": true, "image": true, "audio": true, + "tool_use": true, +} + +func (*CreateMessageWithToolsResult) isResult() {} + +// MarshalJSON marshals the result. When Content has a single element, it is +// marshaled as a single object for compatibility with pre-2025-11-25 +// implementations that expect a single content block. 
+func (r *CreateMessageWithToolsResult) MarshalJSON() ([]byte, error) { + if len(r.Content) == 1 { + return json.Marshal(&CreateMessageResult{ + Meta: r.Meta, + Content: r.Content[0], + Model: r.Model, + Role: r.Role, + StopReason: r.StopReason, + }) + } + type result CreateMessageWithToolsResult // avoid recursion + return json.Marshal((*result)(r)) +} + +func (r *CreateMessageWithToolsResult) UnmarshalJSON(data []byte) error { + type result CreateMessageWithToolsResult // avoid recursion + var wire struct { + result + Content json.RawMessage `json:"content"` + } + if err := internaljson.Unmarshal(data, &wire); err != nil { + return err + } + var err error + if wire.result.Content, err = unmarshalContent(wire.Content, createMessageWithToolsResultAllow); err != nil { + return err + } + *r = CreateMessageWithToolsResult(wire.result) + return nil +} + +// toWithTools converts a CreateMessageResult to CreateMessageWithToolsResult. +func (r *CreateMessageResult) toWithTools() *CreateMessageWithToolsResult { + var content []Content + if r.Content != nil { + content = []Content{r.Content} + } + return &CreateMessageWithToolsResult{ + Meta: r.Meta, + Content: content, + Model: r.Model, + Role: r.Role, + StopReason: r.StopReason, + } +} + type GetPromptParams struct { // This property is reserved by the protocol to allow clients and servers to // attach additional metadata to their responses. @@ -838,7 +1058,7 @@ func (m *PromptMessage) UnmarshalJSON(data []byte) error { msg Content *wireContent `json:"content"` } - if err := json.Unmarshal(data, &wire); err != nil { + if err := internaljson.Unmarshal(data, &wire); err != nil { return err } var err error @@ -982,25 +1202,46 @@ func (x *RootsListChangedParams) SetProgressToken(t any) { setProgressToken(x, t // below directly above ClientCapabilities. // SamplingCapabilities describes the client's support for sampling. 
-type SamplingCapabilities struct{} +type SamplingCapabilities struct { + // Context indicates the client supports includeContext values other than "none". + Context *SamplingContextCapabilities `json:"context,omitempty"` + // Tools indicates the client supports tools and toolChoice in sampling requests. + Tools *SamplingToolsCapabilities `json:"tools,omitempty"` +} + +// SamplingContextCapabilities indicates the client supports context inclusion. +type SamplingContextCapabilities struct{} + +// SamplingToolsCapabilities indicates the client supports tool use in sampling. +type SamplingToolsCapabilities struct{} + +// ToolChoice controls how the model uses tools during sampling. +type ToolChoice struct { + // Mode controls tool invocation behavior: + // - "auto": Model decides whether to use tools (default) + // - "required": Model must use at least one tool + // - "none": Model must not use any tools + Mode string `json:"mode,omitempty"` +} // ElicitationCapabilities describes the capabilities for elicitation. // // If neither Form nor URL is set, the 'Form' capabilitiy is assumed. type ElicitationCapabilities struct { - Form *FormElicitationCapabilities - URL *URLElicitationCapabilities + Form *FormElicitationCapabilities `json:"form,omitempty"` + URL *URLElicitationCapabilities `json:"url,omitempty"` } // FormElicitationCapabilities describes capabilities for form elicitation. -type FormElicitationCapabilities struct { -} +type FormElicitationCapabilities struct{} // URLElicitationCapabilities describes capabilities for url elicitation. -type URLElicitationCapabilities struct { -} +type URLElicitationCapabilities struct{} // Describes a message issued to or received from an LLM API. +// +// For assistant messages, Content may be text, image, audio, or tool_use. +// For user messages, Content may be text, image, audio, or tool_result. 
type SamplingMessage struct { Content Content `json:"content"` Role Role `json:"role"` @@ -1014,11 +1255,12 @@ func (m *SamplingMessage) UnmarshalJSON(data []byte) error { msg Content *wireContent `json:"content"` } - if err := json.Unmarshal(data, &wire); err != nil { + if err := internaljson.Unmarshal(data, &wire); err != nil { return err } + // Allow text, image, audio, tool_use, and tool_result in sampling messages var err error - if wire.msg.Content, err = contentFromWire(wire.Content, map[string]bool{"text": true, "image": true, "audio": true}); err != nil { + if wire.msg.Content, err = contentFromWire(wire.Content, map[string]bool{"text": true, "image": true, "audio": true, "tool_use": true, "tool_result": true}); err != nil { return err } *m = SamplingMessage(wire.msg) @@ -1297,12 +1539,18 @@ type ToolCapabilities struct { // ServerCapabilities describes capabilities that a server supports. type ServerCapabilities struct { - // NOTE: any addition to ServerCapabilities must also be reflected in // [ServerCapabilities.clone]. // Experimental reports non-standard capabilities that the server supports. + // The caller should not modify the map after assigning it. Experimental map[string]any `json:"experimental,omitempty"` + // Extensions reports extensions that the server supports. + // Keys are extension identifiers in "{vendor-prefix}/{extension-name}" format. + // Values are per-extension settings objects; use [ServerCapabilities.AddExtension] + // to ensure nil settings are normalized to empty objects. + // The caller should not modify the map or its values after assigning it. + Extensions map[string]any `json:"extensions,omitempty"` // Completions is present if the server supports argument autocompletion // suggestions. Completions *CompletionCapabilities `json:"completions,omitempty"` @@ -1316,9 +1564,26 @@ type ServerCapabilities struct { Tools *ToolCapabilities `json:"tools,omitempty"` } -// clone returns a deep copy of the ServerCapabilities. 
+// AddExtension adds an extension with the given name and settings. +// If settings is nil, an empty map is used to ensure valid JSON serialization +// (the spec requires an object, not null). +// The settings map should not be modified after the call. +func (c *ServerCapabilities) AddExtension(name string, settings map[string]any) { + if c.Extensions == nil { + c.Extensions = make(map[string]any) + } + if settings == nil { + settings = map[string]any{} + } + c.Extensions[name] = settings +} + +// clone returns a copy of the ServerCapabilities. +// Values in the Extensions and Experimental maps are shallow-copied. func (c *ServerCapabilities) clone() *ServerCapabilities { cp := *c + cp.Experimental = maps.Clone(c.Experimental) + cp.Extensions = maps.Clone(c.Extensions) cp.Completions = shallowClone(c.Completions) cp.Logging = shallowClone(c.Logging) cp.Prompts = shallowClone(c.Prompts) diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/requests.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/requests.go index f64d6fb6..42809413 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/requests.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/requests.go @@ -24,6 +24,7 @@ type ( type ( CreateMessageRequest = ClientRequest[*CreateMessageParams] + CreateMessageWithToolsRequest = ClientRequest[*CreateMessageWithToolsParams] ElicitRequest = ClientRequest[*ElicitParams] initializedClientRequest = ClientRequest[*InitializedParams] InitializeRequest = ClientRequest[*InitializeParams] diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource.go index dc657f5d..bc4b3cb1 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource.go @@ -113,6 +113,23 @@ func computeURIFilepath(rawURI, dirFilepath string, rootFilepaths []string) (str return uriFilepathRel, nil } +// withFile calls f on 
the file at join(dir, rel), +// protecting against path traversal attacks. +func withFile(dir, rel string, f func(*os.File) error) (err error) { + r, err := os.OpenRoot(dir) + if err != nil { + return err + } + defer r.Close() + file, err := r.Open(rel) + if err != nil { + return err + } + // Record error, in case f writes. + defer func() { err = errors.Join(err, file.Close()) }() + return f(file) +} + // fileRoots transforms the Roots obtained from the client into absolute paths on // the local filesystem. // TODO(jba): expose this functionality to user ResourceHandlers, diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource_go124.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource_go124.go deleted file mode 100644 index 4a35603c..00000000 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource_go124.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2025 The Go MCP SDK Authors. All rights reserved. -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -//go:build go1.24 - -package mcp - -import ( - "errors" - "os" -) - -// withFile calls f on the file at join(dir, rel), -// protecting against path traversal attacks. -func withFile(dir, rel string, f func(*os.File) error) (err error) { - r, err := os.OpenRoot(dir) - if err != nil { - return err - } - defer r.Close() - file, err := r.Open(rel) - if err != nil { - return err - } - // Record error, in case f writes. - defer func() { err = errors.Join(err, file.Close()) }() - return f(file) -} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource_pre_go124.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource_pre_go124.go deleted file mode 100644 index d1f72eed..00000000 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/resource_pre_go124.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2025 The Go MCP SDK Authors. All rights reserved. 
-// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -//go:build !go1.24 - -package mcp - -import ( - "errors" - "os" - "path/filepath" -) - -// withFile calls f on the file at join(dir, rel). -// It does not protect against path traversal attacks. -func withFile(dir, rel string, f func(*os.File) error) (err error) { - file, err := os.Open(filepath.Join(dir, rel)) - if err != nil { - return err - } - // Record error, in case f writes. - defer func() { err = errors.Join(err, file.Close()) }() - return f(file) -} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/server.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/server.go index 687e7998..e3c03e27 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/server.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/server.go @@ -7,6 +7,7 @@ package mcp import ( "bytes" "context" + "crypto/rand" "encoding/base64" "encoding/gob" "encoding/json" @@ -24,6 +25,7 @@ import ( "time" "github.com/google/jsonschema-go/jsonschema" + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" "github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2" "github.com/modelcontextprotocol/go-sdk/internal/util" "github.com/modelcontextprotocol/go-sdk/jsonrpc" @@ -175,7 +177,7 @@ func NewServer(impl *Implementation, options *ServerOptions) *Server { } if opts.GetSessionID == nil { - opts.GetSessionID = randText + opts.GetSessionID = rand.Text } if opts.Logger == nil { // ensure we have a logger @@ -326,7 +328,7 @@ func toolForErr[In, Out any](t *Tool, h ToolHandlerFor[In, Out], cache *SchemaCa // Unmarshal and validate args. 
var in In if input != nil { - if err := json.Unmarshal(input, &in); err != nil { + if err := internaljson.Unmarshal(input, &in); err != nil { return nil, fmt.Errorf("%w: %v", jsonrpc2.ErrInvalidParams, err) } } @@ -1162,6 +1164,10 @@ func (ss *ServerSession) ListRoots(ctx context.Context, params *ListRootsParams) } // CreateMessage sends a sampling request to the client. +// +// If the client returns multiple content blocks (e.g. parallel tool calls), +// CreateMessage returns an error. Use [ServerSession.CreateMessageWithTools] +// for tool-enabled sampling. func (ss *ServerSession) CreateMessage(ctx context.Context, params *CreateMessageParams) (*CreateMessageResult, error) { if err := ss.checkInitialized(methodCreateMessage); err != nil { return nil, err @@ -1174,7 +1180,44 @@ func (ss *ServerSession) CreateMessage(ctx context.Context, params *CreateMessag p2.Messages = []*SamplingMessage{} // avoid JSON "null" params = &p2 } - return handleSend[*CreateMessageResult](ctx, methodCreateMessage, newServerRequest(ss, orZero[Params](params))) + res, err := handleSend[*CreateMessageWithToolsResult](ctx, methodCreateMessage, newServerRequest(ss, orZero[Params](params))) + if err != nil { + return nil, err + } + // Downconvert to singular content. + if len(res.Content) > 1 { + return nil, fmt.Errorf("CreateMessage result has %d content blocks; use CreateMessageWithTools for multiple content", len(res.Content)) + } + var content Content + if len(res.Content) > 0 { + content = res.Content[0] + } + return &CreateMessageResult{ + Meta: res.Meta, + Content: content, + Model: res.Model, + Role: res.Role, + StopReason: res.StopReason, + }, nil +} + +// CreateMessageWithTools sends a sampling request with tools to the client, +// returning a [CreateMessageWithToolsResult] that supports array content +// (for parallel tool calls). Use this instead of [ServerSession.CreateMessage] +// when the request includes tools. 
+func (ss *ServerSession) CreateMessageWithTools(ctx context.Context, params *CreateMessageWithToolsParams) (*CreateMessageWithToolsResult, error) { + if err := ss.checkInitialized(methodCreateMessage); err != nil { + return nil, err + } + if params == nil { + params = &CreateMessageWithToolsParams{Messages: []*SamplingMessageV2{}} + } + if params.Messages == nil { + p2 := *params + p2.Messages = []*SamplingMessageV2{} // avoid JSON "null" + params = &p2 + } + return handleSend[*CreateMessageWithToolsResult](ctx, methodCreateMessage, newServerRequest(ss, orZero[Params](params))) } // Elicit sends an elicitation request to the client asking for user input. @@ -1218,6 +1261,10 @@ func (ss *ServerSession) Elicit(ctx context.Context, params *ElicitParams) (*Eli return nil, err } + if res.Action != "accept" { + return res, nil + } + if params.RequestedSchema == nil { return res, nil } @@ -1325,7 +1372,7 @@ func initializeMethodInfo() methodInfo { info.unmarshalParams = func(m json.RawMessage) (Params, error) { var params *initializeParamsV2 if m != nil { - if err := json.Unmarshal(m, ¶ms); err != nil { + if err := internaljson.Unmarshal(m, ¶ms); err != nil { return nil, fmt.Errorf("unmarshaling %q into a %T: %w", m, params, err) } } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/shared.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/shared.go index d83eae7d..bda00c20 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/shared.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/shared.go @@ -23,6 +23,7 @@ import ( "time" "github.com/modelcontextprotocol/go-sdk/auth" + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" "github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2" "github.com/modelcontextprotocol/go-sdk/jsonrpc" ) @@ -283,7 +284,7 @@ func newMethodInfo[P paramsPtr[T], R Result, T any](flags methodFlags) methodInf unmarshalParams: func(m json.RawMessage) (Params, error) { var p P if m != nil { - if err 
:= json.Unmarshal(m, &p); err != nil { + if err := internaljson.Unmarshal(m, &p); err != nil { return nil, fmt.Errorf("unmarshaling %q into a %T: %w", m, p, err) } } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/sse.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/sse.go index ae65c16c..e57dad10 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/sse.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/sse.go @@ -7,6 +7,7 @@ package mcp import ( "bytes" "context" + "crypto/rand" "fmt" "io" "net/http" @@ -216,7 +217,7 @@ func (h *SSEHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Connection", "keep-alive") - sessionID = randText() + sessionID = rand.Text() endpoint, err := req.URL.Parse("?sessionid=" + sessionID) if err != nil { http.Error(w, "internal error: failed to create endpoint", http.StatusInternalServerError) diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/streamable.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/streamable.go index f2a28955..16bca070 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/streamable.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/streamable.go @@ -11,6 +11,7 @@ package mcp import ( "bytes" "context" + crand "crypto/rand" "encoding/json" "errors" "fmt" @@ -19,16 +20,19 @@ import ( "maps" "math" "math/rand/v2" + "net" "net/http" "slices" "strconv" "strings" "sync" - "sync/atomic" "time" "github.com/modelcontextprotocol/go-sdk/auth" + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" "github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2" + "github.com/modelcontextprotocol/go-sdk/internal/mcpgodebug" + "github.com/modelcontextprotocol/go-sdk/internal/util" "github.com/modelcontextprotocol/go-sdk/internal/xcontext" "github.com/modelcontextprotocol/go-sdk/jsonrpc" ) @@ -160,6 +164,24 @@ type StreamableHTTPOptions struct { // // If SessionTimeout is the 
zero value, idle sessions are never closed. SessionTimeout time.Duration + + // DisableLocalhostProtection disables automatic DNS rebinding protection. + // By default, requests arriving via a localhost address (127.0.0.1, [::1]) + // that have a non-localhost Host header are rejected with 403 Forbidden. + // This protects against DNS rebinding attacks regardless of whether the + // server is listening on localhost specifically or on 0.0.0.0. + // + // Only disable this if you understand the security implications. + // See: https://modelcontextprotocol.io/specification/2025-11-25/basic/security_best_practices#local-mcp-server-compromise + DisableLocalhostProtection bool + + // CrossOriginProtection allows to customize cross-origin protection. + // The deny handler set in the CrossOriginProtection through SetDenyHandler + // is ignored. + // If nil, default (zero-value) cross-origin protection will be used. + // Use `disablecrossoriginprotection` MCPGODEBUG compatibility parameter + // to disable the default protection until v1.6.0. + CrossOriginProtection *http.CrossOriginProtection } // NewStreamableHTTPHandler returns a new [StreamableHTTPHandler]. @@ -176,8 +198,10 @@ func NewStreamableHTTPHandler(getServer func(*http.Request) *Server, opts *Strea h.opts = *opts } - if h.opts.Logger == nil { // ensure we have a logger - h.opts.Logger = ensureLogger(nil) + h.opts.Logger = ensureLogger(h.opts.Logger) + + if h.opts.CrossOriginProtection == nil { + h.opts.CrossOriginProtection = &http.CrossOriginProtection{} } return h @@ -206,7 +230,47 @@ func (h *StreamableHTTPHandler) closeAll() { } } +// disablelocalhostprotection is a compatibility parameter that allows to disable +// DNS rebinding protection, which was added in the 1.4.0 version of the SDK. +// See the documentation for the mcpgodebug package for instructions how to enable it. +// The option will be removed in the 1.6.0 version of the SDK. 
+var disablelocalhostprotection = mcpgodebug.Value("disablelocalhostprotection") + +// disablecrossoriginprotection is a compatibility parameter that allows to disable +// the verification of the 'Origin' and 'Content-Type' headers, which was added in +// the 1.4.1 version of the SDK. See the documentation for the mcpgodebug package +// for instructions how to enable it. +// The option will be removed in the 1.6.0 version of the SDK. +var disablecrossoriginprotection = mcpgodebug.Value("disablecrossoriginprotection") + func (h *StreamableHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // DNS rebinding protection: auto-enabled for localhost servers. + // See: https://modelcontextprotocol.io/specification/2025-11-25/basic/security_best_practices#local-mcp-server-compromise + if !h.opts.DisableLocalhostProtection && disablelocalhostprotection != "1" { + if localAddr, ok := req.Context().Value(http.LocalAddrContextKey).(net.Addr); ok && localAddr != nil { + if util.IsLoopback(localAddr.String()) && !util.IsLoopback(req.Host) { + http.Error(w, fmt.Sprintf("Forbidden: invalid Host header %q", req.Host), http.StatusForbidden) + return + } + } + } + + if disablecrossoriginprotection != "1" { + // Verify the 'Origin' header to protect against CSRF attacks. + if err := h.opts.CrossOriginProtection.Check(req); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + // Validate 'Content-Type' header. + if req.Method == http.MethodPost { + contentType := req.Header.Get("Content-Type") + if contentType != "application/json" { + http.Error(w, "Content-Type must be 'application/json'", http.StatusUnsupportedMediaType) + return + } + } + } + // Allow multiple 'Accept' headers. 
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Accept#syntax accept := strings.Split(strings.Join(req.Header.Values("Accept"), ","), ",") @@ -373,7 +437,7 @@ func (h *StreamableHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Reque // stateless servers. body, err := io.ReadAll(req.Body) if err != nil { - http.Error(w, "failed to read body", http.StatusInternalServerError) + http.Error(w, "failed to read body", http.StatusBadRequest) return } req.Body.Close() @@ -1074,7 +1138,7 @@ func (c *streamableServerConn) servePOST(w http.ResponseWriter, req *http.Reques isInitialize = true // Extract the protocol version from InitializeParams. var params InitializeParams - if err := json.Unmarshal(jreq.Params, ¶ms); err == nil { + if err := internaljson.Unmarshal(jreq.Params, ¶ms); err == nil { initializeProtocolVersion = params.ProtocolVersion } } @@ -1140,7 +1204,7 @@ func (c *streamableServerConn) servePOST(w http.ResponseWriter, req *http.Reques // Important: don't publish the incoming messages until the stream is // registered, as the server may attempt to respond to imcoming messages as // soon as they're published. - stream, err := c.newStream(req.Context(), calls, randText()) + stream, err := c.newStream(req.Context(), calls, crand.Text()) if err != nil { http.Error(w, fmt.Sprintf("storing stream: %v", err), http.StatusInternalServerError) return @@ -1424,6 +1488,9 @@ type StreamableClientTransport struct { // - You want to avoid maintaining a persistent connection DisableStandaloneSSE bool + // OAuthHandler is an optional field that, if provided, will be used to authorize the requests. + OAuthHandler auth.OAuthHandler + // TODO(rfindley): propose exporting these. // If strict is set, the transport is in 'strict mode', where any violation // of the MCP spec causes a failure. 
@@ -1499,6 +1566,7 @@ func (t *StreamableClientTransport) Connect(ctx context.Context) (Connection, er cancel: cancel, failed: make(chan struct{}), disableStandaloneSSE: t.DisableStandaloneSSE, + oauthHandler: t.OAuthHandler, } return conn, nil } @@ -1517,6 +1585,9 @@ type streamableClientConn struct { // for receiving server-to-client notifications when no request is in flight. disableStandaloneSSE bool // from [StreamableClientTransport.DisableStandaloneSSE] + // oauthHandler is the OAuth handler for the connection. + oauthHandler auth.OAuthHandler // from [StreamableClientTransport.OAuthHandler] + // Guard calls to Close, as it may be called multiple times. closeOnce sync.Once closeErr error @@ -1539,17 +1610,6 @@ type streamableClientConn struct { sessionID string } -// errSessionMissing distinguishes if the session is known to not be present on -// the server (see [streamableClientConn.fail]). -// -// TODO(rfindley): should we expose this error value (and its corresponding -// API) to the user? -// -// The spec says that if the server returns 404, clients should reestablish -// a session. For now, we delegate that to the user, but do they need a way to -// differentiate a 'NotFound' error from other errors? -var errSessionMissing = errors.New("session not found") - var _ clientConnection = (*streamableClientConn)(nil) func (c *streamableClientConn) sessionUpdated(state clientSessionState) { @@ -1628,7 +1688,7 @@ func (c *streamableClientConn) connectStandaloneSSE() { // If err is non-nil, it is terminal, and subsequent (or pending) Reads will // fail. // -// If err wraps errSessionMissing, the failure indicates that the session is no +// If err wraps ErrSessionMissing, the failure indicates that the session is no // longer present on the server, and no final DELETE will be performed when // closing the connection. 
func (c *streamableClientConn) fail(err error) { @@ -1697,20 +1757,46 @@ func (c *streamableClientConn) Write(ctx context.Context, msg jsonrpc.Message) e return fmt.Errorf("%s: %v", requestSummary, err) } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, bytes.NewReader(data)) + doRequest := func() (*http.Request, *http.Response, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, bytes.NewReader(data)) + if err != nil { + return nil, nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json, text/event-stream") + if err := c.setMCPHeaders(req); err != nil { + // Failure to set headers means that the request was not sent. + // Wrap with ErrRejected so the jsonrpc2 connection doesn't set writeErr + // and permanently break the connection. + return nil, nil, fmt.Errorf("%s: %w: %v", requestSummary, jsonrpc2.ErrRejected, err) + } + resp, err := c.client.Do(req) + if err != nil { + // Any error from client.Do means the request didn't reach the server. + // Wrap with ErrRejected so the jsonrpc2 connection doesn't set writeErr + // and permanently break the connection. + err = fmt.Errorf("%s: %w: %v", requestSummary, jsonrpc2.ErrRejected, err) + } + return req, resp, err + } + + req, resp, err := doRequest() if err != nil { return err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Accept", "application/json, text/event-stream") - c.setMCPHeaders(req) - resp, err := c.client.Do(req) - if err != nil { - // Any error from client.Do means the request didn't reach the server. - // Wrap with ErrRejected so the jsonrpc2 connection doesn't set writeErr - // and permanently break the connection. 
- return fmt.Errorf("%w: %s: %v", jsonrpc2.ErrRejected, requestSummary, err) + if (resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden) && c.oauthHandler != nil { + if err := c.oauthHandler.Authorize(ctx, req, resp); err != nil { + // Wrap with ErrRejected so the jsonrpc2 connection doesn't set writeErr + // and permanently break the connection. + // Wrap the authorization error as well for client inspection. + return fmt.Errorf("%s: %w: %w", requestSummary, jsonrpc2.ErrRejected, err) + } + // Retry the request after successful authorization. + _, resp, err = doRequest() + if err != nil { + return err + } } if err := c.checkResponse(requestSummary, resp); err != nil { @@ -1778,23 +1864,32 @@ func (c *streamableClientConn) Write(ctx context.Context, msg jsonrpc.Message) e return nil } -// testAuth controls whether a fake Authorization header is added to outgoing requests. -// TODO: replace with a better mechanism when client-side auth is in place. -var testAuth atomic.Bool - -func (c *streamableClientConn) setMCPHeaders(req *http.Request) { +func (c *streamableClientConn) setMCPHeaders(req *http.Request) error { c.mu.Lock() defer c.mu.Unlock() + if c.oauthHandler != nil { + ts, err := c.oauthHandler.TokenSource(c.ctx) + if err != nil { + return err + } + if ts != nil { + token, err := ts.Token() + if err != nil { + return err + } + if token != nil { + req.Header.Set("Authorization", "Bearer "+token.AccessToken) + } + } + } if c.initializedResult != nil { req.Header.Set(protocolVersionHeader, c.initializedResult.ProtocolVersion) } if c.sessionID != "" { req.Header.Set(sessionIDHeader, c.sessionID) } - if testAuth.Load() { - req.Header.Set("Authorization", "Bearer foo") - } + return nil } func (c *streamableClientConn) handleJSON(requestSummary string, resp *http.Response) { @@ -1823,15 +1918,14 @@ func (c *streamableClientConn) handleJSON(requestSummary string, resp *http.Resp // stream is complete when we receive its response. 
Otherwise, this is the // standalone stream. func (c *streamableClientConn) handleSSE(ctx context.Context, requestSummary string, resp *http.Response, forCall *jsonrpc2.Request) { + // Track the last event ID to detect progress. + // The retry counter is only reset when progress is made (lastEventID advances). + // This prevents infinite retry loops when a server repeatedly terminates + // connections without making progress (#679). + var prevLastEventID string + retriesWithoutProgress := 0 + for { - // Connection was successful. Continue the loop with the new response. - // - // TODO(#679): we should set a reasonable limit on the number of times - // we'll try getting a response for a given request, or enforce that we - // actually make progress. - // - // Eventually, if we don't get the response, we should stop trying and - // fail the request. lastEventID, reconnectDelay, clientClosed := c.processStream(ctx, requestSummary, resp, forCall) // If the connection was closed by the client, we're done. @@ -1845,6 +1939,23 @@ func (c *streamableClientConn) handleSSE(ctx context.Context, requestSummary str return } + // Check if we made progress (lastEventID advanced). + // Only reset the retry counter when actual progress is made. + if lastEventID != "" && lastEventID != prevLastEventID { + // Progress was made: reset the retry counter. + retriesWithoutProgress = 0 + prevLastEventID = lastEventID + } else { + // No progress: increment the retry counter. + retriesWithoutProgress++ + if retriesWithoutProgress > c.maxRetries { + if ctx.Err() == nil { + c.fail(fmt.Errorf("%s: exceeded %d retries without progress (session ID: %v)", requestSummary, c.maxRetries, c.sessionID)) + } + return + } + } + // The stream was interrupted or ended by the server. Attempt to reconnect. 
newResp, err := c.connectSSE(ctx, lastEventID, reconnectDelay, false) if err != nil { @@ -1879,9 +1990,9 @@ func (c *streamableClientConn) checkResponse(requestSummary string, resp *http.R // which it MUST respond to requests containing that session ID with HTTP // 404 Not Found." if resp.StatusCode == http.StatusNotFound { - // Return an errSessionMissing to avoid sending a redundant DELETE when the + // Return an ErrSessionMissing to avoid sending a redundant DELETE when the // session is already gone. - return fmt.Errorf("%s: failed to connect (session ID: %v): %w", requestSummary, c.sessionID, errSessionMissing) + return fmt.Errorf("%s: failed to connect (session ID: %v): %w", requestSummary, c.sessionID, ErrSessionMissing) } // Transient server errors (502, 503, 504, 429) should not break the connection. // Wrap them with ErrRejected so the jsonrpc2 layer doesn't set writeErr. @@ -1909,6 +2020,14 @@ func (c *streamableClientConn) processStream(ctx context.Context, requestSummary if ctx.Err() != nil { return "", 0, true // don't reconnect: client cancelled } + + // Malformed events are hard errors that indicate corrupted data or protocol + // violations. These should fail the connection permanently. + if errors.Is(err, errMalformedEvent) { + c.fail(fmt.Errorf("%s: %v", requestSummary, err)) + return "", 0, true + } + break } @@ -1921,6 +2040,15 @@ func (c *streamableClientConn) processStream(ctx context.Context, requestSummary reconnectDelay = time.Duration(n) * time.Millisecond } } + + // According to SSE specification + // (https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation) + // events with an empty data buffer are allowed. + // In MCP these can be priming events (SEP-1699) that carry only a Last-Event-ID for stream resumption. 
+ if len(evt.Data) == 0 { + continue + } + // According to SSE spec, events with no name default to "message" if evt.Name != "" && evt.Name != "message" { continue @@ -2014,7 +2142,9 @@ func (c *streamableClientConn) connectSSE(ctx context.Context, lastEventID strin if err != nil { return nil, err } - c.setMCPHeaders(req) + if err := c.setMCPHeaders(req); err != nil { + return nil, err + } if lastEventID != "" { req.Header.Set(lastEventIDHeader, lastEventID) } @@ -2038,15 +2168,16 @@ func (c *streamableClientConn) connectSSE(ctx context.Context, lastEventID strin // Close implements the [Connection] interface. func (c *streamableClientConn) Close() error { c.closeOnce.Do(func() { - if errors.Is(c.failure(), errSessionMissing) { + if errors.Is(c.failure(), ErrSessionMissing) { // If the session is missing, no need to delete it. } else { req, err := http.NewRequestWithContext(c.ctx, http.MethodDelete, c.url, nil) if err != nil { c.closeErr = err } else { - c.setMCPHeaders(req) - if _, err := c.client.Do(req); err != nil { + if err := c.setMCPHeaders(req); err != nil { + c.closeErr = err + } else if _, err := c.client.Do(req); err != nil { c.closeErr = err } } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/streamable_client.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/streamable_client.go index 41a10046..c2cc25b8 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/streamable_client.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/streamable_client.go @@ -161,7 +161,7 @@ The client must handle two response formats from POST requests: - DELETE: Terminate the session - Used by [streamableClientConn.Close] - - Skipped if session is already known to be gone ([errSessionMissing]) + - Skipped if session is already known to be gone ([ErrSessionMissing]) # Error Handling @@ -173,7 +173,7 @@ Errors are categorized and handled differently: - Triggers reconnection in [streamableClientConn.handleSSE] 2. 
Terminal (breaks the connection): - - 404 Not Found: Session terminated by server ([errSessionMissing]) + - 404 Not Found: Session terminated by server ([ErrSessionMissing]) - Message decode errors: Protocol violation - Context cancellation: Client closed connection - Mismatched session IDs: Protocol error @@ -183,7 +183,7 @@ Terminal errors are stored via [streamableClientConn.fail] and returned by subsequent [streamableClientConn.Read] calls. The [streamableClientConn.failed] channel signals that the connection is broken. -Special case: [errSessionMissing] indicates the server has terminated the session, +Special case: [ErrSessionMissing] indicates the server has terminated the session, so [streamableClientConn.Close] skips the DELETE request. # Protocol Version Header diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/tool.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/tool.go index 8aa7c3c0..3ecb59d3 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/tool.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/tool.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/google/jsonschema-go/jsonschema" + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" ) // A ToolHandler handles a call to tools/call. 
@@ -83,7 +84,7 @@ func applySchema(data json.RawMessage, resolved *jsonschema.Resolved) (json.RawM if resolved != nil { v := make(map[string]any) if len(data) > 0 { - if err := json.Unmarshal(data, &v); err != nil { + if err := internaljson.Unmarshal(data, &v); err != nil { return nil, fmt.Errorf("unmarshaling arguments: %w", err) } } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/transport.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/transport.go index 25f1d5d0..5f2a5007 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/transport.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/transport.go @@ -15,6 +15,7 @@ import ( "os" "sync" + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" "github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2" "github.com/modelcontextprotocol/go-sdk/internal/xcontext" "github.com/modelcontextprotocol/go-sdk/jsonrpc" @@ -24,6 +25,10 @@ import ( // is closed or in the process of closing. var ErrConnectionClosed = errors.New("connection closed") +// ErrSessionMissing is returned when the session is known to not be present on +// the server. +var ErrSessionMissing = errors.New("session not found") + // A Transport is used to create a bidirectional connection between MCP client // and server. // @@ -189,7 +194,7 @@ type canceller struct { func (c *canceller) Preempt(ctx context.Context, req *jsonrpc.Request) (result any, err error) { if req.Method == notificationCancelled { var params CancelledParams - if err := json.Unmarshal(req.Params, ¶ms); err != nil { + if err := internaljson.Unmarshal(req.Params, ¶ms); err != nil { return nil, err } id, err := jsonrpc2.MakeID(params.RequestID) @@ -565,7 +570,7 @@ func (t *ioConn) Read(ctx context.Context) (jsonrpc.Message, error) { func readBatch(data []byte) (msgs []jsonrpc.Message, isBatch bool, _ error) { // Try to read an array of messages first. 
var rawBatch []json.RawMessage - if err := json.Unmarshal(data, &rawBatch); err == nil { + if err := internaljson.Unmarshal(data, &rawBatch); err == nil { if len(rawBatch) == 0 { return nil, true, fmt.Errorf("empty batch") } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/util.go b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/util.go index 5ada466e..8ffaa74e 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/mcp/util.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/mcp/util.go @@ -5,8 +5,9 @@ package mcp import ( - "crypto/rand" "encoding/json" + + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" ) func assert(cond bool, msg string) { @@ -15,20 +16,6 @@ func assert(cond bool, msg string) { } } -// Copied from crypto/rand. -// TODO: once 1.24 is assured, just use crypto/rand. -const base32alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" - -func randText() string { - // ⌈log₃₂ 2¹²⁸⌉ = 26 chars - src := make([]byte, 26) - rand.Read(src) - for i := range src { - src[i] = base32alphabet[src[i]%32] - } - return string(src) -} - // remarshal marshals from to JSON, and then unmarshals into to, which must be // a pointer type. func remarshal(from, to any) error { @@ -36,7 +23,7 @@ func remarshal(from, to any) error { if err != nil { return err } - if err := json.Unmarshal(data, to); err != nil { + if err := internaljson.Unmarshal(data, to); err != nil { return err } return nil diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/auth_meta.go b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/auth_meta.go index 9aa0c8d7..36210576 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/auth_meta.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/auth_meta.go @@ -28,8 +28,6 @@ import ( // // [RFC 8414]: https://tools.ietf.org/html/rfc8414) type AuthServerMeta struct { - // GENERATED BY GEMINI 2.5. - // Issuer is the REQUIRED URL identifying the authorization server. 
Issuer string `json:"issuer"` @@ -113,55 +111,63 @@ type AuthServerMeta struct { // CodeChallengeMethodsSupported is a RECOMMENDED JSON array of strings containing a list of // PKCE code challenge methods supported by this authorization server. CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported,omitempty"` -} -var wellKnownPaths = []string{ - "/.well-known/oauth-authorization-server", - "/.well-known/openid-configuration", + // ClientIDMetadataDocumentSupported is a boolean indicating whether the authorization server + // supports client ID metadata documents. + ClientIDMetadataDocumentSupported bool `json:"client_id_metadata_document_supported,omitempty"` } // GetAuthServerMeta issues a GET request to retrieve authorization server metadata -// from an OAuth authorization server with the given issuerURL. +// from an OAuth authorization server with the given metadataURL. // // It follows [RFC 8414]: -// - The well-known paths specified there are inserted into the URL's path, one at time. -// The first to succeed is used. -// - The Issuer field is checked against issuerURL. +// - The metadataURL must use HTTPS or be a local address. +// - The Issuer field is checked against metadataURL.Issuer. +// +// It also verifies that the authorization server supports PKCE and that the URLs +// in the metadata don't use dangerous schemes. +// +// It returns an error if the request fails with a non-4xx status code or the fetched +// metadata doesn't pass security validations. +// It returns nil if the request fails with a 4xx status code. // // [RFC 8414]: https://tools.ietf.org/html/rfc8414 -func GetAuthServerMeta(ctx context.Context, issuerURL string, c *http.Client) (*AuthServerMeta, error) { - var errs []error - for _, p := range wellKnownPaths { - u, err := prependToPath(issuerURL, p) - if err != nil { - // issuerURL is bad; no point in continuing. 
- return nil, err - } - asm, err := getJSON[AuthServerMeta](ctx, c, u, 1<<20) - if err == nil { - if asm.Issuer != issuerURL { // section 3.3 - // Security violation; don't keep trying. - return nil, fmt.Errorf("metadata issuer %q does not match issuer URL %q", asm.Issuer, issuerURL) - } - - if len(asm.CodeChallengeMethodsSupported) == 0 { - return nil, fmt.Errorf("authorization server at %s does not implement PKCE", issuerURL) +func GetAuthServerMeta(ctx context.Context, metadataURL, issuer string, c *http.Client) (*AuthServerMeta, error) { + // Only allow HTTP for local addresses (testing or development purposes). + if err := checkHTTPSOrLoopback(metadataURL); err != nil { + return nil, fmt.Errorf("metadataURL: %v", err) + } + asm, err := getJSON[AuthServerMeta](ctx, c, metadataURL, 1<<20) + if err != nil { + var httpErr *httpStatusError + if errors.As(err, &httpErr) { + if 400 <= httpErr.StatusCode && httpErr.StatusCode < 500 { + return nil, nil } + } + return nil, fmt.Errorf("%v", err) // Do not expose error types. + } + if asm.Issuer != issuer { + // Validate the Issuer field (see RFC 8414, section 3.3). + return nil, fmt.Errorf("metadata issuer %q does not match issuer URL %q", asm.Issuer, issuer) + } - // Validate endpoint URLs to prevent XSS attacks (see #526). - if err := validateAuthServerMetaURLs(asm); err != nil { - return nil, err - } + if len(asm.CodeChallengeMethodsSupported) == 0 { + return nil, fmt.Errorf("authorization server at %s does not implement PKCE", issuer) + } - return asm, nil - } - errs = append(errs, err) + // Validate endpoint URLs to prevent XSS attacks (see #526). + if err := validateAuthServerMetaURLs(asm); err != nil { + return nil, err } - return nil, fmt.Errorf("failed to get auth server metadata from %q: %w", issuerURL, errors.Join(errs...)) + + return asm, nil } // validateAuthServerMetaURLs validates all URL fields in AuthServerMeta // to ensure they don't use dangerous schemes that could enable XSS attacks. 
+// It also validates that URLs likely to be called by the client use +// HTTPS or are loopback addresses. func validateAuthServerMetaURLs(asm *AuthServerMeta) error { urls := []struct { name string @@ -183,5 +189,22 @@ func validateAuthServerMetaURLs(asm *AuthServerMeta) error { return fmt.Errorf("%s: %w", u.name, err) } } + + urls = []struct { + name string + value string + }{ + {"authorization_endpoint", asm.AuthorizationEndpoint}, + {"token_endpoint", asm.TokenEndpoint}, + {"registration_endpoint", asm.RegistrationEndpoint}, + {"introspection_endpoint", asm.IntrospectionEndpoint}, + } + + for _, u := range urls { + if err := checkHTTPSOrLoopback(u.value); err != nil { + return fmt.Errorf("%s: %w", u.name, err) + } + } + return nil } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/dcr.go b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/dcr.go index c64cb8cd..6db30255 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/dcr.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/dcr.go @@ -17,6 +17,8 @@ import ( "io" "net/http" "time" + + internaljson "github.com/modelcontextprotocol/go-sdk/internal/json" ) // ClientRegistrationMetadata represents the client metadata fields for the DCR POST request (RFC 7591). 
@@ -144,7 +146,7 @@ func (r *ClientRegistrationResponse) UnmarshalJSON(data []byte) error { }{ alias: (*alias)(r), } - if err := json.Unmarshal(data, &aux); err != nil { + if err := internaljson.Unmarshal(data, &aux); err != nil { return err } if aux.ClientIDIssuedAt != 0 { @@ -206,7 +208,7 @@ func RegisterClient(ctx context.Context, registrationEndpoint string, clientMeta if resp.StatusCode == http.StatusCreated { var regResponse ClientRegistrationResponse - if err := json.Unmarshal(body, ®Response); err != nil { + if err := internaljson.Unmarshal(body, ®Response); err != nil { return nil, fmt.Errorf("failed to decode successful registration response: %w (%s)", err, string(body)) } if regResponse.ClientID == "" { @@ -221,7 +223,7 @@ func RegisterClient(ctx context.Context, registrationEndpoint string, clientMeta if resp.StatusCode == http.StatusBadRequest { var regError ClientRegistrationError - if err := json.Unmarshal(body, ®Error); err != nil { + if err := internaljson.Unmarshal(body, ®Error); err != nil { return nil, fmt.Errorf("failed to decode registration error response: %w (%s)", err, string(body)) } return nil, ®Error diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/oauth2.go b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/oauth2.go index cdda695b..d8aeb3c2 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/oauth2.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/oauth2.go @@ -17,23 +17,16 @@ import ( "net/http" "net/url" "strings" + + "github.com/modelcontextprotocol/go-sdk/internal/util" ) -// prependToPath prepends pre to the path of urlStr. -// When pre is the well-known path, this is the algorithm specified in both RFC 9728 -// section 3.1 and RFC 8414 section 3.1. 
-func prependToPath(urlStr, pre string) (string, error) { - u, err := url.Parse(urlStr) - if err != nil { - return "", err - } - p := "/" + strings.Trim(pre, "/") - if u.Path != "" { - p += "/" - } +type httpStatusError struct { + StatusCode int +} - u.Path = p + strings.TrimLeft(u.Path, "/") - return u.String(), nil +func (e *httpStatusError) Error() string { + return fmt.Sprintf("bad status %d", e.StatusCode) } // getJSON retrieves JSON and unmarshals JSON from the URL, as specified in both @@ -53,11 +46,9 @@ func getJSON[T any](ctx context.Context, c *http.Client, url string, limit int64 } defer res.Body.Close() - // Specs require a 200. if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("bad status %s", res.Status) + return nil, &httpStatusError{StatusCode: res.StatusCode} } - // Specs require application/json. ct := res.Header.Get("Content-Type") mediaType, _, err := mime.ParseMediaType(ct) if err != nil || mediaType != "application/json" { @@ -89,3 +80,17 @@ func checkURLScheme(u string) error { } return nil } + +func checkHTTPSOrLoopback(addr string) error { + if addr == "" { + return nil + } + u, err := url.Parse(addr) + if err != nil { + return err + } + if !util.IsLoopback(u.Host) && u.Scheme != "https" { + return fmt.Errorf("URL %q does not use HTTPS or is not a loopback address", addr) + } + return nil +} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/oauthex.go b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/oauthex.go index 34ed55b5..151da7e5 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/oauthex.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/oauthex.go @@ -4,89 +4,3 @@ // Package oauthex implements extensions to OAuth2. package oauthex - -// ProtectedResourceMetadata is the metadata for an OAuth 2.0 protected resource, -// as defined in section 2 of https://www.rfc-editor.org/rfc/rfc9728.html. 
-// -// The following features are not supported: -// - additional keys (§2, last sentence) -// - human-readable metadata (§2.1) -// - signed metadata (§2.2) -type ProtectedResourceMetadata struct { - // GENERATED BY GEMINI 2.5. - - // Resource (resource) is the protected resource's resource identifier. - // Required. - Resource string `json:"resource"` - - // AuthorizationServers (authorization_servers) is an optional slice containing a list of - // OAuth authorization server issuer identifiers (as defined in RFC 8414) that can be - // used with this protected resource. - AuthorizationServers []string `json:"authorization_servers,omitempty"` - - // JWKSURI (jwks_uri) is an optional URL of the protected resource's JSON Web Key (JWK) Set - // document. This contains public keys belonging to the protected resource, such as - // signing key(s) that the resource server uses to sign resource responses. - JWKSURI string `json:"jwks_uri,omitempty"` - - // ScopesSupported (scopes_supported) is a recommended slice containing a list of scope - // values (as defined in RFC 6749) used in authorization requests to request access - // to this protected resource. - ScopesSupported []string `json:"scopes_supported,omitempty"` - - // BearerMethodsSupported (bearer_methods_supported) is an optional slice containing - // a list of the supported methods of sending an OAuth 2.0 bearer token to the - // protected resource. Defined values are "header", "body", and "query". - BearerMethodsSupported []string `json:"bearer_methods_supported,omitempty"` - - // ResourceSigningAlgValuesSupported (resource_signing_alg_values_supported) is an optional - // slice of JWS signing algorithms (alg values) supported by the protected - // resource for signing resource responses. 
- ResourceSigningAlgValuesSupported []string `json:"resource_signing_alg_values_supported,omitempty"` - - // ResourceName (resource_name) is a human-readable name of the protected resource - // intended for display to the end user. It is RECOMMENDED that this field be included. - // This value may be internationalized. - ResourceName string `json:"resource_name,omitempty"` - - // ResourceDocumentation (resource_documentation) is an optional URL of a page containing - // human-readable information for developers using the protected resource. - // This value may be internationalized. - ResourceDocumentation string `json:"resource_documentation,omitempty"` - - // ResourcePolicyURI (resource_policy_uri) is an optional URL of a page containing - // human-readable policy information on how a client can use the data provided. - // This value may be internationalized. - ResourcePolicyURI string `json:"resource_policy_uri,omitempty"` - - // ResourceTOSURI (resource_tos_uri) is an optional URL of a page containing the protected - // resource's human-readable terms of service. This value may be internationalized. - ResourceTOSURI string `json:"resource_tos_uri,omitempty"` - - // TLSClientCertificateBoundAccessTokens (tls_client_certificate_bound_access_tokens) is an - // optional boolean indicating support for mutual-TLS client certificate-bound - // access tokens (RFC 8705). Defaults to false if omitted. - TLSClientCertificateBoundAccessTokens bool `json:"tls_client_certificate_bound_access_tokens,omitempty"` - - // AuthorizationDetailsTypesSupported (authorization_details_types_supported) is an optional - // slice of 'type' values supported by the resource server for the - // 'authorization_details' parameter (RFC 9396). 
- AuthorizationDetailsTypesSupported []string `json:"authorization_details_types_supported,omitempty"` - - // DPOPSigningAlgValuesSupported (dpop_signing_alg_values_supported) is an optional - // slice of JWS signing algorithms supported by the resource server for validating - // DPoP proof JWTs (RFC 9449). - DPOPSigningAlgValuesSupported []string `json:"dpop_signing_alg_values_supported,omitempty"` - - // DPOPBoundAccessTokensRequired (dpop_bound_access_tokens_required) is an optional boolean - // specifying whether the protected resource always requires the use of DPoP-bound - // access tokens (RFC 9449). Defaults to false if omitted. - DPOPBoundAccessTokensRequired bool `json:"dpop_bound_access_tokens_required,omitempty"` - - // SignedMetadata (signed_metadata) is an optional JWT containing metadata parameters - // about the protected resource as claims. If present, these values take precedence - // over values conveyed in plain JSON. - // TODO:implement. - // Note that §2.2 says it's okay to ignore this. - // SignedMetadata string `json:"signed_metadata,omitempty"` -} diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/resource_meta.go b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/resource_meta.go index bb61f797..4680c153 100644 --- a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/resource_meta.go +++ b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/resource_meta.go @@ -38,6 +38,8 @@ const defaultProtectedResourceMetadataURI = "/.well-known/oauth-protected-resour // // It then retrieves the metadata at that location using the given client (or the // default client if nil) and validates its resource field against resourceID. +// +// Deprecated: Use [GetProtectedResourceMetadata] instead. This function will be removed in v1.5.0. 
func GetProtectedResourceMetadataFromID(ctx context.Context, resourceID string, c *http.Client) (_ *ProtectedResourceMetadata, err error) { defer util.Wrapf(&err, "GetProtectedResourceMetadataFromID(%q)", resourceID) @@ -47,7 +49,7 @@ func GetProtectedResourceMetadataFromID(ctx context.Context, resourceID string, } // Insert well-known URI into URL. u.Path = path.Join(defaultProtectedResourceMetadataURI, u.Path) - return getPRM(ctx, u.String(), c, resourceID) + return GetProtectedResourceMetadata(ctx, u.String(), resourceID, c) } // GetProtectedResourceMetadataFromHeader retrieves protected resource metadata @@ -57,8 +59,9 @@ func GetProtectedResourceMetadataFromID(ctx context.Context, resourceID string, // Per RFC 9728 section 3.3, it validates that the resource field of the resulting metadata // matches the serverURL (the URL that the client used to make the original request to the resource server). // If there is no metadata URL in the header, it returns nil, nil. +// +// Deprecated: Use [GetProtectedResourceMetadata] instead. This function will be removed in v1.5.0. func GetProtectedResourceMetadataFromHeader(ctx context.Context, serverURL string, header http.Header, c *http.Client) (_ *ProtectedResourceMetadata, err error) { - defer util.Wrapf(&err, "GetProtectedResourceMetadataFromHeader") headers := header[http.CanonicalHeaderKey("WWW-Authenticate")] if len(headers) == 0 { return nil, nil @@ -67,67 +70,64 @@ func GetProtectedResourceMetadataFromHeader(ctx context.Context, serverURL strin if err != nil { return nil, err } - metadataURL := ResourceMetadataURL(cs) + metadataURL := resourceMetadataURL(cs) if metadataURL == "" { return nil, nil } - return getPRM(ctx, metadataURL, c, serverURL) + return GetProtectedResourceMetadata(ctx, metadataURL, serverURL, c) } -// getPRM makes a GET request to the given URL, and validates the response. -// As part of the validation, it compares the returned resource field to wantResource. 
-func getPRM(ctx context.Context, purl string, c *http.Client, wantResource string) (*ProtectedResourceMetadata, error) { - if !strings.HasPrefix(strings.ToUpper(purl), "HTTPS://") { - return nil, fmt.Errorf("resource URL %q does not use HTTPS", purl) +// resourceMetadataURL returns a resource metadata URL from the given "WWW-Authenticate" header challenges, +// or the empty string if there is none. +func resourceMetadataURL(cs []Challenge) string { + for _, c := range cs { + if u := c.Params["resource_metadata"]; u != "" { + return u + } } - prm, err := getJSON[ProtectedResourceMetadata](ctx, c, purl, 1<<20) + return "" +} + +// GetProtectedResourceMetadataFromID issues a GET request to retrieve protected resource +// metadata from a resource server. +// The metadataURL is typically a URL with a host:port and possibly a path. +// The resourceURL is the resource URI the metadataURL is for. +// The following checks are performed: +// - The metadataURL must use HTTPS or be a local address. +// - The resource field of the resulting metadata must match the resourceURL. +// - The authorization_servers field of the resulting metadata is checked for dangerous URL schemes. +func GetProtectedResourceMetadata(ctx context.Context, metadataURL, resourceURL string, c *http.Client) (_ *ProtectedResourceMetadata, err error) { + defer util.Wrapf(&err, "GetProtectedResourceMetadata(%q)", metadataURL) + // Only allow HTTP for local addresses (testing or development purposes). + if err := checkHTTPSOrLoopback(metadataURL); err != nil { + return nil, fmt.Errorf("metadataURL: %v", err) + } + prm, err := getJSON[ProtectedResourceMetadata](ctx, c, metadataURL, 1<<20) if err != nil { return nil, err } // Validate the Resource field (see RFC 9728, section 3.3). 
- if prm.Resource != wantResource { - return nil, fmt.Errorf("got metadata resource %q, want %q", prm.Resource, wantResource) + if prm.Resource != resourceURL { + return nil, fmt.Errorf("got metadata resource %q, want %q", prm.Resource, resourceURL) } // Validate the authorization server URLs to prevent XSS attacks (see #526). - for _, u := range prm.AuthorizationServers { + for i, u := range prm.AuthorizationServers { if err := checkURLScheme(u); err != nil { - return nil, err + return nil, fmt.Errorf("authorization_servers[%d]: %v", i, err) } - } - return prm, nil -} - -// challenge represents a single authentication challenge from a WWW-Authenticate header. -// As per RFC 9110, Section 11.6.1, a challenge consists of a scheme and optional parameters. -type challenge struct { - // GENERATED BY GEMINI 2.5. - // - // Scheme is the authentication scheme (e.g., "Bearer", "Basic"). - // It is case-insensitive. A parsed value will always be lower-case. - Scheme string - // Params is a map of authentication parameters. - // Keys are case-insensitive. Parsed keys are always lower-case. - Params map[string]string -} - -// ResourceMetadataURL returns a resource metadata URL from the given challenges, -// or the empty string if there is none. -func ResourceMetadataURL(cs []challenge) string { - for _, c := range cs { - if u := c.Params["resource_metadata"]; u != "" { - return u + if err := checkHTTPSOrLoopback(u); err != nil { + return nil, fmt.Errorf("authorization_servers[%d]: %v", i, err) } } - return "" + return prm, nil } // ParseWWWAuthenticate parses a WWW-Authenticate header string. // The header format is defined in RFC 9110, Section 11.6.1, and can contain // one or more challenges, separated by commas. // It returns a slice of challenges or an error if one of the headers is malformed. 
-func ParseWWWAuthenticate(headers []string) ([]challenge, error) { - // GENERATED BY GEMINI 2.5 (human-tweaked) - var challenges []challenge +func ParseWWWAuthenticate(headers []string) ([]Challenge, error) { + var challenges []Challenge for _, h := range headers { challengeStrings, err := splitChallenges(h) if err != nil { @@ -151,7 +151,6 @@ func ParseWWWAuthenticate(headers []string) ([]challenge, error) { // It correctly handles commas within quoted strings and distinguishes between // commas separating auth-params and commas separating challenges. func splitChallenges(header string) ([]string, error) { - // GENERATED BY GEMINI 2.5. var challenges []string inQuotes := false start := 0 @@ -195,15 +194,14 @@ func splitChallenges(header string) ([]string, error) { // parseSingleChallenge parses a string containing exactly one challenge. // challenge = auth-scheme [ 1*SP ( token68 / #auth-param ) ] -func parseSingleChallenge(s string) (challenge, error) { - // GENERATED BY GEMINI 2.5, human-tweaked. +func parseSingleChallenge(s string) (Challenge, error) { s = strings.TrimSpace(s) if s == "" { - return challenge{}, errors.New("empty challenge string") + return Challenge{}, errors.New("empty challenge string") } scheme, paramsStr, found := strings.Cut(s, " ") - c := challenge{Scheme: strings.ToLower(scheme)} + c := Challenge{Scheme: strings.ToLower(scheme)} if !found { return c, nil } @@ -215,7 +213,7 @@ func parseSingleChallenge(s string) (challenge, error) { // Find the end of the parameter key. keyEnd := strings.Index(paramsStr, "=") if keyEnd <= 0 { - return challenge{}, fmt.Errorf("malformed auth parameter: expected key=value, but got %q", paramsStr) + return Challenge{}, fmt.Errorf("malformed auth parameter: expected key=value, but got %q", paramsStr) } key := strings.TrimSpace(paramsStr[:keyEnd]) @@ -243,7 +241,7 @@ func parseSingleChallenge(s string) (challenge, error) { // A quoted string must be terminated. 
if i == len(paramsStr) { - return challenge{}, fmt.Errorf("unterminated quoted string in auth parameter") + return Challenge{}, fmt.Errorf("unterminated quoted string in auth parameter") } value = valBuilder.String() @@ -261,7 +259,7 @@ func parseSingleChallenge(s string) (challenge, error) { } } if value == "" { - return challenge{}, fmt.Errorf("no value for auth param %q", key) + return Challenge{}, fmt.Errorf("no value for auth param %q", key) } // Per RFC 9110, parameter keys are case-insensitive. @@ -272,10 +270,10 @@ func parseSingleChallenge(s string) (challenge, error) { paramsStr = strings.TrimSpace(paramsStr[1:]) } else if paramsStr != "" { // If there's content but it's not a new parameter, the format is wrong. - return challenge{}, fmt.Errorf("malformed auth parameter: expected comma after value, but got %q", paramsStr) + return Challenge{}, fmt.Errorf("malformed auth parameter: expected comma after value, but got %q", paramsStr) } } // Per RFC 9110, the scheme is case-insensitive. - return challenge{Scheme: strings.ToLower(scheme), Params: params}, nil + return Challenge{Scheme: strings.ToLower(scheme), Params: params}, nil } diff --git a/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/resource_meta_public.go b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/resource_meta_public.go new file mode 100644 index 00000000..3bf7d9ac --- /dev/null +++ b/vendor/github.com/modelcontextprotocol/go-sdk/oauthex/resource_meta_public.go @@ -0,0 +1,105 @@ +// Copyright 2025 The Go MCP SDK Authors. All rights reserved. +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// This file implements Protected Resource Metadata. +// See https://www.rfc-editor.org/rfc/rfc9728.html. + +// This is a temporary file to expose the required objects to the main package. 
+ +package oauthex + +// ProtectedResourceMetadata is the metadata for an OAuth 2.0 protected resource, +// as defined in section 2 of https://www.rfc-editor.org/rfc/rfc9728.html. +// +// The following features are not supported: +// - additional keys (§2, last sentence) +// - human-readable metadata (§2.1) +// - signed metadata (§2.2) +type ProtectedResourceMetadata struct { + // Resource (resource) is the protected resource's resource identifier. + // Required. + Resource string `json:"resource"` + + // AuthorizationServers (authorization_servers) is an optional slice containing a list of + // OAuth authorization server issuer identifiers (as defined in RFC 8414) that can be + // used with this protected resource. + AuthorizationServers []string `json:"authorization_servers,omitempty"` + + // JWKSURI (jwks_uri) is an optional URL of the protected resource's JSON Web Key (JWK) Set + // document. This contains public keys belonging to the protected resource, such as + // signing key(s) that the resource server uses to sign resource responses. + JWKSURI string `json:"jwks_uri,omitempty"` + + // ScopesSupported (scopes_supported) is a recommended slice containing a list of scope + // values (as defined in RFC 6749) used in authorization requests to request access + // to this protected resource. + ScopesSupported []string `json:"scopes_supported,omitempty"` + + // BearerMethodsSupported (bearer_methods_supported) is an optional slice containing + // a list of the supported methods of sending an OAuth 2.0 bearer token to the + // protected resource. Defined values are "header", "body", and "query". + BearerMethodsSupported []string `json:"bearer_methods_supported,omitempty"` + + // ResourceSigningAlgValuesSupported (resource_signing_alg_values_supported) is an optional + // slice of JWS signing algorithms (alg values) supported by the protected + // resource for signing resource responses. 
+ ResourceSigningAlgValuesSupported []string `json:"resource_signing_alg_values_supported,omitempty"` + + // ResourceName (resource_name) is a human-readable name of the protected resource + // intended for display to the end user. It is RECOMMENDED that this field be included. + // This value may be internationalized. + ResourceName string `json:"resource_name,omitempty"` + + // ResourceDocumentation (resource_documentation) is an optional URL of a page containing + // human-readable information for developers using the protected resource. + // This value may be internationalized. + ResourceDocumentation string `json:"resource_documentation,omitempty"` + + // ResourcePolicyURI (resource_policy_uri) is an optional URL of a page containing + // human-readable policy information on how a client can use the data provided. + // This value may be internationalized. + ResourcePolicyURI string `json:"resource_policy_uri,omitempty"` + + // ResourceTOSURI (resource_tos_uri) is an optional URL of a page containing the protected + // resource's human-readable terms of service. This value may be internationalized. + ResourceTOSURI string `json:"resource_tos_uri,omitempty"` + + // TLSClientCertificateBoundAccessTokens (tls_client_certificate_bound_access_tokens) is an + // optional boolean indicating support for mutual-TLS client certificate-bound + // access tokens (RFC 8705). Defaults to false if omitted. + TLSClientCertificateBoundAccessTokens bool `json:"tls_client_certificate_bound_access_tokens,omitempty"` + + // AuthorizationDetailsTypesSupported (authorization_details_types_supported) is an optional + // slice of 'type' values supported by the resource server for the + // 'authorization_details' parameter (RFC 9396). 
+ AuthorizationDetailsTypesSupported []string `json:"authorization_details_types_supported,omitempty"` + + // DPOPSigningAlgValuesSupported (dpop_signing_alg_values_supported) is an optional + // slice of JWS signing algorithms supported by the resource server for validating + // DPoP proof JWTs (RFC 9449). + DPOPSigningAlgValuesSupported []string `json:"dpop_signing_alg_values_supported,omitempty"` + + // DPOPBoundAccessTokensRequired (dpop_bound_access_tokens_required) is an optional boolean + // specifying whether the protected resource always requires the use of DPoP-bound + // access tokens (RFC 9449). Defaults to false if omitted. + DPOPBoundAccessTokensRequired bool `json:"dpop_bound_access_tokens_required,omitempty"` + + // SignedMetadata (signed_metadata) is an optional JWT containing metadata parameters + // about the protected resource as claims. If present, these values take precedence + // over values conveyed in plain JSON. + // TODO:implement. + // Note that §2.2 says it's okay to ignore this. + // SignedMetadata string `json:"signed_metadata,omitempty"` +} + +// Challenge represents a single authentication challenge from a WWW-Authenticate header. +// As per RFC 9110, Section 11.6.1, a challenge consists of a scheme and optional parameters. +type Challenge struct { + // Scheme is the authentication scheme (e.g., "Bearer", "Basic"). + // It is case-insensitive. A parsed value will always be lower-case. + Scheme string + // Params is a map of authentication parameters. + // Keys are case-insensitive. Parsed keys are always lower-case. 
+ Params map[string]string +} diff --git a/vendor/github.com/segmentio/asm/LICENSE b/vendor/github.com/segmentio/asm/LICENSE new file mode 100644 index 00000000..29e1ab6b --- /dev/null +++ b/vendor/github.com/segmentio/asm/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/segmentio/asm/ascii/ascii.go b/vendor/github.com/segmentio/asm/ascii/ascii.go new file mode 100644 index 00000000..4805146d --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/ascii.go @@ -0,0 +1,53 @@ +package ascii + +import _ "github.com/segmentio/asm/cpu" + +// https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord +const ( + hasLessConstL64 = (^uint64(0)) / 255 + hasLessConstR64 = hasLessConstL64 * 128 + + hasLessConstL32 = (^uint32(0)) / 255 + hasLessConstR32 = hasLessConstL32 * 128 + + hasMoreConstL64 = (^uint64(0)) / 255 + hasMoreConstR64 = hasMoreConstL64 * 128 + + hasMoreConstL32 = (^uint32(0)) / 255 + hasMoreConstR32 = hasMoreConstL32 * 128 +) + +func hasLess64(x, n uint64) bool { + return ((x - (hasLessConstL64 * n)) & ^x & hasLessConstR64) != 0 +} + +func hasLess32(x, n uint32) bool { + return ((x - (hasLessConstL32 * n)) & ^x & hasLessConstR32) != 0 +} + +func hasMore64(x, n uint64) bool { + return (((x + (hasMoreConstL64 * (127 - n))) | x) & hasMoreConstR64) != 0 +} + +func hasMore32(x, n uint32) bool { + return (((x + (hasMoreConstL32 * (127 - n))) | x) & hasMoreConstR32) != 0 +} + +var lowerCase = [256]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 
0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +} diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold.go b/vendor/github.com/segmentio/asm/ascii/equal_fold.go new file mode 100644 index 00000000..d90d8caf --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold.go @@ -0,0 +1,30 @@ +package ascii + +import ( + "github.com/segmentio/asm/internal/unsafebytes" +) + +// EqualFold is a version of bytes.EqualFold designed to work on ASCII input +// instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. 
+func EqualFold(a, b []byte) bool { + return EqualFoldString(unsafebytes.String(a), unsafebytes.String(b)) +} + +func HasPrefixFold(s, prefix []byte) bool { + return len(s) >= len(prefix) && EqualFold(s[:len(prefix)], prefix) +} + +func HasSuffixFold(s, suffix []byte) bool { + return len(s) >= len(suffix) && EqualFold(s[len(s)-len(suffix):], suffix) +} + +func HasPrefixFoldString(s, prefix string) bool { + return len(s) >= len(prefix) && EqualFoldString(s[:len(prefix)], prefix) +} + +func HasSuffixFoldString(s, suffix string) bool { + return len(s) >= len(suffix) && EqualFoldString(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go new file mode 100644 index 00000000..07cf6cdb --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go @@ -0,0 +1,13 @@ +// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package ascii + +// EqualFoldString is a version of strings.EqualFold designed to work on ASCII +// input instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. +func EqualFoldString(a string, b string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s new file mode 100644 index 00000000..34495a62 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s @@ -0,0 +1,304 @@ +// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func EqualFoldString(a string, b string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT ·EqualFoldString(SB), NOSPLIT, $0-33 + MOVQ a_base+0(FP), CX + MOVQ a_len+8(FP), DX + MOVQ b_base+16(FP), BX + CMPQ DX, b_len+24(FP) + JNE done + XORQ AX, AX + CMPQ DX, $0x10 + JB init_x86 + BTL $0x08, github·com∕segmentio∕asm∕cpu·X86+0(SB) + JCS init_avx + +init_x86: + LEAQ github·com∕segmentio∕asm∕ascii·lowerCase+0(SB), R9 + XORL SI, SI + +cmp8: + CMPQ DX, $0x08 + JB cmp7 + MOVBLZX (CX)(AX*1), DI + MOVBLZX (BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 1(CX)(AX*1), DI + MOVBLZX 1(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 2(CX)(AX*1), DI + MOVBLZX 2(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 3(CX)(AX*1), DI + MOVBLZX 3(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 4(CX)(AX*1), DI + MOVBLZX 4(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 5(CX)(AX*1), DI + MOVBLZX 5(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 6(CX)(AX*1), DI + MOVBLZX 6(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 7(CX)(AX*1), DI + MOVBLZX 7(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + JNE done + ADDQ $0x08, AX + SUBQ $0x08, DX + JMP cmp8 + +cmp7: + CMPQ DX, $0x07 + JB cmp6 + MOVBLZX 6(CX)(AX*1), DI + MOVBLZX 6(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp6: + CMPQ DX, $0x06 + JB cmp5 + MOVBLZX 5(CX)(AX*1), DI + MOVBLZX 5(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp5: + CMPQ DX, $0x05 + JB cmp4 + MOVBLZX 4(CX)(AX*1), DI + MOVBLZX 4(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp4: + CMPQ DX, $0x04 + JB cmp3 + MOVBLZX 3(CX)(AX*1), DI + MOVBLZX 3(BX)(AX*1), R8 + MOVB 
(R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp3: + CMPQ DX, $0x03 + JB cmp2 + MOVBLZX 2(CX)(AX*1), DI + MOVBLZX 2(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp2: + CMPQ DX, $0x02 + JB cmp1 + MOVBLZX 1(CX)(AX*1), DI + MOVBLZX 1(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp1: + CMPQ DX, $0x01 + JB success + MOVBLZX (CX)(AX*1), DI + MOVBLZX (BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +done: + SETEQ ret+32(FP) + RET + +success: + MOVB $0x01, ret+32(FP) + RET + +init_avx: + MOVB $0x20, SI + PINSRB $0x00, SI, X12 + VPBROADCASTB X12, Y12 + MOVB $0x1f, SI + PINSRB $0x00, SI, X13 + VPBROADCASTB X13, Y13 + MOVB $0x9a, SI + PINSRB $0x00, SI, X14 + VPBROADCASTB X14, Y14 + MOVB $0x01, SI + PINSRB $0x00, SI, X15 + VPBROADCASTB X15, Y15 + +cmp128: + CMPQ DX, $0x80 + JB cmp64 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU 32(CX)(AX*1), Y1 + VMOVDQU 64(CX)(AX*1), Y2 + VMOVDQU 96(CX)(AX*1), Y3 + VMOVDQU (BX)(AX*1), Y4 + VMOVDQU 32(BX)(AX*1), Y5 + VMOVDQU 64(BX)(AX*1), Y6 + VMOVDQU 96(BX)(AX*1), Y7 + VXORPD Y0, Y4, Y4 + VPCMPEQB Y12, Y4, Y8 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y8, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y4, Y0, Y0 + VXORPD Y1, Y5, Y5 + VPCMPEQB Y12, Y5, Y9 + VORPD Y12, Y1, Y1 + VPADDB Y13, Y1, Y1 + VPCMPGTB Y1, Y14, Y1 + VPAND Y9, Y1, Y1 + VPAND Y15, Y1, Y1 + VPSLLW $0x05, Y1, Y1 + VPCMPEQB Y5, Y1, Y1 + VXORPD Y2, Y6, Y6 + VPCMPEQB Y12, Y6, Y10 + VORPD Y12, Y2, Y2 + VPADDB Y13, Y2, Y2 + VPCMPGTB Y2, Y14, Y2 + VPAND Y10, Y2, Y2 + VPAND Y15, Y2, Y2 + VPSLLW $0x05, Y2, Y2 + VPCMPEQB Y6, Y2, Y2 + VXORPD Y3, Y7, Y7 + VPCMPEQB Y12, Y7, Y11 + VORPD Y12, Y3, Y3 + VPADDB Y13, Y3, Y3 + VPCMPGTB Y3, Y14, Y3 + VPAND Y11, Y3, Y3 + VPAND Y15, Y3, Y3 + VPSLLW $0x05, Y3, Y3 + VPCMPEQB Y7, Y3, Y3 + VPAND Y1, Y0, Y0 + VPAND Y3, Y2, Y2 + VPAND Y2, Y0, Y0 + ADDQ $0x80, AX + SUBQ $0x80, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done 
+ JMP cmp128 + +cmp64: + CMPQ DX, $0x40 + JB cmp32 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU 32(CX)(AX*1), Y1 + VMOVDQU (BX)(AX*1), Y2 + VMOVDQU 32(BX)(AX*1), Y3 + VXORPD Y0, Y2, Y2 + VPCMPEQB Y12, Y2, Y4 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y4, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y2, Y0, Y0 + VXORPD Y1, Y3, Y3 + VPCMPEQB Y12, Y3, Y5 + VORPD Y12, Y1, Y1 + VPADDB Y13, Y1, Y1 + VPCMPGTB Y1, Y14, Y1 + VPAND Y5, Y1, Y1 + VPAND Y15, Y1, Y1 + VPSLLW $0x05, Y1, Y1 + VPCMPEQB Y3, Y1, Y1 + VPAND Y1, Y0, Y0 + ADDQ $0x40, AX + SUBQ $0x40, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done + +cmp32: + CMPQ DX, $0x20 + JB cmp16 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU (BX)(AX*1), Y1 + VXORPD Y0, Y1, Y1 + VPCMPEQB Y12, Y1, Y2 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y2, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y1, Y0, Y0 + ADDQ $0x20, AX + SUBQ $0x20, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done + +cmp16: + CMPQ DX, $0x10 + JLE cmp_tail + VMOVDQU (CX)(AX*1), X0 + VMOVDQU (BX)(AX*1), X1 + VXORPD X0, X1, X1 + VPCMPEQB X12, X1, X2 + VORPD X12, X0, X0 + VPADDB X13, X0, X0 + VPCMPGTB X0, X14, X0 + VPAND X2, X0, X0 + VPAND X15, X0, X0 + VPSLLW $0x05, X0, X0 + VPCMPEQB X1, X0, X0 + ADDQ $0x10, AX + SUBQ $0x10, DX + VPMOVMSKB X0, SI + XORL $0x0000ffff, SI + JNE done + +cmp_tail: + SUBQ $0x10, DX + ADDQ DX, AX + VMOVDQU (CX)(AX*1), X0 + VMOVDQU (BX)(AX*1), X1 + VXORPD X0, X1, X1 + VPCMPEQB X12, X1, X2 + VORPD X12, X0, X0 + VPADDB X13, X0, X0 + VPCMPGTB X0, X14, X0 + VPAND X2, X0, X0 + VPAND X15, X0, X0 + VPSLLW $0x05, X0, X0 + VPCMPEQB X1, X0, X0 + VPMOVMSKB X0, AX + XORL $0x0000ffff, AX + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go b/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go new file mode 100644 index 00000000..1ae5a13a --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go @@ -0,0 +1,60 @@ 
+//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +// EqualFoldString is a version of strings.EqualFold designed to work on ASCII +// input instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. +func EqualFoldString(a, b string) bool { + if len(a) != len(b) { + return false + } + + var cmp byte + + for len(a) >= 8 { + cmp |= lowerCase[a[0]] ^ lowerCase[b[0]] + cmp |= lowerCase[a[1]] ^ lowerCase[b[1]] + cmp |= lowerCase[a[2]] ^ lowerCase[b[2]] + cmp |= lowerCase[a[3]] ^ lowerCase[b[3]] + cmp |= lowerCase[a[4]] ^ lowerCase[b[4]] + cmp |= lowerCase[a[5]] ^ lowerCase[b[5]] + cmp |= lowerCase[a[6]] ^ lowerCase[b[6]] + cmp |= lowerCase[a[7]] ^ lowerCase[b[7]] + + if cmp != 0 { + return false + } + + a = a[8:] + b = b[8:] + } + + switch len(a) { + case 7: + cmp |= lowerCase[a[6]] ^ lowerCase[b[6]] + fallthrough + case 6: + cmp |= lowerCase[a[5]] ^ lowerCase[b[5]] + fallthrough + case 5: + cmp |= lowerCase[a[4]] ^ lowerCase[b[4]] + fallthrough + case 4: + cmp |= lowerCase[a[3]] ^ lowerCase[b[3]] + fallthrough + case 3: + cmp |= lowerCase[a[2]] ^ lowerCase[b[2]] + fallthrough + case 2: + cmp |= lowerCase[a[1]] ^ lowerCase[b[1]] + fallthrough + case 1: + cmp |= lowerCase[a[0]] ^ lowerCase[b[0]] + } + + return cmp == 0 +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid.go b/vendor/github.com/segmentio/asm/ascii/valid.go new file mode 100644 index 00000000..a5168ef5 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid.go @@ -0,0 +1,18 @@ +package ascii + +import "github.com/segmentio/asm/internal/unsafebytes" + +// Valid returns true if b contains only ASCII characters. +func Valid(b []byte) bool { + return ValidString(unsafebytes.String(b)) +} + +// ValidBytes returns true if b is an ASCII character. +func ValidByte(b byte) bool { + return b <= 0x7f +} + +// ValidBytes returns true if b is an ASCII character. 
+func ValidRune(r rune) bool { + return r <= 0x7f +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_amd64.go b/vendor/github.com/segmentio/asm/ascii/valid_amd64.go new file mode 100644 index 00000000..72dc7b43 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_amd64.go @@ -0,0 +1,9 @@ +// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package ascii + +// ValidString returns true if s contains only ASCII characters. +func ValidString(s string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/valid_amd64.s b/vendor/github.com/segmentio/asm/ascii/valid_amd64.s new file mode 100644 index 00000000..0214b0ce --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_amd64.s @@ -0,0 +1,132 @@ +// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func ValidString(s string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT ·ValidString(SB), NOSPLIT, $0-17 + MOVQ s_base+0(FP), AX + MOVQ s_len+8(FP), CX + MOVQ $0x8080808080808080, DX + CMPQ CX, $0x10 + JB cmp8 + BTL $0x08, github·com∕segmentio∕asm∕cpu·X86+0(SB) + JCS init_avx + +cmp8: + CMPQ CX, $0x08 + JB cmp4 + TESTQ DX, (AX) + JNZ invalid + ADDQ $0x08, AX + SUBQ $0x08, CX + JMP cmp8 + +cmp4: + CMPQ CX, $0x04 + JB cmp3 + TESTL $0x80808080, (AX) + JNZ invalid + ADDQ $0x04, AX + SUBQ $0x04, CX + +cmp3: + CMPQ CX, $0x03 + JB cmp2 + MOVWLZX (AX), CX + MOVBLZX 2(AX), AX + SHLL $0x10, AX + ORL CX, AX + TESTL $0x80808080, AX + JMP done + +cmp2: + CMPQ CX, $0x02 + JB cmp1 + TESTW $0x8080, (AX) + JMP done + +cmp1: + CMPQ CX, $0x00 + JE done + TESTB $0x80, (AX) + +done: + SETEQ ret+16(FP) + RET + +invalid: + MOVB $0x00, ret+16(FP) + RET + +init_avx: + PINSRQ $0x00, DX, X4 + VPBROADCASTQ X4, Y4 + +cmp256: + CMPQ CX, $0x00000100 + 
JB cmp128 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VMOVDQU 64(AX), Y1 + VPOR 96(AX), Y1, Y1 + VMOVDQU 128(AX), Y2 + VPOR 160(AX), Y2, Y2 + VMOVDQU 192(AX), Y3 + VPOR 224(AX), Y3, Y3 + VPOR Y1, Y0, Y0 + VPOR Y3, Y2, Y2 + VPOR Y2, Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x00000100, AX + SUBQ $0x00000100, CX + JMP cmp256 + +cmp128: + CMPQ CX, $0x80 + JB cmp64 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VMOVDQU 64(AX), Y1 + VPOR 96(AX), Y1, Y1 + VPOR Y1, Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x80, AX + SUBQ $0x80, CX + +cmp64: + CMPQ CX, $0x40 + JB cmp32 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x40, AX + SUBQ $0x40, CX + +cmp32: + CMPQ CX, $0x20 + JB cmp16 + VPTEST (AX), Y4 + JNZ invalid + ADDQ $0x20, AX + SUBQ $0x20, CX + +cmp16: + CMPQ CX, $0x10 + JLE cmp_tail + VPTEST (AX), X4 + JNZ invalid + ADDQ $0x10, AX + SUBQ $0x10, CX + +cmp_tail: + SUBQ $0x10, CX + ADDQ CX, AX + VPTEST (AX), X4 + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/valid_default.go b/vendor/github.com/segmentio/asm/ascii/valid_default.go new file mode 100644 index 00000000..715a090d --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_default.go @@ -0,0 +1,48 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +import ( + "unsafe" +) + +// ValidString returns true if s contains only ASCII characters. 
+func ValidString(s string) bool { + p := *(*unsafe.Pointer)(unsafe.Pointer(&s)) + i := uintptr(0) + n := uintptr(len(s)) + + for i+8 <= n { + if (*(*uint64)(unsafe.Pointer(uintptr(p) + i)) & 0x8080808080808080) != 0 { + return false + } + i += 8 + } + + if i+4 <= n { + if (*(*uint32)(unsafe.Pointer(uintptr(p) + i)) & 0x80808080) != 0 { + return false + } + i += 4 + } + + if i == n { + return true + } + + p = unsafe.Pointer(uintptr(p) + i) + + var x uint32 + switch n - i { + case 3: + x = uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16 + case 2: + x = uint32(*(*uint16)(p)) + case 1: + x = uint32(*(*uint8)(p)) + default: + return true + } + return (x & 0x80808080) == 0 +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print.go b/vendor/github.com/segmentio/asm/ascii/valid_print.go new file mode 100644 index 00000000..aa0db7f6 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print.go @@ -0,0 +1,18 @@ +package ascii + +import "github.com/segmentio/asm/internal/unsafebytes" + +// ValidPrint returns true if b contains only printable ASCII characters. +func ValidPrint(b []byte) bool { + return ValidPrintString(unsafebytes.String(b)) +} + +// ValidPrintBytes returns true if b is an ASCII character. +func ValidPrintByte(b byte) bool { + return 0x20 <= b && b <= 0x7e +} + +// ValidPrintBytes returns true if b is an ASCII character. +func ValidPrintRune(r rune) bool { + return 0x20 <= r && r <= 0x7e +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go new file mode 100644 index 00000000..b1462666 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go @@ -0,0 +1,9 @@ +// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +package ascii + +// ValidPrintString returns true if s contains only printable ASCII characters. +func ValidPrintString(s string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s new file mode 100644 index 00000000..bc2e20a2 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s @@ -0,0 +1,185 @@ +// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func ValidPrintString(s string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT ·ValidPrintString(SB), NOSPLIT, $0-17 + MOVQ s_base+0(FP), AX + MOVQ s_len+8(FP), CX + CMPQ CX, $0x10 + JB init_x86 + BTL $0x08, github·com∕segmentio∕asm∕cpu·X86+0(SB) + JCS init_avx + +init_x86: + CMPQ CX, $0x08 + JB cmp4 + MOVQ $0xdfdfdfdfdfdfdfe0, DX + MOVQ $0x0101010101010101, BX + MOVQ $0x8080808080808080, SI + +cmp8: + MOVQ (AX), DI + MOVQ DI, R8 + LEAQ (DI)(DX*1), R9 + NOTQ R8 + ANDQ R8, R9 + LEAQ (DI)(BX*1), R8 + ORQ R8, DI + ORQ R9, DI + ADDQ $0x08, AX + SUBQ $0x08, CX + TESTQ SI, DI + JNE done + CMPQ CX, $0x08 + JB cmp4 + JMP cmp8 + +cmp4: + CMPQ CX, $0x04 + JB cmp3 + MOVL (AX), DX + MOVL DX, BX + LEAL 3755991008(DX), SI + NOTL BX + ANDL BX, SI + LEAL 16843009(DX), BX + ORL BX, DX + ORL SI, DX + ADDQ $0x04, AX + SUBQ $0x04, CX + TESTL $0x80808080, DX + JNE done + +cmp3: + CMPQ CX, $0x03 + JB cmp2 + MOVWLZX (AX), DX + MOVBLZX 2(AX), AX + SHLL $0x10, AX + ORL DX, AX + ORL $0x20000000, AX + JMP final + +cmp2: + CMPQ CX, $0x02 + JB cmp1 + MOVWLZX (AX), AX + ORL $0x20200000, AX + JMP final + +cmp1: + CMPQ CX, $0x00 + JE done + MOVBLZX (AX), AX + ORL $0x20202000, AX + +final: + MOVL AX, CX + LEAL 3755991008(AX), DX + NOTL CX + ANDL CX, DX + LEAL 16843009(AX), CX + ORL CX, AX + ORL DX, AX + TESTL $0x80808080, AX + +done: + 
SETEQ ret+16(FP) + RET + +init_avx: + MOVB $0x1f, DL + PINSRB $0x00, DX, X8 + VPBROADCASTB X8, Y8 + MOVB $0x7e, DL + PINSRB $0x00, DX, X9 + VPBROADCASTB X9, Y9 + +cmp128: + CMPQ CX, $0x80 + JB cmp64 + VMOVDQU (AX), Y0 + VMOVDQU 32(AX), Y1 + VMOVDQU 64(AX), Y2 + VMOVDQU 96(AX), Y3 + VPCMPGTB Y8, Y0, Y4 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y4, Y0, Y0 + VPCMPGTB Y8, Y1, Y5 + VPCMPGTB Y9, Y1, Y1 + VPANDN Y5, Y1, Y1 + VPCMPGTB Y8, Y2, Y6 + VPCMPGTB Y9, Y2, Y2 + VPANDN Y6, Y2, Y2 + VPCMPGTB Y8, Y3, Y7 + VPCMPGTB Y9, Y3, Y3 + VPANDN Y7, Y3, Y3 + VPAND Y1, Y0, Y0 + VPAND Y3, Y2, Y2 + VPAND Y2, Y0, Y0 + ADDQ $0x80, AX + SUBQ $0x80, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + JMP cmp128 + +cmp64: + CMPQ CX, $0x40 + JB cmp32 + VMOVDQU (AX), Y0 + VMOVDQU 32(AX), Y1 + VPCMPGTB Y8, Y0, Y2 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y2, Y0, Y0 + VPCMPGTB Y8, Y1, Y3 + VPCMPGTB Y9, Y1, Y1 + VPANDN Y3, Y1, Y1 + VPAND Y1, Y0, Y0 + ADDQ $0x40, AX + SUBQ $0x40, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + +cmp32: + CMPQ CX, $0x20 + JB cmp16 + VMOVDQU (AX), Y0 + VPCMPGTB Y8, Y0, Y1 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y1, Y0, Y0 + ADDQ $0x20, AX + SUBQ $0x20, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + +cmp16: + CMPQ CX, $0x10 + JLE cmp_tail + VMOVDQU (AX), X0 + VPCMPGTB X8, X0, X1 + VPCMPGTB X9, X0, X0 + VPANDN X1, X0, X0 + ADDQ $0x10, AX + SUBQ $0x10, CX + VPMOVMSKB X0, DX + XORL $0x0000ffff, DX + JNE done + +cmp_tail: + SUBQ $0x10, CX + ADDQ CX, AX + VMOVDQU (AX), X0 + VPCMPGTB X8, X0, X1 + VPCMPGTB X9, X0, X0 + VPANDN X1, X0, X0 + VPMOVMSKB X0, DX + XORL $0x0000ffff, DX + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_default.go b/vendor/github.com/segmentio/asm/ascii/valid_print_default.go new file mode 100644 index 00000000..c4dc748b --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_default.go @@ -0,0 +1,46 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +import "unsafe" + +// ValidString 
returns true if s contains only printable ASCII characters. +func ValidPrintString(s string) bool { + p := *(*unsafe.Pointer)(unsafe.Pointer(&s)) + i := uintptr(0) + n := uintptr(len(s)) + + for i+8 <= n { + if hasLess64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x7e) { + return false + } + i += 8 + } + + if i+4 <= n { + if hasLess32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x7e) { + return false + } + i += 4 + } + + if i == n { + return true + } + + p = unsafe.Pointer(uintptr(p) + i) + + var x uint32 + switch n - i { + case 3: + x = 0x20000000 | uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16 + case 2: + x = 0x20200000 | uint32(*(*uint16)(p)) + case 1: + x = 0x20202000 | uint32(*(*uint8)(p)) + default: + return true + } + return !(hasLess32(x, 0x20) || hasMore32(x, 0x7e)) +} diff --git a/vendor/github.com/segmentio/asm/base64/base64.go b/vendor/github.com/segmentio/asm/base64/base64.go new file mode 100644 index 00000000..dd2128d4 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64.go @@ -0,0 +1,67 @@ +package base64 + +import ( + "encoding/base64" +) + +const ( + StdPadding rune = base64.StdPadding + NoPadding rune = base64.NoPadding + + encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" + encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + encodeIMAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+," + + letterRange = int8('Z' - 'A' + 1) +) + +// StdEncoding is the standard base64 encoding, as defined in RFC 4648. +var StdEncoding = NewEncoding(encodeStd) + +// URLEncoding is the alternate base64 encoding defined in RFC 4648. +// It is typically used in URLs and file names. +var URLEncoding = NewEncoding(encodeURL) + +// RawStdEncoding is the standard unpadded base64 encoding defined in RFC 4648 section 3.2. 
+// This is the same as StdEncoding but omits padding characters. +var RawStdEncoding = StdEncoding.WithPadding(NoPadding) + +// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648. +// This is the same as URLEncoding but omits padding characters. +var RawURLEncoding = URLEncoding.WithPadding(NoPadding) + +// NewEncoding returns a new padded Encoding defined by the given alphabet, +// which must be a 64-byte string that does not contain the padding character +// or CR / LF ('\r', '\n'). Unlike the standard library, the encoding alphabet +// cannot be abitrary, and it must follow one of the know standard encoding +// variants. +// +// Required alphabet values: +// * [0,26): characters 'A'..'Z' +// * [26,52): characters 'a'..'z' +// * [52,62): characters '0'..'9' +// Flexible alphabet value options: +// * RFC 4648, RFC 1421, RFC 2045, RFC 2152, RFC 4880: '+' and '/' +// * RFC 4648 URI: '-' and '_' +// * RFC 3501: '+' and ',' +// +// The resulting Encoding uses the default padding character ('='), which may +// be changed or disabled via WithPadding. The padding characters is urestricted, +// but it must be a character outside of the encoder alphabet. 
+func NewEncoding(encoder string) *Encoding { + if len(encoder) != 64 { + panic("encoding alphabet is not 64-bytes long") + } + + if _, ok := allowedEncoding[encoder]; !ok { + panic("non-standard encoding alphabets are not supported") + } + + return newEncoding(encoder) +} + +var allowedEncoding = map[string]struct{}{ + encodeStd: {}, + encodeURL: {}, + encodeIMAP: {}, +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_amd64.go b/vendor/github.com/segmentio/asm/base64/base64_amd64.go new file mode 100644 index 00000000..e4940d78 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_amd64.go @@ -0,0 +1,160 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package base64 + +import ( + "encoding/base64" + + "github.com/segmentio/asm/cpu" + "github.com/segmentio/asm/cpu/x86" + "github.com/segmentio/asm/internal/unsafebytes" +) + +// An Encoding is a radix 64 encoding/decoding scheme, defined by a +// 64-character alphabet. +type Encoding struct { + enc func(dst []byte, src []byte, lut *int8) (int, int) + enclut [32]int8 + + dec func(dst []byte, src []byte, lut *int8) (int, int) + declut [48]int8 + + base *base64.Encoding +} + +const ( + minEncodeLen = 28 + minDecodeLen = 45 +) + +func newEncoding(encoder string) *Encoding { + e := &Encoding{base: base64.NewEncoding(encoder)} + if cpu.X86.Has(x86.AVX2) { + e.enableEncodeAVX2(encoder) + e.enableDecodeAVX2(encoder) + } + return e +} + +func (e *Encoding) enableEncodeAVX2(encoder string) { + // Translate values 0..63 to the Base64 alphabet. 
There are five sets: + // + // From To Add Index Example + // [0..25] [65..90] +65 0 ABCDEFGHIJKLMNOPQRSTUVWXYZ + // [26..51] [97..122] +71 1 abcdefghijklmnopqrstuvwxyz + // [52..61] [48..57] -4 [2..11] 0123456789 + // [62] [43] -19 12 + + // [63] [47] -16 13 / + tab := [32]int8{int8(encoder[0]), int8(encoder[letterRange]) - letterRange} + for i, ch := range encoder[2*letterRange:] { + tab[2+i] = int8(ch) - 2*letterRange - int8(i) + } + + e.enc = encodeAVX2 + e.enclut = tab +} + +func (e *Encoding) enableDecodeAVX2(encoder string) { + c62, c63 := int8(encoder[62]), int8(encoder[63]) + url := c63 == '_' + if url { + c63 = '/' + } + + // Translate values from the Base64 alphabet using five sets. Values outside + // of these ranges are considered invalid: + // + // From To Add Index Example + // [47] [63] +16 1 / + // [43] [62] +19 2 + + // [48..57] [52..61] +4 3 0123456789 + // [65..90] [0..25] -65 4,5 ABCDEFGHIJKLMNOPQRSTUVWXYZ + // [97..122] [26..51] -71 6,7 abcdefghijklmnopqrstuvwxyz + tab := [48]int8{ + 0, 63 - c63, 62 - c62, 4, -65, -65, -71, -71, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x13, 0x1B, 0x1B, 0x1B, 0x1B, 0x1B, + } + tab[(c62&15)+16] = 0x1A + tab[(c63&15)+16] = 0x1A + + if url { + e.dec = decodeAVX2URI + } else { + e.dec = decodeAVX2 + } + e.declut = tab +} + +// WithPadding creates a duplicate Encoding updated with a specified padding +// character, or NoPadding to disable padding. The padding character must not +// be contained in the encoding alphabet, must not be '\r' or '\n', and must +// be no greater than '\xFF'. +func (enc Encoding) WithPadding(padding rune) *Encoding { + enc.base = enc.base.WithPadding(padding) + return &enc +} + +// Strict creates a duplicate encoding updated with strict decoding enabled. +// This requires that trailing padding bits are zero. 
+func (enc Encoding) Strict() *Encoding { + enc.base = enc.base.Strict() + return &enc +} + +// Encode encodes src using the defined encoding alphabet. +// This will write EncodedLen(len(src)) bytes to dst. +func (enc *Encoding) Encode(dst, src []byte) { + if len(src) >= minEncodeLen && enc.enc != nil { + d, s := enc.enc(dst, src, &enc.enclut[0]) + dst = dst[d:] + src = src[s:] + } + enc.base.Encode(dst, src) +} + +// Encode encodes src using the encoding enc, writing +// EncodedLen(len(src)) bytes to dst. +func (enc *Encoding) EncodeToString(src []byte) string { + buf := make([]byte, enc.base.EncodedLen(len(src))) + enc.Encode(buf, src) + return string(buf) +} + +// EncodedLen calculates the base64-encoded byte length for a message +// of length n. +func (enc *Encoding) EncodedLen(n int) int { + return enc.base.EncodedLen(n) +} + +// Decode decodes src using the defined encoding alphabet. +// This will write DecodedLen(len(src)) bytes to dst and return the number of +// bytes written. +func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { + var d, s int + if len(src) >= minDecodeLen && enc.dec != nil { + d, s = enc.dec(dst, src, &enc.declut[0]) + dst = dst[d:] + src = src[s:] + } + n, err = enc.base.Decode(dst, src) + n += d + return +} + +// DecodeString decodes the base64 encoded string s, returns the decoded +// value as bytes. +func (enc *Encoding) DecodeString(s string) ([]byte, error) { + src := unsafebytes.BytesOf(s) + dst := make([]byte, enc.base.DecodedLen(len(s))) + n, err := enc.Decode(dst, src) + return dst[:n], err +} + +// DecodedLen calculates the decoded byte length for a base64-encoded message +// of length n. 
+func (enc *Encoding) DecodedLen(n int) int { + return enc.base.DecodedLen(n) +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_default.go b/vendor/github.com/segmentio/asm/base64/base64_default.go new file mode 100644 index 00000000..f5d3d647 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_default.go @@ -0,0 +1,14 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package base64 + +import "encoding/base64" + +// An Encoding is a radix 64 encoding/decoding scheme, defined by a +// 64-character alphabet. +type Encoding = base64.Encoding + +func newEncoding(encoder string) *Encoding { + return base64.NewEncoding(encoder) +} diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.go b/vendor/github.com/segmentio/asm/base64/decode_amd64.go new file mode 100644 index 00000000..1dae5b43 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.go @@ -0,0 +1,10 @@ +// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package base64 + +func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int) + +func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.s b/vendor/github.com/segmentio/asm/base64/decode_amd64.s new file mode 100644 index 00000000..cc6c779d --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.s @@ -0,0 +1,144 @@ +// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +DATA b64_dec_lut_hi<>+0(SB)/8, $0x0804080402011010 +DATA b64_dec_lut_hi<>+8(SB)/8, $0x1010101010101010 +DATA b64_dec_lut_hi<>+16(SB)/8, $0x0804080402011010 +DATA b64_dec_lut_hi<>+24(SB)/8, $0x1010101010101010 +GLOBL b64_dec_lut_hi<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_madd1<>+0(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+8(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+16(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+24(SB)/8, $0x0140014001400140 +GLOBL b64_dec_madd1<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_madd2<>+0(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+8(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+16(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+24(SB)/8, $0x0001100000011000 +GLOBL b64_dec_madd2<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_shuf_lo<>+0(SB)/8, $0x0000000000000000 +DATA b64_dec_shuf_lo<>+8(SB)/8, $0x0600010200000000 +GLOBL b64_dec_shuf_lo<>(SB), RODATA|NOPTR, $16 + +DATA b64_dec_shuf<>+0(SB)/8, $0x090a040506000102 +DATA b64_dec_shuf<>+8(SB)/8, $0x000000000c0d0e08 +DATA b64_dec_shuf<>+16(SB)/8, $0x0c0d0e08090a0405 +DATA b64_dec_shuf<>+24(SB)/8, $0x0000000000000000 +GLOBL b64_dec_shuf<>(SB), RODATA|NOPTR, $32 + +// func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·decodeAVX2(SB), NOSPLIT, $0-72 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x2f, CL + PINSRB $0x00, CX, X8 + VPBROADCASTB X8, Y8 + XORQ CX, CX + XORQ BX, BX + VPXOR Y7, Y7, Y7 + VPERMQ $0x44, (SI), Y6 + VPERMQ $0x44, 16(SI), Y4 + VMOVDQA b64_dec_lut_hi<>+0(SB), Y5 + +loop: + VMOVDQU (DX)(BX*1), Y0 + VPSRLD $0x04, Y0, Y2 + VPAND Y8, Y0, Y3 + VPSHUFB Y3, Y4, Y3 + VPAND Y8, Y2, Y2 + VPSHUFB Y2, Y5, Y9 + VPTEST Y9, Y3 + JNE done + VPCMPEQB Y8, Y0, Y3 + VPADDB Y3, Y2, Y2 + VPSHUFB Y2, Y6, Y2 + VPADDB Y0, Y2, Y0 + VPMADDUBSW b64_dec_madd1<>+0(SB), Y0, Y0 + VPMADDWD b64_dec_madd2<>+0(SB), 
Y0, Y0 + VEXTRACTI128 $0x01, Y0, X1 + VPSHUFB b64_dec_shuf_lo<>+0(SB), X1, X1 + VPSHUFB b64_dec_shuf<>+0(SB), Y0, Y0 + VPBLENDD $0x08, Y1, Y0, Y1 + VPBLENDD $0xc0, Y7, Y1, Y1 + VMOVDQU Y1, (AX)(CX*1) + ADDQ $0x18, CX + ADDQ $0x20, BX + SUBQ $0x20, DI + CMPQ DI, $0x2d + JB done + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET + +// func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·decodeAVX2URI(SB), NOSPLIT, $0-72 + MOVB $0x2f, AL + PINSRB $0x00, AX, X0 + VPBROADCASTB X0, Y0 + MOVB $0x5f, AL + PINSRB $0x00, AX, X1 + VPBROADCASTB X1, Y1 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x2f, CL + PINSRB $0x00, CX, X10 + VPBROADCASTB X10, Y10 + XORQ CX, CX + XORQ BX, BX + VPXOR Y9, Y9, Y9 + VPERMQ $0x44, (SI), Y8 + VPERMQ $0x44, 16(SI), Y6 + VMOVDQA b64_dec_lut_hi<>+0(SB), Y7 + +loop: + VMOVDQU (DX)(BX*1), Y2 + VPCMPEQB Y2, Y1, Y4 + VPBLENDVB Y4, Y0, Y2, Y2 + VPSRLD $0x04, Y2, Y4 + VPAND Y10, Y2, Y5 + VPSHUFB Y5, Y6, Y5 + VPAND Y10, Y4, Y4 + VPSHUFB Y4, Y7, Y11 + VPTEST Y11, Y5 + JNE done + VPCMPEQB Y10, Y2, Y5 + VPADDB Y5, Y4, Y4 + VPSHUFB Y4, Y8, Y4 + VPADDB Y2, Y4, Y2 + VPMADDUBSW b64_dec_madd1<>+0(SB), Y2, Y2 + VPMADDWD b64_dec_madd2<>+0(SB), Y2, Y2 + VEXTRACTI128 $0x01, Y2, X3 + VPSHUFB b64_dec_shuf_lo<>+0(SB), X3, X3 + VPSHUFB b64_dec_shuf<>+0(SB), Y2, Y2 + VPBLENDD $0x08, Y3, Y2, Y3 + VPBLENDD $0xc0, Y9, Y3, Y3 + VMOVDQU Y3, (AX)(CX*1) + ADDQ $0x18, CX + ADDQ $0x20, BX + SUBQ $0x20, DI + CMPQ DI, $0x2d + JB done + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.go b/vendor/github.com/segmentio/asm/base64/encode_amd64.go new file mode 100644 index 00000000..c38060f7 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.go @@ -0,0 +1,8 @@ +// Code generated by command: go run encode_asm.go -pkg base64 
-out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package base64 + +func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.s b/vendor/github.com/segmentio/asm/base64/encode_amd64.s new file mode 100644 index 00000000..2edd27aa --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.s @@ -0,0 +1,88 @@ +// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·encodeAVX2(SB), NOSPLIT, $0-72 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x33, CL + PINSRB $0x00, CX, X4 + VPBROADCASTB X4, Y4 + MOVB $0x19, CL + PINSRB $0x00, CX, X5 + VPBROADCASTB X5, Y5 + XORQ CX, CX + XORQ BX, BX + + // Load the 16-byte LUT into both lanes of the register + VPERMQ $0x44, (SI), Y3 + + // Load the first block using a mask to avoid potential fault + VMOVDQU b64_enc_load<>+0(SB), Y0 + VPMASKMOVD -4(DX)(BX*1), Y0, Y0 + +loop: + VPSHUFB b64_enc_shuf<>+0(SB), Y0, Y0 + VPAND b64_enc_mask1<>+0(SB), Y0, Y1 + VPSLLW $0x08, Y1, Y2 + VPSLLW $0x04, Y1, Y1 + VPBLENDW $0xaa, Y2, Y1, Y2 + VPAND b64_enc_mask2<>+0(SB), Y0, Y1 + VPMULHUW b64_enc_mult<>+0(SB), Y1, Y0 + VPOR Y0, Y2, Y0 + VPSUBUSB Y4, Y0, Y1 + VPCMPGTB Y5, Y0, Y2 + VPSUBB Y2, Y1, Y1 + VPSHUFB Y1, Y3, Y1 + VPADDB Y0, Y1, Y0 + VMOVDQU Y0, (AX)(CX*1) + ADDQ $0x20, CX + ADDQ $0x18, BX + SUBQ $0x18, DI + CMPQ DI, $0x20 + JB done + VMOVDQU -4(DX)(BX*1), Y0 + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET + +DATA b64_enc_load<>+0(SB)/8, $0x8000000000000000 +DATA b64_enc_load<>+8(SB)/8, $0x8000000080000000 +DATA b64_enc_load<>+16(SB)/8, 
$0x8000000080000000 +DATA b64_enc_load<>+24(SB)/8, $0x8000000080000000 +GLOBL b64_enc_load<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_shuf<>+0(SB)/8, $0x0809070805060405 +DATA b64_enc_shuf<>+8(SB)/8, $0x0e0f0d0e0b0c0a0b +DATA b64_enc_shuf<>+16(SB)/8, $0x0405030401020001 +DATA b64_enc_shuf<>+24(SB)/8, $0x0a0b090a07080607 +GLOBL b64_enc_shuf<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mask1<>+0(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+8(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+16(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+24(SB)/8, $0x003f03f0003f03f0 +GLOBL b64_enc_mask1<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mask2<>+0(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+8(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+16(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+24(SB)/8, $0x0fc0fc000fc0fc00 +GLOBL b64_enc_mask2<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mult<>+0(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+8(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+16(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+24(SB)/8, $0x0400004004000040 +GLOBL b64_enc_mult<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/cpu/arm/arm.go b/vendor/github.com/segmentio/asm/cpu/arm/arm.go new file mode 100644 index 00000000..47c695a0 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm/arm.go @@ -0,0 +1,80 @@ +package arm + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . 
"golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + SWP Feature = 1 << iota // SWP instruction support + HALF // Half-word load and store support + THUMB // ARM Thumb instruction set + BIT26 // Address space limited to 26-bits + FASTMUL // 32-bit operand, 64-bit result multiplication support + FPA // Floating point arithmetic support + VFP // Vector floating point support + EDSP // DSP Extensions support + JAVA // Java instruction set + IWMMXT // Intel Wireless MMX technology support + CRUNCH // MaverickCrunch context switching and handling + THUMBEE // Thumb EE instruction set + NEON // NEON instruction set + VFPv3 // Vector floating point version 3 support + VFPv3D16 // Vector floating point version 3 D8-D15 + TLS // Thread local storage support + VFPv4 // Vector floating point version 4 support + IDIVA // Integer divide instruction support in ARM mode + IDIVT // Integer divide instruction support in Thumb mode + VFPD32 // Vector floating point version 3 D15-D31 + LPAE // Large Physical Address Extensions + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(SWP, ARM.HasSWP) + cpu.set(HALF, ARM.HasHALF) + cpu.set(THUMB, ARM.HasTHUMB) + cpu.set(BIT26, ARM.Has26BIT) + cpu.set(FASTMUL, ARM.HasFASTMUL) + cpu.set(FPA, ARM.HasFPA) + cpu.set(VFP, ARM.HasVFP) + cpu.set(EDSP, ARM.HasEDSP) + cpu.set(JAVA, ARM.HasJAVA) + cpu.set(IWMMXT, ARM.HasIWMMXT) + cpu.set(CRUNCH, ARM.HasCRUNCH) + cpu.set(THUMBEE, ARM.HasTHUMBEE) + cpu.set(NEON, ARM.HasNEON) + cpu.set(VFPv3, ARM.HasVFPv3) + 
cpu.set(VFPv3D16, ARM.HasVFPv3D16) + cpu.set(TLS, ARM.HasTLS) + cpu.set(VFPv4, ARM.HasVFPv4) + cpu.set(IDIVA, ARM.HasIDIVA) + cpu.set(IDIVT, ARM.HasIDIVT) + cpu.set(VFPD32, ARM.HasVFPD32) + cpu.set(LPAE, ARM.HasLPAE) + cpu.set(EVTSTRM, ARM.HasEVTSTRM) + cpu.set(AES, ARM.HasAES) + cpu.set(PMULL, ARM.HasPMULL) + cpu.set(SHA1, ARM.HasSHA1) + cpu.set(SHA2, ARM.HasSHA2) + cpu.set(CRC32, ARM.HasCRC32) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go new file mode 100644 index 00000000..0c5134c7 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go @@ -0,0 +1,74 @@ +package arm64 + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . "golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + FP Feature = 1 << iota // Floating-point instruction set (always available) + ASIMD // Advanced SIMD (always available) + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation + ATOMICS // Atomic memory operation instruction set + FPHP // Half precision floating-point instruction set + ASIMDHP // Advanced SIMD half precision instruction set + CPUID // CPUID identification scheme registers + ASIMDRDM // Rounding double multiply add/subtract instruction set + JSCVT // Javascript conversion from floating-point to integer + FCMA // Floating-point multiplication and addition of complex numbers + LRCPC // Release Consistent processor consistent support + DCPOP // Persistent memory support + SHA3 // SHA3 hardware implementation + SM3 // SM3 hardware implementation + 
SM4 // SM4 hardware implementation + ASIMDDP // Advanced SIMD double precision instruction set + SHA512 // SHA512 hardware implementation + SVE // Scalable Vector Extensions + ASIMDFHM // Advanced SIMD multiplication FP16 to FP32 +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(FP, ARM64.HasFP) + cpu.set(ASIMD, ARM64.HasASIMD) + cpu.set(EVTSTRM, ARM64.HasEVTSTRM) + cpu.set(AES, ARM64.HasAES) + cpu.set(PMULL, ARM64.HasPMULL) + cpu.set(SHA1, ARM64.HasSHA1) + cpu.set(SHA2, ARM64.HasSHA2) + cpu.set(CRC32, ARM64.HasCRC32) + cpu.set(ATOMICS, ARM64.HasATOMICS) + cpu.set(FPHP, ARM64.HasFPHP) + cpu.set(ASIMDHP, ARM64.HasASIMDHP) + cpu.set(CPUID, ARM64.HasCPUID) + cpu.set(ASIMDRDM, ARM64.HasASIMDRDM) + cpu.set(JSCVT, ARM64.HasJSCVT) + cpu.set(FCMA, ARM64.HasFCMA) + cpu.set(LRCPC, ARM64.HasLRCPC) + cpu.set(DCPOP, ARM64.HasDCPOP) + cpu.set(SHA3, ARM64.HasSHA3) + cpu.set(SM3, ARM64.HasSM3) + cpu.set(SM4, ARM64.HasSM4) + cpu.set(ASIMDDP, ARM64.HasASIMDDP) + cpu.set(SHA512, ARM64.HasSHA512) + cpu.set(SVE, ARM64.HasSVE) + cpu.set(ASIMDFHM, ARM64.HasASIMDFHM) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/cpu.go b/vendor/github.com/segmentio/asm/cpu/cpu.go new file mode 100644 index 00000000..6ddf4973 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpu.go @@ -0,0 +1,22 @@ +// Pakage cpu provides APIs to detect CPU features available at runtime. +package cpu + +import ( + "github.com/segmentio/asm/cpu/arm" + "github.com/segmentio/asm/cpu/arm64" + "github.com/segmentio/asm/cpu/x86" +) + +var ( + // X86 is the bitset representing the set of the x86 instruction sets are + // supported by the CPU. + X86 = x86.ABI() + + // ARM is the bitset representing which parts of the arm instruction sets + // are supported by the CPU. + ARM = arm.ABI() + + // ARM64 is the bitset representing which parts of the arm64 instruction + // sets are supported by the CPU. 
+ ARM64 = arm64.ABI() +) diff --git a/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go new file mode 100644 index 00000000..0949d3d5 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go @@ -0,0 +1,32 @@ +// Package cpuid provides generic types used to represent CPU features supported +// by the architecture. +package cpuid + +// CPU is a bitset of feature flags representing the capabilities of various CPU +// architeectures that this package provides optimized assembly routines for. +// +// The intent is to provide a stable ABI between the Go code that generate the +// assembly, and the program that uses the library functions. +type CPU uint64 + +// Feature represents a single CPU feature. +type Feature uint64 + +const ( + // None is a Feature value that has no CPU features enabled. + None Feature = 0 + // All is a Feature value that has all CPU features enabled. + All Feature = 0xFFFFFFFFFFFFFFFF +) + +func (cpu CPU) Has(feature Feature) bool { + return (Feature(cpu) & feature) == feature +} + +func (cpu *CPU) Set(feature Feature, enabled bool) { + if enabled { + *cpu |= CPU(feature) + } else { + *cpu &= ^CPU(feature) + } +} diff --git a/vendor/github.com/segmentio/asm/cpu/x86/x86.go b/vendor/github.com/segmentio/asm/cpu/x86/x86.go new file mode 100644 index 00000000..9e935375 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/x86/x86.go @@ -0,0 +1,76 @@ +package x86 + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . 
"golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + SSE Feature = 1 << iota // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE41 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + AVX // AVX functions + AVX2 // AVX2 functions + AVX512BF16 // AVX-512 BFLOAT16 Instructions + AVX512BITALG // AVX-512 Bit Algorithms + AVX512BW // AVX-512 Byte and Word Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512F // AVX-512 Foundation + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 + AVX512VL // AVX-512 Vector Length Extensions + AVX512VNNI // AVX-512 Vector Neural Network Instructions + AVX512VP2INTERSECT // AVX-512 Intersect for D/Q + AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + CMOV // Conditional move +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SEE? + cpu.set(SSE2, X86.HasSSE2) + cpu.set(SSE3, X86.HasSSE3) + cpu.set(SSE41, X86.HasSSE41) + cpu.set(SSE42, X86.HasSSE42) + cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu? 
+ cpu.set(SSSE3, X86.HasSSSE3) + cpu.set(AVX, X86.HasAVX) + cpu.set(AVX2, X86.HasAVX2) + cpu.set(AVX512BF16, X86.HasAVX512BF16) + cpu.set(AVX512BITALG, X86.HasAVX512BITALG) + cpu.set(AVX512BW, X86.HasAVX512BW) + cpu.set(AVX512CD, X86.HasAVX512CD) + cpu.set(AVX512DQ, X86.HasAVX512DQ) + cpu.set(AVX512ER, X86.HasAVX512ER) + cpu.set(AVX512F, X86.HasAVX512F) + cpu.set(AVX512IFMA, X86.HasAVX512IFMA) + cpu.set(AVX512PF, X86.HasAVX512PF) + cpu.set(AVX512VBMI, X86.HasAVX512VBMI) + cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2) + cpu.set(AVX512VL, X86.HasAVX512VL) + cpu.set(AVX512VNNI, X86.HasAVX512VNNI) + cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu? + cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ) + cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV? + return cpu +} diff --git a/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go new file mode 100644 index 00000000..913c9cc6 --- /dev/null +++ b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go @@ -0,0 +1,20 @@ +package unsafebytes + +import "unsafe" + +func Pointer(b []byte) *byte { + return *(**byte)(unsafe.Pointer(&b)) +} + +func String(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func BytesOf(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{str: s, cap: len(s)})) +} + +type sliceHeader struct { + str string + cap int +} diff --git a/vendor/github.com/segmentio/asm/keyset/keyset.go b/vendor/github.com/segmentio/asm/keyset/keyset.go new file mode 100644 index 00000000..1943c5f7 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset.go @@ -0,0 +1,40 @@ +package keyset + +import ( + "bytes" + + "github.com/segmentio/asm/cpu" + "github.com/segmentio/asm/cpu/arm64" + "github.com/segmentio/asm/cpu/x86" +) + +// New prepares a set of keys for use with Lookup. 
+// +// An optimized routine is used if the processor supports AVX instructions and +// the maximum length of any of the keys is less than or equal to 16. If New +// returns nil, this indicates that an optimized routine is not available, and +// the caller should use a fallback. +func New(keys [][]byte) []byte { + maxWidth, hasNullByte := checkKeys(keys) + if hasNullByte || maxWidth > 16 || !(cpu.X86.Has(x86.AVX) || cpu.ARM64.Has(arm64.ASIMD)) { + return nil + } + + set := make([]byte, len(keys)*16) + for i, k := range keys { + copy(set[i*16:], k) + } + return set +} + +func checkKeys(keys [][]byte) (maxWidth int, hasNullByte bool) { + for _, k := range keys { + if len(k) > maxWidth { + maxWidth = len(k) + } + if bytes.IndexByte(k, 0) >= 0 { + hasNullByte = true + } + } + return +} diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go new file mode 100644 index 00000000..9554ee67 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go @@ -0,0 +1,10 @@ +// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package keyset + +// Lookup searches for a key in a set of keys, returning its index if +// found. If the key cannot be found, the number of keys is returned. +func Lookup(keyset []byte, key []byte) int diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s new file mode 100644 index 00000000..e27d2c45 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s @@ -0,0 +1,108 @@ +// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func Lookup(keyset []byte, key []byte) int +// Requires: AVX +TEXT ·Lookup(SB), NOSPLIT, $0-56 + MOVQ keyset_base+0(FP), AX + MOVQ keyset_len+8(FP), CX + SHRQ $0x04, CX + MOVQ key_base+24(FP), DX + MOVQ key_len+32(FP), BX + MOVQ key_cap+40(FP), SI + CMPQ BX, $0x10 + JA not_found + CMPQ SI, $0x10 + JB safe_load + +load: + VMOVUPS (DX), X0 + +prepare: + VPXOR X2, X2, X2 + VPCMPEQB X1, X1, X1 + LEAQ blend_masks<>+16(SB), DX + SUBQ BX, DX + VMOVUPS (DX), X3 + VPBLENDVB X3, X0, X2, X0 + XORQ DX, DX + MOVQ CX, BX + SHRQ $0x02, BX + SHLQ $0x02, BX + +bigloop: + CMPQ DX, BX + JE loop + VPCMPEQB (AX), X0, X8 + VPTEST X1, X8 + JCS done + VPCMPEQB 16(AX), X0, X9 + VPTEST X1, X9 + JCS found1 + VPCMPEQB 32(AX), X0, X10 + VPTEST X1, X10 + JCS found2 + VPCMPEQB 48(AX), X0, X11 + VPTEST X1, X11 + JCS found3 + ADDQ $0x04, DX + ADDQ $0x40, AX + JMP bigloop + +loop: + CMPQ DX, CX + JE done + VPCMPEQB (AX), X0, X2 + VPTEST X1, X2 + JCS done + INCQ DX + ADDQ $0x10, AX + JMP loop + JMP done + +found3: + INCQ DX + +found2: + INCQ DX + +found1: + INCQ DX + +done: + MOVQ DX, ret+48(FP) + RET + +not_found: + MOVQ CX, ret+48(FP) + RET + +safe_load: + MOVQ DX, SI + ANDQ $0x00000fff, SI + CMPQ SI, $0x00000ff0 + JBE load + MOVQ $0xfffffffffffffff0, SI + ADDQ BX, SI + VMOVUPS (DX)(SI*1), X0 + LEAQ shuffle_masks<>+16(SB), DX + SUBQ BX, DX + VMOVUPS (DX), X1 + VPSHUFB X1, X0, X0 + JMP prepare + +DATA blend_masks<>+0(SB)/8, $0xffffffffffffffff +DATA blend_masks<>+8(SB)/8, $0xffffffffffffffff +DATA blend_masks<>+16(SB)/8, $0x0000000000000000 +DATA blend_masks<>+24(SB)/8, $0x0000000000000000 +GLOBL blend_masks<>(SB), RODATA|NOPTR, $32 + +DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+8(SB)/8, $0x0f0e0d0c0b0a0908 +DATA shuffle_masks<>+16(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+24(SB)/8, $0x0f0e0d0c0b0a0908 +GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32 diff --git 
a/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go new file mode 100644 index 00000000..feafabef --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go @@ -0,0 +1,8 @@ +//go:build !purego +// +build !purego + +package keyset + +// Lookup searches for a key in a set of keys, returning its index if +// found. If the key cannot be found, the number of keys is returned. +func Lookup(keyset []byte, key []byte) int diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s new file mode 100644 index 00000000..20acb992 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s @@ -0,0 +1,143 @@ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func Lookup(keyset []byte, key []byte) int +TEXT ·Lookup(SB), NOSPLIT, $0-56 + MOVD keyset+0(FP), R0 + MOVD keyset_len+8(FP), R1 + MOVD key+24(FP), R2 + MOVD key_len+32(FP), R3 + MOVD key_cap+40(FP), R4 + + // None of the keys in the set are greater than 16 bytes, so if the input + // key is we can jump straight to not found. + CMP $16, R3 + BHI notfound + + // We'll be moving the keyset pointer (R0) forward as we compare keys, so + // make a copy of the starting point (R6). Also add the byte length (R1) to + // obtain a pointer to the end of the keyset (R5). + MOVD R0, R6 + ADD R0, R1, R5 + + // Prepare a 64-bit mask of all ones. + MOVD $-1, R7 + + // Prepare a vector of all zeroes. + VMOV ZR, V1.B16 + + // Check that it's safe to load 16 bytes of input. If cap(input)<16, jump + // to a check that determines whether a tail load is necessary (to avoid a + // page fault). + CMP $16, R4 + BLO safeload + +load: + // Load the input key (V0) and pad with zero bytes (V1). To blend the two + // vectors, we load a mask for the particular key length and then use TBL + // to select bytes from either V0 or V1. 
+ VLD1 (R2), [V0.B16] + MOVD $blend_masks<>(SB), R10 + ADD R3<<4, R10, R10 + VLD1 (R10), [V2.B16] + VTBL V2.B16, [V0.B16, V1.B16], V3.B16 + +loop: + // Loop through each 16 byte key in the keyset. + CMP R0, R5 + BEQ notfound + + // Load and compare the next key. + VLD1.P 16(R0), [V4.B16] + VCMEQ V3.B16, V4.B16, V5.B16 + VMOV V5.D[0], R8 + VMOV V5.D[1], R9 + AND R8, R9, R9 + + // If the masks match, we found the key. + CMP R9, R7 + BEQ found + JMP loop + +found: + // If the key was found, take the position in the keyset and convert it + // to an index. The keyset pointer (R0) will be 1 key past the match, so + // subtract the starting pointer (R6), divide by 16 to convert from byte + // length to an index, and then subtract one. + SUB R6, R0, R0 + ADD R0>>4, ZR, R0 + SUB $1, R0, R0 + MOVD R0, ret+48(FP) + RET + +notfound: + // Return the number of keys in the keyset, which is the byte length (R1) + // divided by 16. + ADD R1>>4, ZR, R1 + MOVD R1, ret+48(FP) + RET + +safeload: + // Check if the input crosses a page boundary. If not, jump back. + AND $4095, R2, R12 + CMP $4080, R12 + BLS load + + // If it does cross a page boundary, we must assume that loading 16 bytes + // will cause a fault. Instead, we load the 16 bytes up to and including the + // key and then shuffle the key forward in the register. We can shuffle and + // pad with zeroes at the same time to avoid having to also blend (as load + // does). 
+ MOVD $16, R12 + SUB R3, R12, R12 + SUB R12, R2, R2 + VLD1 (R2), [V0.B16] + MOVD $shuffle_masks<>(SB), R10 + ADD R12, R10, R10 + VLD1 (R10), [V2.B16] + VTBL V2.B16, [V0.B16, V1.B16], V3.B16 + JMP loop + +DATA blend_masks<>+0(SB)/8, $0x1010101010101010 +DATA blend_masks<>+8(SB)/8, $0x1010101010101010 +DATA blend_masks<>+16(SB)/8, $0x1010101010101000 +DATA blend_masks<>+24(SB)/8, $0x1010101010101010 +DATA blend_masks<>+32(SB)/8, $0x1010101010100100 +DATA blend_masks<>+40(SB)/8, $0x1010101010101010 +DATA blend_masks<>+48(SB)/8, $0x1010101010020100 +DATA blend_masks<>+56(SB)/8, $0x1010101010101010 +DATA blend_masks<>+64(SB)/8, $0x1010101003020100 +DATA blend_masks<>+72(SB)/8, $0x1010101010101010 +DATA blend_masks<>+80(SB)/8, $0x1010100403020100 +DATA blend_masks<>+88(SB)/8, $0x1010101010101010 +DATA blend_masks<>+96(SB)/8, $0x1010050403020100 +DATA blend_masks<>+104(SB)/8, $0x1010101010101010 +DATA blend_masks<>+112(SB)/8, $0x1006050403020100 +DATA blend_masks<>+120(SB)/8, $0x1010101010101010 +DATA blend_masks<>+128(SB)/8, $0x0706050403020100 +DATA blend_masks<>+136(SB)/8, $0x1010101010101010 +DATA blend_masks<>+144(SB)/8, $0x0706050403020100 +DATA blend_masks<>+152(SB)/8, $0x1010101010101008 +DATA blend_masks<>+160(SB)/8, $0x0706050403020100 +DATA blend_masks<>+168(SB)/8, $0x1010101010100908 +DATA blend_masks<>+176(SB)/8, $0x0706050403020100 +DATA blend_masks<>+184(SB)/8, $0x10101010100A0908 +DATA blend_masks<>+192(SB)/8, $0x0706050403020100 +DATA blend_masks<>+200(SB)/8, $0x101010100B0A0908 +DATA blend_masks<>+208(SB)/8, $0x0706050403020100 +DATA blend_masks<>+216(SB)/8, $0x1010100C0B0A0908 +DATA blend_masks<>+224(SB)/8, $0x0706050403020100 +DATA blend_masks<>+232(SB)/8, $0x10100D0C0B0A0908 +DATA blend_masks<>+240(SB)/8, $0x0706050403020100 +DATA blend_masks<>+248(SB)/8, $0x100E0D0C0B0A0908 +DATA blend_masks<>+256(SB)/8, $0x0706050403020100 +DATA blend_masks<>+264(SB)/8, $0x0F0E0D0C0B0A0908 +GLOBL blend_masks<>(SB), RODATA|NOPTR, $272 + +DATA 
shuffle_masks<>+0(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+8(SB)/8, $0x0F0E0D0C0B0A0908 +DATA shuffle_masks<>+16(SB)/8, $0x1010101010101010 +DATA shuffle_masks<>+24(SB)/8, $0x1010101010101010 +GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_default.go b/vendor/github.com/segmentio/asm/keyset/keyset_default.go new file mode 100644 index 00000000..1fa7d3fc --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_default.go @@ -0,0 +1,19 @@ +//go:build purego || !(amd64 || arm64) +// +build purego !amd64,!arm64 + +package keyset + +func Lookup(keyset []byte, key []byte) int { + if len(key) > 16 { + return len(keyset) / 16 + } + var padded [16]byte + copy(padded[:], key) + + for i := 0; i < len(keyset); i += 16 { + if string(padded[:]) == string(keyset[i:i+16]) { + return i / 16 + } + } + return len(keyset) / 16 +} diff --git a/vendor/github.com/segmentio/encoding/LICENSE b/vendor/github.com/segmentio/encoding/LICENSE new file mode 100644 index 00000000..1fbffdf7 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Segment.io, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/segmentio/encoding/ascii/equal_fold.go b/vendor/github.com/segmentio/encoding/ascii/equal_fold.go new file mode 100644 index 00000000..4207f171 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/ascii/equal_fold.go @@ -0,0 +1,40 @@ +//go:generate go run equal_fold_asm.go -out equal_fold_amd64.s -stubs equal_fold_amd64.go +package ascii + +import ( + "github.com/segmentio/asm/ascii" +) + +// EqualFold is a version of bytes.EqualFold designed to work on ASCII input +// instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. +func EqualFold(a, b []byte) bool { + return ascii.EqualFold(a, b) +} + +func HasPrefixFold(s, prefix []byte) bool { + return ascii.HasPrefixFold(s, prefix) +} + +func HasSuffixFold(s, suffix []byte) bool { + return ascii.HasSuffixFold(s, suffix) +} + +// EqualFoldString is a version of strings.EqualFold designed to work on ASCII +// input instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. 
+func EqualFoldString(a, b string) bool { + return ascii.EqualFoldString(a, b) +} + +func HasPrefixFoldString(s, prefix string) bool { + return ascii.HasPrefixFoldString(s, prefix) +} + +func HasSuffixFoldString(s, suffix string) bool { + return ascii.HasSuffixFoldString(s, suffix) +} diff --git a/vendor/github.com/segmentio/encoding/ascii/valid.go b/vendor/github.com/segmentio/encoding/ascii/valid.go new file mode 100644 index 00000000..68b7c6ca --- /dev/null +++ b/vendor/github.com/segmentio/encoding/ascii/valid.go @@ -0,0 +1,26 @@ +//go:generate go run valid_asm.go -out valid_amd64.s -stubs valid_amd64.go +package ascii + +import ( + "github.com/segmentio/asm/ascii" +) + +// Valid returns true if b contains only ASCII characters. +func Valid(b []byte) bool { + return ascii.Valid(b) +} + +// ValidBytes returns true if b is an ASCII character. +func ValidByte(b byte) bool { + return ascii.ValidByte(b) +} + +// ValidBytes returns true if b is an ASCII character. +func ValidRune(r rune) bool { + return ascii.ValidRune(r) +} + +// ValidString returns true if s contains only ASCII characters. +func ValidString(s string) bool { + return ascii.ValidString(s) +} diff --git a/vendor/github.com/segmentio/encoding/ascii/valid_print.go b/vendor/github.com/segmentio/encoding/ascii/valid_print.go new file mode 100644 index 00000000..241f5849 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/ascii/valid_print.go @@ -0,0 +1,26 @@ +//go:generate go run valid_print_asm.go -out valid_print_amd64.s -stubs valid_print_amd64.go +package ascii + +import ( + "github.com/segmentio/asm/ascii" +) + +// Valid returns true if b contains only printable ASCII characters. +func ValidPrint(b []byte) bool { + return ascii.ValidPrint(b) +} + +// ValidBytes returns true if b is an ASCII character. +func ValidPrintByte(b byte) bool { + return ascii.ValidPrintByte(b) +} + +// ValidBytes returns true if b is an ASCII character. 
+func ValidPrintRune(r rune) bool { + return ascii.ValidPrintRune(r) +} + +// ValidString returns true if s contains only printable ASCII characters. +func ValidPrintString(s string) bool { + return ascii.ValidPrintString(s) +} diff --git a/vendor/github.com/segmentio/encoding/iso8601/parse.go b/vendor/github.com/segmentio/encoding/iso8601/parse.go new file mode 100644 index 00000000..6fbe5dc3 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/iso8601/parse.go @@ -0,0 +1,185 @@ +package iso8601 + +import ( + "encoding/binary" + "errors" + "time" + "unsafe" +) + +var ( + errInvalidTimestamp = errors.New("invalid ISO8601 timestamp") + errMonthOutOfRange = errors.New("month out of range") + errDayOutOfRange = errors.New("day out of range") + errHourOutOfRange = errors.New("hour out of range") + errMinuteOutOfRange = errors.New("minute out of range") + errSecondOutOfRange = errors.New("second out of range") +) + +// Parse parses an ISO8601 timestamp, e.g. "2021-03-25T21:36:12Z". +func Parse(input string) (time.Time, error) { + b := unsafeStringToBytes(input) + if len(b) >= 20 && len(b) <= 30 && b[len(b)-1] == 'Z' { + if len(b) == 21 || (len(b) > 21 && b[19] != '.') { + return time.Time{}, errInvalidTimestamp + } + + t1 := binary.LittleEndian.Uint64(b) + t2 := binary.LittleEndian.Uint64(b[8:16]) + t3 := uint64(b[16]) | uint64(b[17])<<8 | uint64(b[18])<<16 | uint64('Z')<<24 + + // Check for valid separators by masking input with " - - T : : Z". + // If separators are all valid, replace them with a '0' (0x30) byte and + // check all bytes are now numeric. 
+ if !match(t1, mask1) || !match(t2, mask2) || !match(t3, mask3) { + return time.Time{}, errInvalidTimestamp + } + t1 ^= replace1 + t2 ^= replace2 + t3 ^= replace3 + if (nonNumeric(t1) | nonNumeric(t2) | nonNumeric(t3)) != 0 { + return time.Time{}, errInvalidTimestamp + } + + t1 -= zero + t2 -= zero + t3 -= zero + year := (t1&0xF)*1000 + (t1>>8&0xF)*100 + (t1>>16&0xF)*10 + (t1 >> 24 & 0xF) + month := (t1>>40&0xF)*10 + (t1 >> 48 & 0xF) + day := (t2&0xF)*10 + (t2 >> 8 & 0xF) + hour := (t2>>24&0xF)*10 + (t2 >> 32 & 0xF) + minute := (t2>>48&0xF)*10 + (t2 >> 56) + second := (t3>>8&0xF)*10 + (t3 >> 16) + + nanos := int64(0) + if len(b) > 20 { + for _, c := range b[20 : len(b)-1] { + if c < '0' || c > '9' { + return time.Time{}, errInvalidTimestamp + } + nanos = (nanos * 10) + int64(c-'0') + } + nanos *= pow10[30-len(b)] + } + + if err := validate(year, month, day, hour, minute, second); err != nil { + return time.Time{}, err + } + + unixSeconds := int64(daysSinceEpoch(year, month, day))*86400 + int64(hour*3600+minute*60+second) + return time.Unix(unixSeconds, nanos).UTC(), nil + } + + // Fallback to using time.Parse(). + t, err := time.Parse(time.RFC3339Nano, input) + if err != nil { + // Override (and don't wrap) the error here. The error returned by + // time.Parse() is dynamic, and includes a reference to the input + // string. By overriding the error, we guarantee that the input string + // doesn't escape. + return time.Time{}, errInvalidTimestamp + } + return t, nil +} + +var pow10 = []int64{1, 10, 100, 1000, 1e4, 1e5, 1e6, 1e7, 1e8} + +const ( + mask1 = 0x2d00002d00000000 // YYYY-MM- + mask2 = 0x00003a0000540000 // DDTHH:MM + mask3 = 0x000000005a00003a // :SSZ____ + + // Generate masks that replace the separators with a numeric byte. + // The input must have valid separators. XOR with the separator bytes + // to zero them out and then XOR with 0x30 to replace them with '0'. 
+ replace1 = mask1 ^ 0x3000003000000000 + replace2 = mask2 ^ 0x0000300000300000 + replace3 = mask3 ^ 0x3030303030000030 + + lsb = ^uint64(0) / 255 + msb = lsb * 0x80 + + zero = lsb * '0' + nine = lsb * '9' +) + +func validate(year, month, day, hour, minute, second uint64) error { + if day == 0 || day > 31 { + return errDayOutOfRange + } + if month == 0 || month > 12 { + return errMonthOutOfRange + } + if hour >= 24 { + return errHourOutOfRange + } + if minute >= 60 { + return errMinuteOutOfRange + } + if second >= 60 { + return errSecondOutOfRange + } + if month == 2 && (day > 29 || (day == 29 && !isLeapYear(year))) { + return errDayOutOfRange + } + if day == 31 { + switch month { + case 4, 6, 9, 11: + return errDayOutOfRange + } + } + return nil +} + +func match(u, mask uint64) bool { + return (u & mask) == mask +} + +func nonNumeric(u uint64) uint64 { + // Derived from https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord. + // Subtract '0' (0x30) from each byte so that the MSB is set in each byte + // if there's a byte less than '0' (0x30). Add 0x46 (0x7F-'9') so that the + // MSB is set if there's a byte greater than '9' (0x39). To handle overflow + // when adding 0x46, include the MSB from the input bytes in the final mask. + // Remove all but the MSBs and then you're left with a mask where each + // non-numeric byte from the input has its MSB set in the output. + return ((u - zero) | (u + (^msb - nine)) | u) & msb +} + +func daysSinceEpoch(year, month, day uint64) uint64 { + // Derived from https://blog.reverberate.org/2020/05/12/optimizing-date-algorithms.html. 
+ monthAdjusted := month - 3 + var carry uint64 + if monthAdjusted > month { + carry = 1 + } + var adjust uint64 + if carry == 1 { + adjust = 12 + } + yearAdjusted := year + 4800 - carry + monthDays := ((monthAdjusted+adjust)*62719 + 769) / 2048 + leapDays := yearAdjusted/4 - yearAdjusted/100 + yearAdjusted/400 + return yearAdjusted*365 + leapDays + monthDays + (day - 1) - 2472632 +} + +func isLeapYear(y uint64) bool { + return (y%4) == 0 && ((y%100) != 0 || (y%400) == 0) +} + +func unsafeStringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{ + Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)), + Len: len(s), + Cap: len(s), + })) +} + +// sliceHeader is like reflect.SliceHeader but the Data field is a +// unsafe.Pointer instead of being a uintptr to avoid invalid +// conversions from uintptr to unsafe.Pointer. +type sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} diff --git a/vendor/github.com/segmentio/encoding/iso8601/valid.go b/vendor/github.com/segmentio/encoding/iso8601/valid.go new file mode 100644 index 00000000..187b4ef4 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/iso8601/valid.go @@ -0,0 +1,179 @@ +package iso8601 + +// ValidFlags is a bitset type used to configure the behavior of the Valid +// function. +type ValidFlags int + +const ( + // Strict is a validation flag used to represent a string iso8601 validation + // (this is the default). + Strict ValidFlags = 0 + + // AllowSpaceSeparator allows the presence of a space instead of a 'T' as + // separator between the date and time. + AllowSpaceSeparator ValidFlags = 1 << iota + + // AllowMissingTime allows the value to contain only a date. + AllowMissingTime + + // AllowMissingSubsecond allows the value to contain only a date and time. + AllowMissingSubsecond + + // AllowMissingTimezone allows the value to be missing the timezone + // information. 
+ AllowMissingTimezone + + // AllowNumericTimezone allows the value to represent timezones in their + // numeric form. + AllowNumericTimezone + + // Flexible is a combination of all validation flag that allow for + // non-strict checking of the input value. + Flexible = AllowSpaceSeparator | AllowMissingTime | AllowMissingSubsecond | AllowMissingTimezone | AllowNumericTimezone +) + +// Valid check value to verify whether or not it is a valid iso8601 time +// representation. +func Valid(value string, flags ValidFlags) bool { + var ok bool + + // year + if value, ok = readDigits(value, 4, 4); !ok { + return false + } + + if value, ok = readByte(value, '-'); !ok { + return false + } + + // month + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if value, ok = readByte(value, '-'); !ok { + return false + } + + // day + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if len(value) == 0 && (flags&AllowMissingTime) != 0 { + return true // date only + } + + // separator + if value, ok = readByte(value, 'T'); !ok { + if (flags & AllowSpaceSeparator) == 0 { + return false + } + if value, ok = readByte(value, ' '); !ok { + return false + } + } + + // hour + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if value, ok = readByte(value, ':'); !ok { + return false + } + + // minute + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if value, ok = readByte(value, ':'); !ok { + return false + } + + // second + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + // microsecond + if value, ok = readByte(value, '.'); !ok { + if (flags & AllowMissingSubsecond) == 0 { + return false + } + } else { + if value, ok = readDigits(value, 1, 9); !ok { + return false + } + } + + if len(value) == 0 && (flags&AllowMissingTimezone) != 0 { + return true // date and time + } + + // timezone + if value, ok = readByte(value, 'Z'); ok { + return len(value) == 0 + } + + if (flags & AllowSpaceSeparator) 
!= 0 { + value, _ = readByte(value, ' ') + } + + if value, ok = readByte(value, '+'); !ok { + if value, ok = readByte(value, '-'); !ok { + return false + } + } + + // timezone hour + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if value, ok = readByte(value, ':'); !ok { + if (flags & AllowNumericTimezone) == 0 { + return false + } + } + + // timezone minute + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + return len(value) == 0 +} + +func readDigits(value string, min, max int) (string, bool) { + if len(value) < min { + return value, false + } + + i := 0 + + for i < max && i < len(value) && isDigit(value[i]) { + i++ + } + + if i < max && i < min { + return value, false + } + + return value[i:], true +} + +func readByte(value string, c byte) (string, bool) { + if len(value) == 0 { + return value, false + } + if value[0] != c { + return value, false + } + return value[1:], true +} + +func isDigit(c byte) bool { + return '0' <= c && c <= '9' +} diff --git a/vendor/github.com/segmentio/encoding/json/README.md b/vendor/github.com/segmentio/encoding/json/README.md new file mode 100644 index 00000000..c5ed94b7 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/README.md @@ -0,0 +1,76 @@ +# encoding/json [![GoDoc](https://godoc.org/github.com/segmentio/encoding/json?status.svg)](https://godoc.org/github.com/segmentio/encoding/json) + +Go package offering a replacement implementation of the standard library's +[`encoding/json`](https://golang.org/pkg/encoding/json/) package, with much +better performance. 
+ +## Usage + +The exported API of this package mirrors the standard library's +[`encoding/json`](https://golang.org/pkg/encoding/json/) package, the only +change needed to take advantage of the performance improvements is the import +path of the `json` package, from: +```go +import ( + "encoding/json" +) +``` +to +```go +import ( + "github.com/segmentio/encoding/json" +) +``` + +One way to gain higher encoding throughput is to disable HTML escaping. +It allows the string encoding to use a much more efficient code path which +does not require parsing UTF-8 runes most of the time. + +## Performance Improvements + +The internal implementation uses a fair amount of unsafe operations (untyped +code, pointer arithmetic, etc...) to avoid using reflection as much as possible, +which is often the reason why serialization code has a large CPU and memory +footprint. + +The package aims for zero unnecessary dynamic memory allocations and hot code +paths that are mostly free from calls into the reflect package. + +## Compatibility with encoding/json + +This package aims to be a drop-in replacement, therefore it is tested to behave +exactly like the standard library's package. However, there are still a few +missing features that have not been ported yet: + +- Streaming decoder, currently the `Decoder` implementation offered by the +package does not support progressively reading values from a JSON array (unlike +the standard library). In our experience this is a very rare use-case, if you +need it you're better off sticking to the standard library, or spend a bit of +time implementing it in here ;) + +Note that none of those features should result in performance degradations if +they were implemented in the package, and we welcome contributions! + +## Trade-offs + +As one would expect, we had to make a couple of trade-offs to achieve greater +performance than the standard library, but there were also features that we +did not want to give away. 
+ +Other open-source packages offering a reduced CPU and memory footprint usually +do so by designing a different API, or require code generation (therefore adding +complexity to the build process). These were not acceptable conditions for us, +as we were not willing to trade off developer productivity for better runtime +performance. To achieve this, we chose to exactly replicate the standard +library interfaces and behavior, which meant the package implementation was the +only area that we were able to work with. The internals of this package make +heavy use of unsafe pointer arithmetics and other performance optimizations, +and therefore are not as approachable as typical Go programs. Basically, we put +a bigger burden on maintainers to achieve better runtime cost without +sacrificing developer productivity. + +For these reasons, we also don't believe that this code should be ported upstream +to the standard `encoding/json` package. The standard library has to remain +readable and approachable to maximize stability and maintainability, and make +projects like this one possible because a high quality reference implementation +already exists. diff --git a/vendor/github.com/segmentio/encoding/json/codec.go b/vendor/github.com/segmentio/encoding/json/codec.go new file mode 100644 index 00000000..77fe264f --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/codec.go @@ -0,0 +1,1240 @@ +package json + +import ( + "encoding" + "encoding/json" + "fmt" + "maps" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + "sync/atomic" + "time" + "unicode" + "unsafe" + + "github.com/segmentio/asm/keyset" +) + +const ( + // 1000 is the value used by the standard encoding/json package. 
+ // + // https://cs.opensource.google/go/go/+/refs/tags/go1.17.3:src/encoding/json/encode.go;drc=refs%2Ftags%2Fgo1.17.3;l=300 + startDetectingCyclesAfter = 1000 +) + +type codec struct { + encode encodeFunc + decode decodeFunc +} + +type encoder struct { + flags AppendFlags + // ptrDepth tracks the depth of pointer cycles, when it reaches the value + // of startDetectingCyclesAfter, the ptrSeen map is allocated and the + // encoder starts tracking pointers it has seen as an attempt to detect + // whether it has entered a pointer cycle and needs to error before the + // goroutine runs out of stack space. + ptrDepth uint32 + ptrSeen map[unsafe.Pointer]struct{} +} + +type decoder struct { + flags ParseFlags +} + +type ( + encodeFunc func(encoder, []byte, unsafe.Pointer) ([]byte, error) + decodeFunc func(decoder, []byte, unsafe.Pointer) ([]byte, error) +) + +type ( + emptyFunc func(unsafe.Pointer) bool + sortFunc func([]reflect.Value) +) + +// Eventually consistent cache mapping go types to dynamically generated +// codecs. +// +// Note: using a uintptr as key instead of reflect.Type shaved ~15ns off of +// the ~30ns Marhsal/Unmarshal functions which were dominated by the map +// lookup time for simple types like bool, int, etc.. 
+var cache atomic.Pointer[map[unsafe.Pointer]codec] + +func cacheLoad() map[unsafe.Pointer]codec { + p := cache.Load() + if p == nil { + return nil + } + + return *p +} + +func cacheStore(typ reflect.Type, cod codec, oldCodecs map[unsafe.Pointer]codec) { + newCodecs := make(map[unsafe.Pointer]codec, len(oldCodecs)+1) + maps.Copy(newCodecs, oldCodecs) + newCodecs[typeid(typ)] = cod + + cache.Store(&newCodecs) +} + +func typeid(t reflect.Type) unsafe.Pointer { + return (*iface)(unsafe.Pointer(&t)).ptr +} + +func constructCachedCodec(t reflect.Type, cache map[unsafe.Pointer]codec) codec { + c := constructCodec(t, map[reflect.Type]*structType{}, t.Kind() == reflect.Ptr) + + if inlined(t) { + c.encode = constructInlineValueEncodeFunc(c.encode) + } + + cacheStore(t, c, cache) + return c +} + +func constructCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) (c codec) { + switch t { + case nullType, nil: + c = codec{encode: encoder.encodeNull, decode: decoder.decodeNull} + + case numberType: + c = codec{encode: encoder.encodeNumber, decode: decoder.decodeNumber} + + case bytesType: + c = codec{encode: encoder.encodeBytes, decode: decoder.decodeBytes} + + case durationType: + c = codec{encode: encoder.encodeDuration, decode: decoder.decodeDuration} + + case timeType: + c = codec{encode: encoder.encodeTime, decode: decoder.decodeTime} + + case interfaceType: + c = codec{encode: encoder.encodeInterface, decode: decoder.decodeInterface} + + case rawMessageType: + c = codec{encode: encoder.encodeRawMessage, decode: decoder.decodeRawMessage} + + case numberPtrType: + c = constructPointerCodec(numberPtrType, nil) + + case durationPtrType: + c = constructPointerCodec(durationPtrType, nil) + + case timePtrType: + c = constructPointerCodec(timePtrType, nil) + + case rawMessagePtrType: + c = constructPointerCodec(rawMessagePtrType, nil) + } + + if c.encode != nil { + return + } + + switch t.Kind() { + case reflect.Bool: + c = codec{encode: encoder.encodeBool, 
decode: decoder.decodeBool} + + case reflect.Int: + c = codec{encode: encoder.encodeInt, decode: decoder.decodeInt} + + case reflect.Int8: + c = codec{encode: encoder.encodeInt8, decode: decoder.decodeInt8} + + case reflect.Int16: + c = codec{encode: encoder.encodeInt16, decode: decoder.decodeInt16} + + case reflect.Int32: + c = codec{encode: encoder.encodeInt32, decode: decoder.decodeInt32} + + case reflect.Int64: + c = codec{encode: encoder.encodeInt64, decode: decoder.decodeInt64} + + case reflect.Uint: + c = codec{encode: encoder.encodeUint, decode: decoder.decodeUint} + + case reflect.Uintptr: + c = codec{encode: encoder.encodeUintptr, decode: decoder.decodeUintptr} + + case reflect.Uint8: + c = codec{encode: encoder.encodeUint8, decode: decoder.decodeUint8} + + case reflect.Uint16: + c = codec{encode: encoder.encodeUint16, decode: decoder.decodeUint16} + + case reflect.Uint32: + c = codec{encode: encoder.encodeUint32, decode: decoder.decodeUint32} + + case reflect.Uint64: + c = codec{encode: encoder.encodeUint64, decode: decoder.decodeUint64} + + case reflect.Float32: + c = codec{encode: encoder.encodeFloat32, decode: decoder.decodeFloat32} + + case reflect.Float64: + c = codec{encode: encoder.encodeFloat64, decode: decoder.decodeFloat64} + + case reflect.String: + c = codec{encode: encoder.encodeString, decode: decoder.decodeString} + + case reflect.Interface: + c = constructInterfaceCodec(t) + + case reflect.Array: + c = constructArrayCodec(t, seen, canAddr) + + case reflect.Slice: + c = constructSliceCodec(t, seen) + + case reflect.Map: + c = constructMapCodec(t, seen) + + case reflect.Struct: + c = constructStructCodec(t, seen, canAddr) + + case reflect.Ptr: + c = constructPointerCodec(t, seen) + + default: + c = constructUnsupportedTypeCodec(t) + } + + p := reflect.PointerTo(t) + + if canAddr { + switch { + case p.Implements(jsonMarshalerType): + c.encode = constructJSONMarshalerEncodeFunc(t, true) + case p.Implements(textMarshalerType): + c.encode = 
constructTextMarshalerEncodeFunc(t, true) + } + } + + switch { + case t.Implements(jsonMarshalerType): + c.encode = constructJSONMarshalerEncodeFunc(t, false) + case t.Implements(textMarshalerType): + c.encode = constructTextMarshalerEncodeFunc(t, false) + } + + switch { + case p.Implements(jsonUnmarshalerType): + c.decode = constructJSONUnmarshalerDecodeFunc(t, true) + case p.Implements(textUnmarshalerType): + c.decode = constructTextUnmarshalerDecodeFunc(t, true) + } + + return +} + +func constructStringCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec { + c := constructCodec(t, seen, canAddr) + return codec{ + encode: constructStringEncodeFunc(c.encode), + decode: constructStringDecodeFunc(c.decode), + } +} + +func constructStringEncodeFunc(encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeToString(b, p, encode) + } +} + +func constructStringDecodeFunc(decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeFromString(b, p, decode) + } +} + +func constructStringToIntDecodeFunc(t reflect.Type, decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeFromStringToInt(b, p, t, decode) + } +} + +func constructArrayCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec { + e := t.Elem() + c := constructCodec(e, seen, canAddr) + s := alignedSize(e) + return codec{ + encode: constructArrayEncodeFunc(s, t, c.encode), + decode: constructArrayDecodeFunc(s, t, c.decode), + } +} + +func constructArrayEncodeFunc(size uintptr, t reflect.Type, encode encodeFunc) encodeFunc { + n := t.Len() + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeArray(b, p, n, size, t, encode) + } +} + +func constructArrayDecodeFunc(size uintptr, t reflect.Type, decode decodeFunc) decodeFunc { + n := t.Len() + return 
func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeArray(b, p, n, size, t, decode) + } +} + +func constructSliceCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { + e := t.Elem() + s := alignedSize(e) + + if e.Kind() == reflect.Uint8 { + // Go 1.7+ behavior: slices of byte types (and aliases) may override the + // default encoding and decoding behaviors by implementing marshaler and + // unmarshaler interfaces. + p := reflect.PointerTo(e) + c := codec{} + + switch { + case e.Implements(jsonMarshalerType): + c.encode = constructJSONMarshalerEncodeFunc(e, false) + case e.Implements(textMarshalerType): + c.encode = constructTextMarshalerEncodeFunc(e, false) + case p.Implements(jsonMarshalerType): + c.encode = constructJSONMarshalerEncodeFunc(e, true) + case p.Implements(textMarshalerType): + c.encode = constructTextMarshalerEncodeFunc(e, true) + } + + switch { + case e.Implements(jsonUnmarshalerType): + c.decode = constructJSONUnmarshalerDecodeFunc(e, false) + case e.Implements(textUnmarshalerType): + c.decode = constructTextUnmarshalerDecodeFunc(e, false) + case p.Implements(jsonUnmarshalerType): + c.decode = constructJSONUnmarshalerDecodeFunc(e, true) + case p.Implements(textUnmarshalerType): + c.decode = constructTextUnmarshalerDecodeFunc(e, true) + } + + if c.encode != nil { + c.encode = constructSliceEncodeFunc(s, t, c.encode) + } else { + c.encode = encoder.encodeBytes + } + + if c.decode != nil { + c.decode = constructSliceDecodeFunc(s, t, c.decode) + } else { + c.decode = decoder.decodeBytes + } + + return c + } + + c := constructCodec(e, seen, true) + return codec{ + encode: constructSliceEncodeFunc(s, t, c.encode), + decode: constructSliceDecodeFunc(s, t, c.decode), + } +} + +func constructSliceEncodeFunc(size uintptr, t reflect.Type, encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeSlice(b, p, size, t, encode) + } +} + +func 
constructSliceDecodeFunc(size uintptr, t reflect.Type, decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeSlice(b, p, size, t, decode) + } +} + +func constructMapCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { + var sortKeys sortFunc + k := t.Key() + v := t.Elem() + + // Faster implementations for some common cases. + switch { + case k == stringType && v == interfaceType: + return codec{ + encode: encoder.encodeMapStringInterface, + decode: decoder.decodeMapStringInterface, + } + + case k == stringType && v == rawMessageType: + return codec{ + encode: encoder.encodeMapStringRawMessage, + decode: decoder.decodeMapStringRawMessage, + } + + case k == stringType && v == stringType: + return codec{ + encode: encoder.encodeMapStringString, + decode: decoder.decodeMapStringString, + } + + case k == stringType && v == stringsType: + return codec{ + encode: encoder.encodeMapStringStringSlice, + decode: decoder.decodeMapStringStringSlice, + } + + case k == stringType && v == boolType: + return codec{ + encode: encoder.encodeMapStringBool, + decode: decoder.decodeMapStringBool, + } + } + + kc := codec{} + vc := constructCodec(v, seen, false) + + if k.Implements(textMarshalerType) || reflect.PointerTo(k).Implements(textUnmarshalerType) { + kc.encode = constructTextMarshalerEncodeFunc(k, false) + kc.decode = constructTextUnmarshalerDecodeFunc(k, true) + + sortKeys = func(keys []reflect.Value) { + sort.Slice(keys, func(i, j int) bool { + // This is a performance abomination but the use case is rare + // enough that it shouldn't be a problem in practice. 
+ k1, _ := keys[i].Interface().(encoding.TextMarshaler).MarshalText() + k2, _ := keys[j].Interface().(encoding.TextMarshaler).MarshalText() + return string(k1) < string(k2) + }) + } + } else { + switch k.Kind() { + case reflect.String: + kc.encode = encoder.encodeString + kc.decode = decoder.decodeString + + sortKeys = func(keys []reflect.Value) { + sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() }) + } + + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64: + kc = constructStringCodec(k, seen, false) + + sortKeys = func(keys []reflect.Value) { + sort.Slice(keys, func(i, j int) bool { return intStringsAreSorted(keys[i].Int(), keys[j].Int()) }) + } + + case reflect.Uint, + reflect.Uintptr, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + kc = constructStringCodec(k, seen, false) + + sortKeys = func(keys []reflect.Value) { + sort.Slice(keys, func(i, j int) bool { return uintStringsAreSorted(keys[i].Uint(), keys[j].Uint()) }) + } + + default: + return constructUnsupportedTypeCodec(t) + } + } + + if inlined(v) { + vc.encode = constructInlineValueEncodeFunc(vc.encode) + } + + return codec{ + encode: constructMapEncodeFunc(t, kc.encode, vc.encode, sortKeys), + decode: constructMapDecodeFunc(t, kc.decode, vc.decode), + } +} + +func constructMapEncodeFunc(t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeMap(b, p, t, encodeKey, encodeValue, sortKeys) + } +} + +func constructMapDecodeFunc(t reflect.Type, decodeKey, decodeValue decodeFunc) decodeFunc { + kt := t.Key() + vt := t.Elem() + kz := reflect.Zero(kt) + vz := reflect.Zero(vt) + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeMap(b, p, t, kt, vt, kz, vz, decodeKey, decodeValue) + } +} + +func constructStructCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr 
bool) codec { + st := constructStructType(t, seen, canAddr) + return codec{ + encode: constructStructEncodeFunc(st), + decode: constructStructDecodeFunc(st), + } +} + +func constructStructType(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) *structType { + // Used for preventing infinite recursion on types that have pointers to + // themselves. + st := seen[t] + + if st == nil { + st = &structType{ + fields: make([]structField, 0, t.NumField()), + fieldsIndex: make(map[string]*structField), + ficaseIndex: make(map[string]*structField), + typ: t, + } + + seen[t] = st + st.fields = appendStructFields(st.fields, t, 0, seen, canAddr) + + for i := range st.fields { + f := &st.fields[i] + s := strings.ToLower(f.name) + st.fieldsIndex[f.name] = f + // When there is ambiguity because multiple fields have the same + // case-insensitive representation, the first field must win. + if _, exists := st.ficaseIndex[s]; !exists { + st.ficaseIndex[s] = f + } + } + + // At a certain point the linear scan provided by keyset is less + // efficient than a map. The 32 was chosen based on benchmarks in the + // segmentio/asm repo run with an Intel Kaby Lake processor and go1.17. 
+ if len(st.fields) <= 32 { + keys := make([][]byte, len(st.fields)) + for i, f := range st.fields { + keys[i] = []byte(f.name) + } + st.keyset = keyset.New(keys) + } + } + + return st +} + +func constructStructEncodeFunc(st *structType) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeStruct(b, p, st) + } +} + +func constructStructDecodeFunc(st *structType) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeStruct(b, p, st) + } +} + +func constructEmbeddedStructPointerCodec(t reflect.Type, unexported bool, offset uintptr, field codec) codec { + return codec{ + encode: constructEmbeddedStructPointerEncodeFunc(t, unexported, offset, field.encode), + decode: constructEmbeddedStructPointerDecodeFunc(t, unexported, offset, field.decode), + } +} + +func constructEmbeddedStructPointerEncodeFunc(t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeEmbeddedStructPointer(b, p, t, unexported, offset, encode) + } +} + +func constructEmbeddedStructPointerDecodeFunc(t reflect.Type, unexported bool, offset uintptr, decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeEmbeddedStructPointer(b, p, t, unexported, offset, decode) + } +} + +func appendStructFields(fields []structField, t reflect.Type, offset uintptr, seen map[reflect.Type]*structType, canAddr bool) []structField { + type embeddedField struct { + index int + offset uintptr + pointer bool + unexported bool + subtype *structType + subfield *structField + } + + names := make(map[string]struct{}) + embedded := make([]embeddedField, 0, 10) + + for i := range t.NumField() { + f := t.Field(i) + + var ( + name = f.Name + anonymous = f.Anonymous + tag = false + omitempty = false + stringify = false + unexported = len(f.PkgPath) != 0 + ) + + if 
unexported && !anonymous { // unexported + continue + } + + if parts := strings.Split(f.Tag.Get("json"), ","); len(parts) != 0 { + if len(parts[0]) != 0 { + name, tag = parts[0], true + } + + if name == "-" && len(parts) == 1 { // ignored + continue + } + + if !isValidTag(name) { + name = f.Name + } + + for _, tag := range parts[1:] { + switch tag { + case "omitempty": + omitempty = true + case "string": + stringify = true + } + } + } + + if anonymous && !tag { // embedded + typ := f.Type + ptr := f.Type.Kind() == reflect.Ptr + + if ptr { + typ = f.Type.Elem() + } + + if typ.Kind() == reflect.Struct { + // When the embedded fields is inlined the fields can be looked + // up by offset from the address of the wrapping object, so we + // simply add the embedded struct fields to the list of fields + // of the current struct type. + subtype := constructStructType(typ, seen, canAddr) + + for j := range subtype.fields { + embedded = append(embedded, embeddedField{ + index: i<<32 | j, + offset: offset + f.Offset, + pointer: ptr, + unexported: unexported, + subtype: subtype, + subfield: &subtype.fields[j], + }) + } + + continue + } + + if unexported { // ignore unexported non-struct types + continue + } + } + + codec := constructCodec(f.Type, seen, canAddr) + + if stringify { + // https://golang.org/pkg/encoding/json/#Marshal + // + // The "string" option signals that a field is stored as JSON inside + // a JSON-encoded string. It applies only to fields of string, + // floating point, integer, or boolean types. 
This extra level of + // encoding is sometimes used when communicating with JavaScript + // programs: + typ := f.Type + + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + switch typ.Kind() { + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64, + reflect.Uint, + reflect.Uintptr, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + codec.encode = constructStringEncodeFunc(codec.encode) + codec.decode = constructStringToIntDecodeFunc(typ, codec.decode) + case reflect.Bool, + reflect.Float32, + reflect.Float64, + reflect.String: + codec.encode = constructStringEncodeFunc(codec.encode) + codec.decode = constructStringDecodeFunc(codec.decode) + } + } + + fields = append(fields, structField{ + codec: codec, + offset: offset + f.Offset, + empty: emptyFuncOf(f.Type), + tag: tag, + omitempty: omitempty, + name: name, + index: i << 32, + typ: f.Type, + zero: reflect.Zero(f.Type), + }) + + names[name] = struct{}{} + } + + // Only unambiguous embedded fields must be serialized. + ambiguousNames := make(map[string]int) + ambiguousTags := make(map[string]int) + + // Embedded types can never override a field that was already present at + // the top-level. 
+ for name := range names { + ambiguousNames[name]++ + ambiguousTags[name]++ + } + + for _, embfield := range embedded { + ambiguousNames[embfield.subfield.name]++ + if embfield.subfield.tag { + ambiguousTags[embfield.subfield.name]++ + } + } + + for _, embfield := range embedded { + subfield := *embfield.subfield + + if ambiguousNames[subfield.name] > 1 && (!subfield.tag || ambiguousTags[subfield.name] != 1) { + continue // ambiguous embedded field + } + + if embfield.pointer { + subfield.codec = constructEmbeddedStructPointerCodec(embfield.subtype.typ, embfield.unexported, subfield.offset, subfield.codec) + subfield.offset = embfield.offset + } else { + subfield.offset += embfield.offset + } + + // To prevent dominant flags more than one level below the embedded one. + subfield.tag = false + + // To ensure the order of the fields in the output is the same is in the + // struct type. + subfield.index = embfield.index + + fields = append(fields, subfield) + } + + for i := range fields { + name := fields[i].name + fields[i].json = encodeKeyFragment(name, 0) + fields[i].html = encodeKeyFragment(name, EscapeHTML) + } + + sort.Slice(fields, func(i, j int) bool { return fields[i].index < fields[j].index }) + return fields +} + +func encodeKeyFragment(s string, flags AppendFlags) string { + b := make([]byte, 1, len(s)+4) + b[0] = ',' + e := encoder{flags: flags} + b, _ = e.encodeString(b, unsafe.Pointer(&s)) + b = append(b, ':') + return *(*string)(unsafe.Pointer(&b)) +} + +func constructPointerCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { + e := t.Elem() + c := constructCodec(e, seen, true) + return codec{ + encode: constructPointerEncodeFunc(e, c.encode), + decode: constructPointerDecodeFunc(e, c.decode), + } +} + +func constructPointerEncodeFunc(t reflect.Type, encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodePointer(b, p, t, encode) + } +} + +func constructPointerDecodeFunc(t 
reflect.Type, decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodePointer(b, p, t, decode) + } +} + +func constructInterfaceCodec(t reflect.Type) codec { + return codec{ + encode: constructMaybeEmptyInterfaceEncoderFunc(t), + decode: constructMaybeEmptyInterfaceDecoderFunc(t), + } +} + +func constructMaybeEmptyInterfaceEncoderFunc(t reflect.Type) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeMaybeEmptyInterface(b, p, t) + } +} + +func constructMaybeEmptyInterfaceDecoderFunc(t reflect.Type) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeMaybeEmptyInterface(b, p, t) + } +} + +func constructUnsupportedTypeCodec(t reflect.Type) codec { + return codec{ + encode: constructUnsupportedTypeEncodeFunc(t), + decode: constructUnsupportedTypeDecodeFunc(t), + } +} + +func constructUnsupportedTypeEncodeFunc(t reflect.Type) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeUnsupportedTypeError(b, p, t) + } +} + +func constructUnsupportedTypeDecodeFunc(t reflect.Type) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeUnmarshalTypeError(b, p, t) + } +} + +func constructJSONMarshalerEncodeFunc(t reflect.Type, pointer bool) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeJSONMarshaler(b, p, t, pointer) + } +} + +func constructJSONUnmarshalerDecodeFunc(t reflect.Type, pointer bool) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeJSONUnmarshaler(b, p, t, pointer) + } +} + +func constructTextMarshalerEncodeFunc(t reflect.Type, pointer bool) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeTextMarshaler(b, p, t, pointer) + } +} + +func 
constructTextUnmarshalerDecodeFunc(t reflect.Type, pointer bool) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeTextUnmarshaler(b, p, t, pointer) + } +} + +func constructInlineValueEncodeFunc(encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return encode(e, b, noescape(unsafe.Pointer(&p))) + } +} + +// noescape hides a pointer from escape analysis. noescape is +// the identity function but escape analysis doesn't think the +// output depends on the input. noescape is inlined and currently +// compiles down to zero instructions. +// USE CAREFULLY! +// This was copied from the runtime; see issues 23382 and 7921. +// +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func alignedSize(t reflect.Type) uintptr { + a := t.Align() + s := t.Size() + return align(uintptr(a), uintptr(s)) +} + +func align(align, size uintptr) uintptr { + if align != 0 && (size%align) != 0 { + size = ((size / align) + 1) * align + } + return size +} + +func inlined(t reflect.Type) bool { + switch t.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Struct: + return t.NumField() == 1 && inlined(t.Field(0).Type) + default: + return false + } +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. 
+ default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +func emptyFuncOf(t reflect.Type) emptyFunc { + switch t { + case bytesType, rawMessageType: + return func(p unsafe.Pointer) bool { return (*slice)(p).len == 0 } + } + + switch t.Kind() { + case reflect.Array: + if t.Len() == 0 { + return func(unsafe.Pointer) bool { return true } + } + + case reflect.Map: + return func(p unsafe.Pointer) bool { return reflect.NewAt(t, p).Elem().Len() == 0 } + + case reflect.Slice: + return func(p unsafe.Pointer) bool { return (*slice)(p).len == 0 } + + case reflect.String: + return func(p unsafe.Pointer) bool { return len(*(*string)(p)) == 0 } + + case reflect.Bool: + return func(p unsafe.Pointer) bool { return !*(*bool)(p) } + + case reflect.Int, reflect.Uint: + return func(p unsafe.Pointer) bool { return *(*uint)(p) == 0 } + + case reflect.Uintptr: + return func(p unsafe.Pointer) bool { return *(*uintptr)(p) == 0 } + + case reflect.Int8, reflect.Uint8: + return func(p unsafe.Pointer) bool { return *(*uint8)(p) == 0 } + + case reflect.Int16, reflect.Uint16: + return func(p unsafe.Pointer) bool { return *(*uint16)(p) == 0 } + + case reflect.Int32, reflect.Uint32: + return func(p unsafe.Pointer) bool { return *(*uint32)(p) == 0 } + + case reflect.Int64, reflect.Uint64: + return func(p unsafe.Pointer) bool { return *(*uint64)(p) == 0 } + + case reflect.Float32: + return func(p unsafe.Pointer) bool { return *(*float32)(p) == 0 } + + case reflect.Float64: + return func(p unsafe.Pointer) bool { return *(*float64)(p) == 0 } + + case reflect.Ptr: + return func(p unsafe.Pointer) bool { return *(*unsafe.Pointer)(p) == nil } + + case reflect.Interface: + return func(p unsafe.Pointer) bool { return (*iface)(p).ptr == nil } + } + + return func(unsafe.Pointer) bool { return false } +} + +type iface struct { + typ unsafe.Pointer + ptr unsafe.Pointer +} + +type slice struct { + data unsafe.Pointer + len int + cap int +} + +type structType 
struct { + fields []structField + fieldsIndex map[string]*structField + ficaseIndex map[string]*structField + keyset []byte + typ reflect.Type +} + +type structField struct { + codec codec + offset uintptr + empty emptyFunc + tag bool + omitempty bool + json string + html string + name string + typ reflect.Type + zero reflect.Value + index int +} + +func unmarshalTypeError(b []byte, t reflect.Type) error { + return &UnmarshalTypeError{Value: strconv.Quote(prefix(b)), Type: t} +} + +func unmarshalOverflow(b []byte, t reflect.Type) error { + return &UnmarshalTypeError{Value: "number " + prefix(b) + " overflows", Type: t} +} + +func unexpectedEOF(b []byte) error { + return syntaxError(b, "unexpected end of JSON input") +} + +var syntaxErrorMsgOffset = ^uintptr(0) + +func init() { + t := reflect.TypeOf(SyntaxError{}) + for i := range t.NumField() { + if f := t.Field(i); f.Type.Kind() == reflect.String { + syntaxErrorMsgOffset = f.Offset + } + } +} + +func syntaxError(b []byte, msg string, args ...any) error { + e := new(SyntaxError) + i := syntaxErrorMsgOffset + if i != ^uintptr(0) { + s := "json: " + fmt.Sprintf(msg, args...) + ": " + prefix(b) + p := unsafe.Pointer(e) + // Hack to set the unexported `msg` field. + *(*string)(unsafe.Pointer(uintptr(p) + i)) = s + } + return e +} + +func objectKeyError(b []byte, err error) ([]byte, error) { + if len(b) == 0 { + return nil, unexpectedEOF(b) + } + switch err.(type) { + case *UnmarshalTypeError: + err = syntaxError(b, "invalid character '%c' looking for beginning of object key", b[0]) + } + return b, err +} + +func prefix(b []byte) string { + if len(b) < 32 { + return string(b) + } + return string(b[:32]) + "..." 
+} + +func intStringsAreSorted(i0, i1 int64) bool { + var b0, b1 [32]byte + return string(strconv.AppendInt(b0[:0], i0, 10)) < string(strconv.AppendInt(b1[:0], i1, 10)) +} + +func uintStringsAreSorted(u0, u1 uint64) bool { + var b0, b1 [32]byte + return string(strconv.AppendUint(b0[:0], u0, 10)) < string(strconv.AppendUint(b1[:0], u1, 10)) +} + +func stringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{ + Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)), + Len: len(s), + Cap: len(s), + })) +} + +type sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +var ( + nullType = reflect.TypeOf(nil) + boolType = reflect.TypeOf(false) + + intType = reflect.TypeOf(int(0)) + int8Type = reflect.TypeOf(int8(0)) + int16Type = reflect.TypeOf(int16(0)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + + uintType = reflect.TypeOf(uint(0)) + uint8Type = reflect.TypeOf(uint8(0)) + uint16Type = reflect.TypeOf(uint16(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + uintptrType = reflect.TypeOf(uintptr(0)) + + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + + bigIntType = reflect.TypeOf(new(big.Int)) + numberType = reflect.TypeOf(json.Number("")) + stringType = reflect.TypeOf("") + stringsType = reflect.TypeOf([]string(nil)) + bytesType = reflect.TypeOf(([]byte)(nil)) + durationType = reflect.TypeOf(time.Duration(0)) + timeType = reflect.TypeOf(time.Time{}) + rawMessageType = reflect.TypeOf(RawMessage(nil)) + + numberPtrType = reflect.PointerTo(numberType) + durationPtrType = reflect.PointerTo(durationType) + timePtrType = reflect.PointerTo(timeType) + rawMessagePtrType = reflect.PointerTo(rawMessageType) + + sliceInterfaceType = reflect.TypeOf(([]any)(nil)) + sliceStringType = reflect.TypeOf(([]any)(nil)) + mapStringInterfaceType = reflect.TypeOf((map[string]any)(nil)) + mapStringRawMessageType = reflect.TypeOf((map[string]RawMessage)(nil)) + 
mapStringStringType = reflect.TypeOf((map[string]string)(nil)) + mapStringStringSliceType = reflect.TypeOf((map[string][]string)(nil)) + mapStringBoolType = reflect.TypeOf((map[string]bool)(nil)) + + interfaceType = reflect.TypeOf((*any)(nil)).Elem() + jsonMarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + jsonUnmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + bigIntDecoder = constructJSONUnmarshalerDecodeFunc(bigIntType, false) +) + +// ============================================================================= +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// appendDuration appends a human-readable representation of d to b. +// +// The function copies the implementation of time.Duration.String but prevents +// Go from making a dynamic memory allocation on the returned value. +func appendDuration(b []byte, d time.Duration) []byte { + // Largest time is 2540400h10m10.000000000s + var buf [32]byte + w := len(buf) + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + if u < uint64(time.Second) { + // Special case: if duration is smaller than a second, + // use smaller units, like 1.2ms + var prec int + w-- + buf[w] = 's' + w-- + switch { + case u == 0: + return append(b, '0', 's') + case u < uint64(time.Microsecond): + // print nanoseconds + prec = 0 + buf[w] = 'n' + case u < uint64(time.Millisecond): + // print microseconds + prec = 3 + // U+00B5 'µ' micro sign == 0xC2 0xB5 + w-- // Need room for two bytes. 
+ copy(buf[w:], "µ") + default: + // print milliseconds + prec = 6 + buf[w] = 'm' + } + w, u = fmtFrac(buf[:w], u, prec) + w = fmtInt(buf[:w], u) + } else { + w-- + buf[w] = 's' + + w, u = fmtFrac(buf[:w], u, 9) + + // u is now integer seconds + w = fmtInt(buf[:w], u%60) + u /= 60 + + // u is now integer minutes + if u > 0 { + w-- + buf[w] = 'm' + w = fmtInt(buf[:w], u%60) + u /= 60 + + // u is now integer hours + // Stop at hours because days can be different lengths. + if u > 0 { + w-- + buf[w] = 'h' + w = fmtInt(buf[:w], u) + } + } + } + + if neg { + w-- + buf[w] = '-' + } + + return append(b, buf[w:]...) +} + +// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the +// tail of buf, omitting trailing zeros. it omits the decimal +// point too when the fraction is 0. It returns the index where the +// output bytes begin and the value v/10**prec. +func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) { + // Omit trailing zeros up to and including decimal point. + w := len(buf) + print := false + for range prec { + digit := v % 10 + print = print || digit != 0 + if print { + w-- + buf[w] = byte(digit) + '0' + } + v /= 10 + } + if print { + w-- + buf[w] = '.' + } + return w, v +} + +// fmtInt formats v into the tail of buf. +// It returns the index where the output begins. 
+func fmtInt(buf []byte, v uint64) int { + w := len(buf) + if v == 0 { + w-- + buf[w] = '0' + } else { + for v > 0 { + w-- + buf[w] = byte(v%10) + '0' + v /= 10 + } + } + return w +} + +// ============================================================================= diff --git a/vendor/github.com/segmentio/encoding/json/decode.go b/vendor/github.com/segmentio/encoding/json/decode.go new file mode 100644 index 00000000..36720337 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/decode.go @@ -0,0 +1,1534 @@ +package json + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "math" + "math/big" + "reflect" + "strconv" + "time" + "unsafe" + + "github.com/segmentio/asm/base64" + "github.com/segmentio/asm/keyset" + "github.com/segmentio/encoding/iso8601" +) + +func (d decoder) anyFlagsSet(flags ParseFlags) bool { + return d.flags&flags != 0 +} + +func (d decoder) decodeNull(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + return d.inputError(b, nullType) +} + +func (d decoder) decodeBool(b []byte, p unsafe.Pointer) ([]byte, error) { + switch { + case hasTruePrefix(b): + *(*bool)(p) = true + return b[4:], nil + + case hasFalsePrefix(b): + *(*bool)(p) = false + return b[5:], nil + + case hasNullPrefix(b): + return b[4:], nil + + default: + return d.inputError(b, boolType) + } +} + +func (d decoder) decodeInt(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, intType) + if err != nil { + return r, err + } + + *(*int)(p) = int(v) + return r, nil +} + +func (d decoder) decodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, int8Type) + if err != nil { + return r, err + } + + if v < math.MinInt8 || v > math.MaxInt8 { + return r, unmarshalOverflow(b[:len(b)-len(r)], int8Type) + } + + *(*int8)(p) = int8(v) + return r, nil +} + +func (d decoder) decodeInt16(b 
[]byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, int16Type) + if err != nil { + return r, err + } + + if v < math.MinInt16 || v > math.MaxInt16 { + return r, unmarshalOverflow(b[:len(b)-len(r)], int16Type) + } + + *(*int16)(p) = int16(v) + return r, nil +} + +func (d decoder) decodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, int32Type) + if err != nil { + return r, err + } + + if v < math.MinInt32 || v > math.MaxInt32 { + return r, unmarshalOverflow(b[:len(b)-len(r)], int32Type) + } + + *(*int32)(p) = int32(v) + return r, nil +} + +func (d decoder) decodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, int64Type) + if err != nil { + return r, err + } + + *(*int64)(p) = v + return r, nil +} + +func (d decoder) decodeUint(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uintType) + if err != nil { + return r, err + } + + *(*uint)(p) = uint(v) + return r, nil +} + +func (d decoder) decodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uintptrType) + if err != nil { + return r, err + } + + *(*uintptr)(p) = uintptr(v) + return r, nil +} + +func (d decoder) decodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uint8Type) + if err != nil { + return r, err + } + + if v > math.MaxUint8 { + return r, unmarshalOverflow(b[:len(b)-len(r)], uint8Type) + } + + *(*uint8)(p) = uint8(v) + return r, nil +} + +func (d decoder) decodeUint16(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uint16Type) + if err != nil { + return r, 
err + } + + if v > math.MaxUint16 { + return r, unmarshalOverflow(b[:len(b)-len(r)], uint16Type) + } + + *(*uint16)(p) = uint16(v) + return r, nil +} + +func (d decoder) decodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uint32Type) + if err != nil { + return r, err + } + + if v > math.MaxUint32 { + return r, unmarshalOverflow(b[:len(b)-len(r)], uint32Type) + } + + *(*uint32)(p) = uint32(v) + return r, nil +} + +func (d decoder) decodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uint64Type) + if err != nil { + return r, err + } + + *(*uint64)(p) = v + return r, nil +} + +func (d decoder) decodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, _, err := d.parseNumber(b) + if err != nil { + return d.inputError(b, float32Type) + } + + f, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 32) + if err != nil { + return d.inputError(b, float32Type) + } + + *(*float32)(p) = float32(f) + return r, nil +} + +func (d decoder) decodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, _, err := d.parseNumber(b) + if err != nil { + return d.inputError(b, float64Type) + } + + f, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 64) + if err != nil { + return d.inputError(b, float64Type) + } + + *(*float64)(p) = f + return r, nil +} + +func (d decoder) decodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, _, err := d.parseNumber(b) + if err != nil { + return d.inputError(b, numberType) + } + + if (d.flags & DontCopyNumber) != 0 { + *(*Number)(p) = *(*Number)(unsafe.Pointer(&v)) + } else { + *(*Number)(p) = Number(v) + } + + return r, nil +} + +func (d decoder) decodeString(b []byte, p unsafe.Pointer) ([]byte, 
error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + s, r, new, err := d.parseStringUnquote(b, nil) + if err != nil { + if len(b) == 0 || b[0] != '"' { + return d.inputError(b, stringType) + } + return r, err + } + + if new || (d.flags&DontCopyString) != 0 { + *(*string)(p) = *(*string)(unsafe.Pointer(&s)) + } else { + *(*string)(p) = string(s) + } + + return r, nil +} + +func (d decoder) decodeFromString(b []byte, p unsafe.Pointer, decode decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + return decode(d, b, p) + } + + v, b, _, err := d.parseStringUnquote(b, nil) + if err != nil { + return d.inputError(v, stringType) + } + + if v, err = decode(d, v, p); err != nil { + return b, err + } + + if v = skipSpaces(v); len(v) != 0 { + return b, syntaxError(v, "unexpected trailing tokens after string value") + } + + return b, nil +} + +func (d decoder) decodeFromStringToInt(b []byte, p unsafe.Pointer, t reflect.Type, decode decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + return decode(d, b, p) + } + + if len(b) > 0 && b[0] != '"' { + v, r, k, err := d.parseNumber(b) + if err == nil { + // The encoding/json package will return a *json.UnmarshalTypeError if + // the input was a floating point number representation, even tho a + // string is expected here. 
+ if k == Float { + _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 64) + if err != nil { + return r, unmarshalTypeError(v, t) + } + } + } + return r, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into int") + } + + if len(b) > 1 && b[0] == '"' && b[1] == '"' { + return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal \"\" into int") + } + + v, b, _, err := d.parseStringUnquote(b, nil) + if err != nil { + return d.inputError(v, t) + } + + if hasLeadingZeroes(v) { + // In this context the encoding/json package accepts leading zeroes because + // it is not constrained by the JSON syntax, remove them so the parsing + // functions don't return syntax errors. + u := make([]byte, 0, len(v)) + i := 0 + + if i < len(v) && v[i] == '-' || v[i] == '+' { + u = append(u, v[i]) + i++ + } + + for (i+1) < len(v) && v[i] == '0' && '0' <= v[i+1] && v[i+1] <= '9' { + i++ + } + + v = append(u, v[i:]...) + } + + if r, err := decode(d, v, p); err != nil { + if _, isSyntaxError := err.(*SyntaxError); isSyntaxError { + if hasPrefix(v, "-") { + // The standard library interprets sequences of '-' characters + // as numbers but still returns type errors in this case... + return b, unmarshalTypeError(v, t) + } + return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into int", prefix(v)) + } + // When the input value was a valid number representation we retain the + // error returned by the decoder. + if _, _, _, err := d.parseNumber(v); err != nil { + // When the input value valid JSON we mirror the behavior of the + // encoding/json package and return a generic error. 
+ if _, _, _, err := d.parseValue(v); err == nil { + return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into int", prefix(v)) + } + } + return b, err + } else if len(r) != 0 { + return r, unmarshalTypeError(v, t) + } + + return b, nil +} + +func (d decoder) decodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*[]byte)(p) = nil + return b[4:], nil + } + + if len(b) < 2 { + return d.inputError(b, bytesType) + } + + if b[0] != '"' { + // Go 1.7- behavior: bytes slices may be decoded from array of integers. + if len(b) > 0 && b[0] == '[' { + return d.decodeSlice(b, p, 1, bytesType, decoder.decodeUint8) + } + return d.inputError(b, bytesType) + } + + // The input string contains escaped sequences, we need to parse it before + // decoding it to match the encoding/json package behvaior. + src, r, _, err := d.parseStringUnquote(b, nil) + if err != nil { + return d.inputError(b, bytesType) + } + + dst := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + + n, err := base64.StdEncoding.Decode(dst, src) + if err != nil { + return r, err + } + + *(*[]byte)(p) = dst[:n] + return r, nil +} + +func (d decoder) decodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + // in order to inter-operate with the stdlib, we must be able to interpret + // durations passed as integer values. 
there's some discussion about being + // flexible on how durations are formatted, but for the time being, it's + // been punted to go2 at the earliest: https://github.com/golang/go/issues/4712 + if len(b) > 0 && b[0] != '"' { + v, r, err := d.parseInt(b, durationType) + if err != nil { + return d.inputError(b, int32Type) + } + + if v < math.MinInt64 || v > math.MaxInt64 { + return r, unmarshalOverflow(b[:len(b)-len(r)], int32Type) + } + + *(*time.Duration)(p) = time.Duration(v) + return r, nil + } + + if len(b) < 2 || b[0] != '"' { + return d.inputError(b, durationType) + } + + i := bytes.IndexByte(b[1:], '"') + 1 + if i <= 0 { + return d.inputError(b, durationType) + } + + s := b[1:i] // trim quotes + + v, err := time.ParseDuration(*(*string)(unsafe.Pointer(&s))) + if err != nil { + return d.inputError(b, durationType) + } + + *(*time.Duration)(p) = v + return b[i+1:], nil +} + +func (d decoder) decodeTime(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + if len(b) < 2 || b[0] != '"' { + return d.inputError(b, timeType) + } + + i := bytes.IndexByte(b[1:], '"') + 1 + if i <= 0 { + return d.inputError(b, timeType) + } + + s := b[1:i] // trim quotes + + v, err := iso8601.Parse(*(*string)(unsafe.Pointer(&s))) + if err != nil { + return d.inputError(b, timeType) + } + + *(*time.Time)(p) = v + return b[i+1:], nil +} + +func (d decoder) decodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, decode decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + if len(b) < 2 || b[0] != '[' { + return d.inputError(b, t) + } + b = b[1:] + + var err error + for i := range n { + b = skipSpaces(b) + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected EOF after array element") + } + switch b[0] { + case ',': + b = skipSpaces(b[1:]) + case ']': + return b[1:], nil + default: + return b, syntaxError(b, "expected ',' after array element but found '%c'", b[0]) + } + } + + b, 
err = decode(d, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size))) + if err != nil { + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = t.String() + e.Struct + e.Field = d.prependField(strconv.Itoa(i), e.Field) + } + return b, err + } + } + + // The encoding/json package ignores extra elements found when decoding into + // array types (which have a fixed size). + for { + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "missing closing ']' in array value") + } + + switch b[0] { + case ',': + b = skipSpaces(b[1:]) + case ']': + return b[1:], nil + } + + _, b, _, err = d.parseValue(b) + if err != nil { + return b, err + } + } +} + +// This is a placeholder used to consturct non-nil empty slices. +var empty struct{} + +func (d decoder) decodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, decode decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + *(*slice)(p) = slice{} + return b[4:], nil + } + + if len(b) < 2 { + return d.inputError(b, t) + } + + if b[0] != '[' { + // Go 1.7- behavior: fallback to decoding as a []byte if the element + // type is byte; allow conversions from JSON strings even tho the + // underlying type implemented unmarshaler interfaces. 
+ if t.Elem().Kind() == reflect.Uint8 { + return d.decodeBytes(b, p) + } + return d.inputError(b, t) + } + + input := b + b = b[1:] + + s := (*slice)(p) + s.len = 0 + + var err error + for { + b = skipSpaces(b) + + if len(b) != 0 && b[0] == ']' { + if s.data == nil { + s.data = unsafe.Pointer(&empty) + } + return b[1:], nil + } + + if s.len != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected EOF after array element") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after array element but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if s.len == s.cap { + c := s.cap + + if c == 0 { + c = 10 + } else { + c *= 2 + } + + *s = extendSlice(t, s, c) + } + + b, err = decode(d, b, unsafe.Pointer(uintptr(s.data)+(uintptr(s.len)*size))) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = t.String() + e.Struct + e.Field = d.prependField(strconv.Itoa(s.len), e.Field) + } + return b, err + } + + s.len++ + } +} + +func (d decoder) decodeMap(b []byte, p unsafe.Pointer, t, kt, vt reflect.Type, kz, vz reflect.Value, decodeKey, decodeValue decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, t) + } + i := 0 + m := reflect.NewAt(t, p).Elem() + + k := reflect.New(kt).Elem() + v := reflect.New(vt).Elem() + + kptr := (*iface)(unsafe.Pointer(&k)).ptr + vptr := (*iface)(unsafe.Pointer(&v)).ptr + input := b + + if m.IsNil() { + m = reflect.MakeMap(t) + } + + var err error + b = b[1:] + for { + k.Set(kz) + v.Set(vz) + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = unsafe.Pointer(m.Pointer()) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected 
',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + if b, err = decodeKey(d, b, kptr); err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + if b, err = decodeValue(d, b, vptr); err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = "map[" + kt.String() + "]" + vt.String() + "{" + e.Struct + "}" + e.Field = d.prependField(fmt.Sprint(k.Interface()), e.Field) + } + return b, err + } + + m.SetMapIndex(k, v) + i++ + } +} + +func (d decoder) decodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringInterfaceType) + } + + i := 0 + m := *(*map[string]any)(p) + + if m == nil { + m = make(map[string]any, 64) + } + + var ( + input = b + key string + val any + err error + ) + + b = b[1:] + for { + key = "" + val = nil + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return 
objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeInterface(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringInterfaceType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringRawMessageType) + } + + i := 0 + m := *(*map[string]RawMessage)(p) + + if m == nil { + m = make(map[string]RawMessage, 64) + } + + var err error + var key string + var val RawMessage + input := b + + b = b[1:] + for { + key = "" + val = nil + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + 
b = skipSpaces(b[1:]) + + b, err = d.decodeRawMessage(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringRawMessageType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringString(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringStringType) + } + + i := 0 + m := *(*map[string]string)(p) + + if m == nil { + m = make(map[string]string, 64) + } + + var err error + var key string + var val string + input := b + + b = b[1:] + for { + key = "" + val = "" + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeString(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct 
+ e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringStringSlice(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringStringSliceType) + } + + i := 0 + m := *(*map[string][]string)(p) + + if m == nil { + m = make(map[string][]string, 64) + } + + var err error + var key string + var buf []string + input := b + stringSize := unsafe.Sizeof("") + + b = b[1:] + for { + key = "" + buf = buf[:0] + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeSlice(b, unsafe.Pointer(&buf), stringSize, sliceStringType, decoder.decodeString) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + val := make([]string, len(buf)) + copy(val, buf) + + m[key] = val + i++ + } +} + +func (d decoder) 
decodeMapStringBool(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringBoolType) + } + + i := 0 + m := *(*map[string]bool)(p) + + if m == nil { + m = make(map[string]bool, 64) + } + + var err error + var key string + var val bool + input := b + + b = b[1:] + for { + key = "" + val = false + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeBool(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, st.typ) + } + + var err error + var k []byte + var i int + + // memory buffer used to convert short field names to 
lowercase + var buf [64]byte + var key []byte + input := b + + b = b[1:] + for { + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + i++ + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + k, b, _, err = d.parseStringUnquote(b, nil) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + var f *structField + if len(st.keyset) != 0 { + if n := keyset.Lookup(st.keyset, k); n < len(st.fields) { + if len(st.fields[n].name) == len(k) { + f = &st.fields[n] + } + } + } else { + f = st.fieldsIndex[string(k)] + } + + if f == nil && (d.flags&DontMatchCaseInsensitiveStructFields) == 0 { + key = appendToLower(buf[:0], k) + f = st.ficaseIndex[string(key)] + } + + if f == nil { + if (d.flags & DisallowUnknownFields) != 0 { + return b, fmt.Errorf("json: unknown field %q", k) + } + if _, b, _, err = d.parseValue(b); err != nil { + return b, err + } + continue + } + + if b, err = f.codec.decode(d, b, unsafe.Pointer(uintptr(p)+f.offset)); err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = st.typ.String() + e.Struct + e.Field = d.prependField(string(k), e.Field) + } + return b, err + } + } +} + +func (d decoder) decodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, decode decodeFunc) ([]byte, error) { + v := 
*(*unsafe.Pointer)(p) + + if v == nil { + if unexported { + return nil, fmt.Errorf("json: cannot set embedded pointer to unexported struct: %s", t) + } + v = unsafe.Pointer(reflect.New(t).Pointer()) + *(*unsafe.Pointer)(p) = v + } + + return decode(d, b, unsafe.Pointer(uintptr(v)+offset)) +} + +func (d decoder) decodePointer(b []byte, p unsafe.Pointer, t reflect.Type, decode decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + pp := *(*unsafe.Pointer)(p) + if pp != nil && t.Kind() == reflect.Ptr { + return decode(d, b, pp) + } + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + v := *(*unsafe.Pointer)(p) + if v == nil { + v = unsafe.Pointer(reflect.New(t).Pointer()) + *(*unsafe.Pointer)(p) = v + } + + return decode(d, b, v) +} + +func (d decoder) decodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + val := *(*any)(p) + *(*any)(p) = nil + + if t := reflect.TypeOf(val); t != nil && t.Kind() == reflect.Ptr { + if v := reflect.ValueOf(val); v.IsNil() || t.Elem().Kind() != reflect.Ptr { + // If the destination is nil the only value that is OK to decode is + // `null`, and the encoding/json package always nils the destination + // interface value in this case. 
+ if hasNullPrefix(b) { + *(*any)(p) = nil + return b[4:], nil + } + } + + b, err := Parse(b, val, d.flags) + if err == nil { + *(*any)(p) = val + } + + return b, err + } + + v, b, k, err := d.parseValue(b) + if err != nil { + return b, err + } + + switch k.Class() { + case Object: + m := make(map[string]interface{}) + v, err = d.decodeMapStringInterface(v, unsafe.Pointer(&m)) + val = m + + case Array: + a := make([]interface{}, 0, 10) + v, err = d.decodeSlice(v, unsafe.Pointer(&a), unsafe.Sizeof(a[0]), sliceInterfaceType, decoder.decodeInterface) + val = a + + case String: + s := "" + v, err = d.decodeString(v, unsafe.Pointer(&s)) + val = s + + case Null: + v, val = nil, nil + + case Bool: + v, val = nil, k == True + + case Num: + v, err = d.decodeDynamicNumber(v, unsafe.Pointer(&val)) + + default: + return b, syntaxError(v, "expected token but found '%c'", v[0]) + } + + if err != nil { + return b, err + } + + if v = skipSpaces(v); len(v) != 0 { + return b, syntaxError(v, "unexpected trailing trailing tokens after json value") + } + + *(*any)(p) = val + return b, nil +} + +func (d decoder) decodeDynamicNumber(b []byte, p unsafe.Pointer) ([]byte, error) { + kind := Float + var err error + + // Only pre-parse for numeric kind if a conditional decode + // has been requested. + if d.anyFlagsSet(UseBigInt | UseInt64 | UseUint64) { + _, _, kind, err = d.parseNumber(b) + if err != nil { + return b, err + } + } + + var rem []byte + anyPtr := (*any)(p) + + // Mutually exclusive integer handling cases. + switch { + // If requested, attempt decode of positive integers as uint64. + case kind == Uint && d.anyFlagsSet(UseUint64): + rem, err = decodeInto[uint64](anyPtr, b, d, decoder.decodeUint64) + if err == nil { + return rem, err + } + + // If uint64 decode was not requested but int64 decode was requested, + // then attempt decode of positive integers as int64. 
+ case kind == Uint && d.anyFlagsSet(UseInt64): + fallthrough + + // If int64 decode was requested, + // attempt decode of negative integers as int64. + case kind == Int && d.anyFlagsSet(UseInt64): + rem, err = decodeInto[int64](anyPtr, b, d, decoder.decodeInt64) + if err == nil { + return rem, err + } + } + + // Fallback numeric handling cases: + // these cannot be combined into the above switch, + // since these cases also handle overflow + // from the above cases, if decode was already attempted. + switch { + // If *big.Int decode was requested, handle that case for any integer. + case kind == Uint && d.anyFlagsSet(UseBigInt): + fallthrough + case kind == Int && d.anyFlagsSet(UseBigInt): + rem, err = decodeInto[*big.Int](anyPtr, b, d, bigIntDecoder) + + // If json.Number decode was requested, handle that for any number. + case d.anyFlagsSet(UseNumber): + rem, err = decodeInto[Number](anyPtr, b, d, decoder.decodeNumber) + + // Fall back to float64 decode when no special decoding has been requested. 
+ default: + rem, err = decodeInto[float64](anyPtr, b, d, decoder.decodeFloat64) + } + + return rem, err +} + +func (d decoder) decodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + if hasNullPrefix(b) { + *(*any)(p) = nil + return b[4:], nil + } + + if x := reflect.NewAt(t, p).Elem(); !x.IsNil() { + if e := x.Elem(); e.Kind() == reflect.Ptr { + return Parse(b, e.Interface(), d.flags) + } + } else if t.NumMethod() == 0 { // empty interface + return Parse(b, (*any)(p), d.flags) + } + + return d.decodeUnmarshalTypeError(b, p, t) +} + +func (d decoder) decodeUnmarshalTypeError(b []byte, _ unsafe.Pointer, t reflect.Type) ([]byte, error) { + v, b, _, err := d.parseValue(b) + if err != nil { + return b, err + } + return b, &UnmarshalTypeError{ + Value: string(v), + Type: t, + } +} + +func (d decoder) decodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + v, r, _, err := d.parseValue(b) + if err != nil { + return d.inputError(b, rawMessageType) + } + + if (d.flags & DontCopyRawMessage) == 0 { + v = append(make([]byte, 0, len(v)), v...) 
+ } + + *(*RawMessage)(p) = json.RawMessage(v) + return r, err +} + +func (d decoder) decodeJSONUnmarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v, b, _, err := d.parseValue(b) + if err != nil { + return b, err + } + + u := reflect.NewAt(t, p) + if !pointer { + u = u.Elem() + t = t.Elem() + } + if u.IsNil() { + u.Set(reflect.New(t)) + } + + return b, u.Interface().(Unmarshaler).UnmarshalJSON(v) +} + +func (d decoder) decodeTextUnmarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + var value string + + v, b, k, err := d.parseValue(b) + if err != nil { + return b, err + } + if len(v) == 0 { + return d.inputError(v, t) + } + + switch k.Class() { + case Null: + return b, err + + case String: + s, _, _, err := d.parseStringUnquote(v, nil) + if err != nil { + return b, err + } + u := reflect.NewAt(t, p) + if !pointer { + u = u.Elem() + t = t.Elem() + } + if u.IsNil() { + u.Set(reflect.New(t)) + } + return b, u.Interface().(encoding.TextUnmarshaler).UnmarshalText(s) + + case Bool: + if k == True { + value = "true" + } else { + value = "false" + } + + case Num: + value = "number" + + case Object: + value = "object" + + case Array: + value = "array" + } + + return b, &UnmarshalTypeError{Value: value, Type: reflect.PointerTo(t)} +} + +func (d decoder) prependField(key, field string) string { + if field != "" { + return key + "." 
+ field + } + return key +} + +func (d decoder) inputError(b []byte, t reflect.Type) ([]byte, error) { + if len(b) == 0 { + return nil, unexpectedEOF(b) + } + _, r, _, err := d.parseValue(b) + if err != nil { + return r, err + } + return skipSpaces(r), unmarshalTypeError(b, t) +} + +func decodeInto[T any](dest *any, b []byte, d decoder, fn decodeFunc) ([]byte, error) { + var v T + rem, err := fn(d, b, unsafe.Pointer(&v)) + if err == nil { + *dest = v + } + + return rem, err +} diff --git a/vendor/github.com/segmentio/encoding/json/encode.go b/vendor/github.com/segmentio/encoding/json/encode.go new file mode 100644 index 00000000..2a6da07d --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/encode.go @@ -0,0 +1,970 @@ +package json + +import ( + "encoding" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "sync" + "time" + "unicode/utf8" + "unsafe" + + "github.com/segmentio/asm/base64" +) + +const hex = "0123456789abcdef" + +func (e encoder) encodeNull(b []byte, p unsafe.Pointer) ([]byte, error) { + return append(b, "null"...), nil +} + +func (e encoder) encodeBool(b []byte, p unsafe.Pointer) ([]byte, error) { + if *(*bool)(p) { + return append(b, "true"...), nil + } + return append(b, "false"...), nil +} + +func (e encoder) encodeInt(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int)(p))), nil +} + +func (e encoder) encodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int8)(p))), nil +} + +func (e encoder) encodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int16)(p))), nil +} + +func (e encoder) encodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int32)(p))), nil +} + +func (e encoder) encodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, *(*int64)(p)), nil +} + +func (e encoder) encodeUint(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint)(p))), nil +} + 
+func (e encoder) encodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uintptr)(p))), nil +} + +func (e encoder) encodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint8)(p))), nil +} + +func (e encoder) encodeUint16(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint16)(p))), nil +} + +func (e encoder) encodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint32)(p))), nil +} + +func (e encoder) encodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, *(*uint64)(p)), nil +} + +func (e encoder) encodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeFloat(b, float64(*(*float32)(p)), 32) +} + +func (e encoder) encodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeFloat(b, *(*float64)(p), 64) +} + +func (e encoder) encodeFloat(b []byte, f float64, bits int) ([]byte, error) { + switch { + case math.IsNaN(f): + return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "NaN"} + case math.IsInf(f, 0): + return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "inf"} + } + + // Convert as if by ES6 number to string conversion. + // This matches most other JSON generators. + // See golang.org/issue/6384 and golang.org/issue/14135. + // Like fmt %g, but the exponent cutoffs are different + // and exponents themselves are not padded to two digits. + abs := math.Abs(f) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
+ if abs != 0 { + if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + + b = strconv.AppendFloat(b, f, fmt, -1, int(bits)) + + if fmt == 'e' { + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' { + b[n-2] = b[n-1] + b = b[:n-1] + } + } + + return b, nil +} + +func (e encoder) encodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) { + n := *(*Number)(p) + if n == "" { + n = "0" + } + + d := decoder{} + _, _, _, err := d.parseNumber(stringToBytes(string(n))) + if err != nil { + return b, err + } + + return append(b, n...), nil +} + +func (e encoder) encodeString(b []byte, p unsafe.Pointer) ([]byte, error) { + s := *(*string)(p) + if len(s) == 0 { + return append(b, `""`...), nil + } + i := 0 + j := 0 + escapeHTML := (e.flags & EscapeHTML) != 0 + + b = append(b, '"') + + if len(s) >= 8 { + if j = escapeIndex(s, escapeHTML); j < 0 { + return append(append(b, s...), '"'), nil + } + } + + for j < len(s) { + c := s[j] + + if c >= 0x20 && c <= 0x7f && c != '\\' && c != '"' && (!escapeHTML || (c != '<' && c != '>' && c != '&')) { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"', '\b', '\f', '\n', '\r', '\t': + b = append(b, s[i:j]...) + b = append(b, '\\', escapeByteRepr(c)) + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + b = append(b, s[i:j]...) + b = append(b, `\u00`...) + b = append(b, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + // This encodes bytes < 0x20 except for \t, \n and \r. + if c < 0x20 { + b = append(b, s[i:j]...) + b = append(b, `\u00`...) + b = append(b, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + r, size := utf8.DecodeRuneInString(s[j:]) + + if r == utf8.RuneError && size == 1 { + b = append(b, s[i:j]...) + b = append(b, `\ufffd`...) 
+ i = j + size + j = j + size + continue + } + + switch r { + case '\u2028', '\u2029': + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + b = append(b, s[i:j]...) + b = append(b, `\u202`...) + b = append(b, hex[r&0xF]) + i = j + size + j = j + size + continue + } + + j += size + } + + b = append(b, s[i:]...) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeToString(b []byte, p unsafe.Pointer, encode encodeFunc) ([]byte, error) { + i := len(b) + + b, err := encode(e, b, p) + if err != nil { + return b, err + } + + j := len(b) + s := b[i:] + + if b, err = e.encodeString(b, unsafe.Pointer(&s)); err != nil { + return b, err + } + + n := copy(b[i:], b[j:]) + return b[:i+n], nil +} + +func (e encoder) encodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) { + v := *(*[]byte)(p) + if v == nil { + return append(b, "null"...), nil + } + + n := base64.StdEncoding.EncodedLen(len(v)) + 2 + + if avail := cap(b) - len(b); avail < n { + newB := make([]byte, cap(b)+(n-avail)) + copy(newB, b) + b = newB[:len(b)] + } + + i := len(b) + j := len(b) + n + + b = b[:j] + b[i] = '"' + base64.StdEncoding.Encode(b[i+1:j-1], v) + b[j-1] = '"' + return b, nil +} + +func (e encoder) encodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) { + b = append(b, '"') + b = appendDuration(b, *(*time.Duration)(p)) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeTime(b []byte, p unsafe.Pointer) ([]byte, error) { + t := *(*time.Time)(p) + b = append(b, '"') + b = t.AppendFormat(b, time.RFC3339Nano) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t 
reflect.Type, encode encodeFunc) ([]byte, error) { + start := len(b) + var err error + b = append(b, '[') + + for i := range n { + if i != 0 { + b = append(b, ',') + } + if b, err = encode(e, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size))); err != nil { + return b[:start], err + } + } + + b = append(b, ']') + return b, nil +} + +func (e encoder) encodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) { + s := (*slice)(p) + + if s.data == nil && s.len == 0 && s.cap == 0 { + return append(b, "null"...), nil + } + + return e.encodeArray(b, s.data, s.len, size, t, encode) +} + +func (e encoder) encodeMap(b []byte, p unsafe.Pointer, t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) ([]byte, error) { + m := reflect.NewAt(t, p).Elem() + if m.IsNil() { + return append(b, "null"...), nil + } + + keys := m.MapKeys() + if sortKeys != nil && (e.flags&SortMapKeys) != 0 { + sortKeys(keys) + } + + start := len(b) + var err error + b = append(b, '{') + + for i, k := range keys { + v := m.MapIndex(k) + + if i != 0 { + b = append(b, ',') + } + + if b, err = encodeKey(e, b, (*iface)(unsafe.Pointer(&k)).ptr); err != nil { + return b[:start], err + } + + b = append(b, ':') + + if b, err = encodeValue(e, b, (*iface)(unsafe.Pointer(&v)).ptr); err != nil { + return b[:start], err + } + } + + b = append(b, '}') + return b, nil +} + +type element struct { + key string + val any + raw RawMessage +} + +type mapslice struct { + elements []element +} + +func (m *mapslice) Len() int { return len(m.elements) } +func (m *mapslice) Less(i, j int) bool { return m.elements[i].key < m.elements[j].key } +func (m *mapslice) Swap(i, j int) { m.elements[i], m.elements[j] = m.elements[j], m.elements[i] } + +var mapslicePool = sync.Pool{ + New: func() any { return new(mapslice) }, +} + +func (e encoder) encodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]any)(p) + if m == nil { + return append(b, 
"null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var err error + i := 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = Append(b, v, e.flags) + if err != nil { + return b, err + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + s.elements = append(s.elements, element{key: key, val: val}) + } + sort.Sort(s) + + start := len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = Append(b, elem.val, e.flags) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]RawMessage)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. 
+ b = append(b, '{') + + if len(m) != 0 { + var err error + i := 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString doesn't return errors so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = e.encodeRawMessage(b, unsafe.Pointer(&v)) + if err != nil { + break + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, raw := range m { + s.elements = append(s.elements, element{key: key, raw: raw}) + } + sort.Sort(s) + + start := len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = e.encodeRawMessage(b, unsafe.Pointer(&elem.raw)) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringString(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]string)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. 
+ b = append(b, '{') + + if len(m) != 0 { + i := 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + b, _ = e.encodeString(b, unsafe.Pointer(&v)) + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + v := val + s.elements = append(s.elements, element{key: key, val: &v}) + } + sort.Sort(s) + + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + b, _ = e.encodeString(b, unsafe.Pointer(elem.val.(*string))) + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringStringSlice(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string][]string)(p) + if m == nil { + return append(b, "null"...), nil + } + + stringSize := unsafe.Sizeof("") + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. 
+ b = append(b, '{') + + if len(m) != 0 { + var err error + i := 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = e.encodeSlice(b, unsafe.Pointer(&v), stringSize, sliceStringType, encoder.encodeString) + if err != nil { + return b, err + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + v := val + s.elements = append(s.elements, element{key: key, val: &v}) + } + sort.Sort(s) + + start := len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = e.encodeSlice(b, unsafe.Pointer(elem.val.(*[]string)), stringSize, sliceStringType, encoder.encodeString) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringBool(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]bool)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + i := 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + if v { + b = append(b, ":true"...) + } else { + b = append(b, ":false"...) 
+ } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + s.elements = append(s.elements, element{key: key, val: val}) + } + sort.Sort(s) + + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + if elem.val.(bool) { + b = append(b, ":true"...) + } else { + b = append(b, ":false"...) + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) { + start := len(b) + var err error + var k string + var n int + b = append(b, '{') + + escapeHTML := (e.flags & EscapeHTML) != 0 + + for i := range st.fields { + f := &st.fields[i] + v := unsafe.Pointer(uintptr(p) + f.offset) + + if f.omitempty && f.empty(v) { + continue + } + + if escapeHTML { + k = f.html + } else { + k = f.json + } + + lengthBeforeKey := len(b) + + if n != 0 { + b = append(b, k...) + } else { + b = append(b, k[1:]...) 
+ } + + if b, err = f.codec.encode(e, b, v); err != nil { + if err == (rollback{}) { + b = b[:lengthBeforeKey] + continue + } + return b[:start], err + } + + n++ + } + + b = append(b, '}') + return b, nil +} + +type rollback struct{} + +func (rollback) Error() string { return "rollback" } + +func (e encoder) encodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) ([]byte, error) { + p = *(*unsafe.Pointer)(p) + if p == nil { + return b, rollback{} + } + return encode(e, b, unsafe.Pointer(uintptr(p)+offset)) +} + +func (e encoder) encodePointer(b []byte, p unsafe.Pointer, t reflect.Type, encode encodeFunc) ([]byte, error) { + if p = *(*unsafe.Pointer)(p); p != nil { + if e.ptrDepth++; e.ptrDepth >= startDetectingCyclesAfter { + if _, seen := e.ptrSeen[p]; seen { + // TODO: reconstruct the reflect.Value from p + t so we can set + // the erorr's Value field? + return b, &UnsupportedValueError{Str: fmt.Sprintf("encountered a cycle via %s", t)} + } + if e.ptrSeen == nil { + e.ptrSeen = make(map[unsafe.Pointer]struct{}) + } + e.ptrSeen[p] = struct{}{} + defer delete(e.ptrSeen, p) + } + return encode(e, b, p) + } + return e.encodeNull(b, nil) +} + +func (e encoder) encodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + return Append(b, *(*any)(p), e.flags) +} + +func (e encoder) encodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + return Append(b, reflect.NewAt(t, p).Elem().Interface(), e.flags) +} + +func (e encoder) encodeUnsupportedTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + return b, &UnsupportedTypeError{Type: t} +} + +func (e encoder) encodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + v := *(*RawMessage)(p) + + if v == nil { + return append(b, "null"...), nil + } + + var s []byte + + if (e.flags & TrustRawMessage) != 0 { + s = v + } else { + var err error + v = skipSpaces(v) // don't assume that a RawMessage 
starts with a token. + d := decoder{} + s, _, _, err = d.parseValue(v) + if err != nil { + return b, &UnsupportedValueError{Value: reflect.ValueOf(v), Str: err.Error()} + } + } + + if (e.flags & EscapeHTML) != 0 { + return appendCompactEscapeHTML(b, s), nil + } + + return append(b, s...), nil +} + +func (e encoder) encodeJSONMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v := reflect.NewAt(t, p) + + if !pointer { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return append(b, "null"...), nil + } + } + + j, err := v.Interface().(Marshaler).MarshalJSON() + if err != nil { + return b, err + } + + d := decoder{} + s, _, _, err := d.parseValue(j) + if err != nil { + return b, &MarshalerError{Type: t, Err: err} + } + + if (e.flags & EscapeHTML) != 0 { + return appendCompactEscapeHTML(b, s), nil + } + + return append(b, s...), nil +} + +func (e encoder) encodeTextMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v := reflect.NewAt(t, p) + + if !pointer { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return append(b, `null`...), nil + } + } + + s, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return b, err + } + + return e.encodeString(b, unsafe.Pointer(&s)) +} + +func appendCompactEscapeHTML(dst []byte, src []byte) []byte { + start := 0 + escape := false + inString := false + + for i, c := range src { + if !inString { + switch c { + case '"': // enter string + inString = true + case ' ', '\n', '\r', '\t': // skip space + if start < i { + dst = append(dst, src[start:i]...) + } + start = i + 1 + } + continue + } + + if escape { + escape = false + continue + } + + if c == '\\' { + escape = true + continue + } + + if c == '"' { + inString = false + continue + } + + if c == '<' || c == '>' || c == '&' { + if start < i { + dst = append(dst, src[start:i]...) 
+ } + dst = append(dst, `\u00`...) + dst = append(dst, hex[c>>4], hex[c&0xF]) + start = i + 1 + continue + } + + // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9). + if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 { + if start < i { + dst = append(dst, src[start:i]...) + } + dst = append(dst, `\u202`...) + dst = append(dst, hex[src[i+2]&0xF]) + start = i + 3 + continue + } + } + + if start < len(src) { + dst = append(dst, src[start:]...) + } + + return dst +} diff --git a/vendor/github.com/segmentio/encoding/json/int.go b/vendor/github.com/segmentio/encoding/json/int.go new file mode 100644 index 00000000..b53149cb --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/int.go @@ -0,0 +1,98 @@ +package json + +import ( + "unsafe" +) + +var endianness int + +func init() { + var b [2]byte + *(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD) + + switch b[0] { + case 0xCD: + endianness = 0 // LE + case 0xAB: + endianness = 1 // BE + default: + panic("could not determine endianness") + } +} + +// "00010203...96979899" cast to []uint16 +var intLELookup = [100]uint16{ + 0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930, + 0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931, + 0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932, + 0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933, + 0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934, + 0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935, + 0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936, + 0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937, + 0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938, + 0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939, +} + +var intBELookup = [100]uint16{ + 0x3030, 0x3031, 0x3032, 
0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039, + 0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139, + 0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239, + 0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339, + 0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439, + 0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539, + 0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639, + 0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739, + 0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839, + 0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939, +} + +var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup} + +func appendInt(b []byte, n int64) []byte { + return formatInteger(b, uint64(n), n < 0) +} + +func appendUint(b []byte, n uint64) []byte { + return formatInteger(b, n, false) +} + +func formatInteger(out []byte, n uint64, negative bool) []byte { + if !negative { + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + } else { + n = -n + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + if negative { + i-- + b[i] = '-' + } + + return append(out, b[i:]...) 
+} diff --git a/vendor/github.com/segmentio/encoding/json/json.go b/vendor/github.com/segmentio/encoding/json/json.go new file mode 100644 index 00000000..028fd1f3 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/json.go @@ -0,0 +1,594 @@ +package json + +import ( + "bytes" + "encoding/json" + "io" + "math/bits" + "reflect" + "runtime" + "sync" + "unsafe" +) + +// Delim is documented at https://golang.org/pkg/encoding/json/#Delim +type Delim = json.Delim + +// InvalidUTF8Error is documented at https://golang.org/pkg/encoding/json/#InvalidUTF8Error +type InvalidUTF8Error = json.InvalidUTF8Error //nolint:staticcheck // compat. + +// InvalidUnmarshalError is documented at https://golang.org/pkg/encoding/json/#InvalidUnmarshalError +type InvalidUnmarshalError = json.InvalidUnmarshalError + +// Marshaler is documented at https://golang.org/pkg/encoding/json/#Marshaler +type Marshaler = json.Marshaler + +// MarshalerError is documented at https://golang.org/pkg/encoding/json/#MarshalerError +type MarshalerError = json.MarshalerError + +// Number is documented at https://golang.org/pkg/encoding/json/#Number +type Number = json.Number + +// RawMessage is documented at https://golang.org/pkg/encoding/json/#RawMessage +type RawMessage = json.RawMessage + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError = json.SyntaxError + +// Token is documented at https://golang.org/pkg/encoding/json/#Token +type Token = json.Token + +// UnmarshalFieldError is documented at https://golang.org/pkg/encoding/json/#UnmarshalFieldError +type UnmarshalFieldError = json.UnmarshalFieldError //nolint:staticcheck // compat. 
+ +// UnmarshalTypeError is documented at https://golang.org/pkg/encoding/json/#UnmarshalTypeError +type UnmarshalTypeError = json.UnmarshalTypeError + +// Unmarshaler is documented at https://golang.org/pkg/encoding/json/#Unmarshaler +type Unmarshaler = json.Unmarshaler + +// UnsupportedTypeError is documented at https://golang.org/pkg/encoding/json/#UnsupportedTypeError +type UnsupportedTypeError = json.UnsupportedTypeError + +// UnsupportedValueError is documented at https://golang.org/pkg/encoding/json/#UnsupportedValueError +type UnsupportedValueError = json.UnsupportedValueError + +// AppendFlags is a type used to represent configuration options that can be +// applied when formatting json output. +type AppendFlags uint32 + +const ( + // EscapeHTML is a formatting flag used to to escape HTML in json strings. + EscapeHTML AppendFlags = 1 << iota + + // SortMapKeys is formatting flag used to enable sorting of map keys when + // encoding JSON (this matches the behavior of the standard encoding/json + // package). + SortMapKeys + + // TrustRawMessage is a performance optimization flag to skip value + // checking of raw messages. It should only be used if the values are + // known to be valid json (e.g., they were created by json.Unmarshal). + TrustRawMessage + + // appendNewline is a formatting flag to enable the addition of a newline + // in Encode (this matches the behavior of the standard encoding/json + // package). + appendNewline +) + +// ParseFlags is a type used to represent configuration options that can be +// applied when parsing json input. 
+type ParseFlags uint32 + +func (flags ParseFlags) has(f ParseFlags) bool { + return (flags & f) != 0 +} + +func (f ParseFlags) kind() Kind { + return Kind((f >> kindOffset) & 0xFF) +} + +func (f ParseFlags) withKind(kind Kind) ParseFlags { + return (f & ^(ParseFlags(0xFF) << kindOffset)) | (ParseFlags(kind) << kindOffset) +} + +const ( + // DisallowUnknownFields is a parsing flag used to prevent decoding of + // objects to Go struct values when a field of the input does not match + // with any of the struct fields. + DisallowUnknownFields ParseFlags = 1 << iota + + // UseNumber is a parsing flag used to load numeric values as Number + // instead of float64. + UseNumber + + // DontCopyString is a parsing flag used to provide zero-copy support when + // loading string values from a json payload. It is not always possible to + // avoid dynamic memory allocations, for example when a string is escaped in + // the json data a new buffer has to be allocated, but when the `wire` value + // can be used as content of a Go value the decoder will simply point into + // the input buffer. + DontCopyString + + // DontCopyNumber is a parsing flag used to provide zero-copy support when + // loading Number values (see DontCopyString and DontCopyRawMessage). + DontCopyNumber + + // DontCopyRawMessage is a parsing flag used to provide zero-copy support + // when loading RawMessage values from a json payload. When used, the + // RawMessage values will not be allocated into new memory buffers and + // will instead point directly to the area of the input buffer where the + // value was found. + DontCopyRawMessage + + // DontMatchCaseInsensitiveStructFields is a parsing flag used to prevent + // matching fields in a case-insensitive way. This can prevent degrading + // performance on case conversions, and can also act as a stricter decoding + // mode. + DontMatchCaseInsensitiveStructFields + + // Decode integers into *big.Int. + // Takes precedence over UseNumber for integers. 
+ UseBigInt + + // Decode in-range integers to int64. + // Takes precedence over UseNumber and UseBigInt for in-range integers. + UseInt64 + + // Decode in-range positive integers to uint64. + // Takes precedence over UseNumber, UseBigInt, and UseInt64 + // for positive, in-range integers. + UseUint64 + + // ZeroCopy is a parsing flag that combines all the copy optimizations + // available in the package. + // + // The zero-copy optimizations are better used in request-handler style + // code where none of the values are retained after the handler returns. + ZeroCopy = DontCopyString | DontCopyNumber | DontCopyRawMessage + + // validAsciiPrint is an internal flag indicating that the input contains + // only valid ASCII print chars (0x20 <= c <= 0x7E). If the flag is unset, + // it's unknown whether the input is valid ASCII print. + validAsciiPrint ParseFlags = 1 << 28 + + // noBackslach is an internal flag indicating that the input does not + // contain a backslash. If the flag is unset, it's unknown whether the + // input contains a backslash. + noBackslash ParseFlags = 1 << 29 + + // Bit offset where the kind of the json value is stored. + // + // See Kind in token.go for the enum. + kindOffset ParseFlags = 16 +) + +// Kind represents the different kinds of value that exist in JSON. +type Kind uint + +const ( + Undefined Kind = 0 + + Null Kind = 1 // Null is not zero, so we keep zero for "undefined". + + Bool Kind = 2 // Bit two is set to 1, means it's a boolean. + False Kind = 2 // Bool + 0 + True Kind = 3 // Bool + 1 + + Num Kind = 4 // Bit three is set to 1, means it's a number. + Uint Kind = 5 // Num + 1 + Int Kind = 6 // Num + 2 + Float Kind = 7 // Num + 3 + + String Kind = 8 // Bit four is set to 1, means it's a string. + Unescaped Kind = 9 // String + 1 + + Array Kind = 16 // Equivalent to Delim == '[' + Object Kind = 32 // Equivalent to Delim == '{' +) + +// Class returns the class of k. 
+func (k Kind) Class() Kind { return Kind(1 << uint(bits.Len(uint(k))-1)) } + +// Append acts like Marshal but appends the json representation to b instead of +// always reallocating a new slice. +func Append(b []byte, x any, flags AppendFlags) ([]byte, error) { + if x == nil { + // Special case for nil values because it makes the rest of the code + // simpler to assume that it won't be seeing nil pointers. + return append(b, "null"...), nil + } + + t := reflect.TypeOf(x) + p := (*iface)(unsafe.Pointer(&x)).ptr + + cache := cacheLoad() + c, found := cache[typeid(t)] + + if !found { + c = constructCachedCodec(t, cache) + } + + b, err := c.encode(encoder{flags: flags}, b, p) + runtime.KeepAlive(x) + return b, err +} + +// Escape is a convenience helper to construct an escaped JSON string from s. +// The function escales HTML characters, for more control over the escape +// behavior and to write to a pre-allocated buffer, use AppendEscape. +func Escape(s string) []byte { + // +10 for extra escape characters, maybe not enough and the buffer will + // be reallocated. + b := make([]byte, 0, len(s)+10) + return AppendEscape(b, s, EscapeHTML) +} + +// AppendEscape appends s to b with the string escaped as a JSON value. +// This will include the starting and ending quote characters, and the +// appropriate characters will be escaped correctly for JSON encoding. +func AppendEscape(b []byte, s string, flags AppendFlags) []byte { + e := encoder{flags: flags} + b, _ = e.encodeString(b, unsafe.Pointer(&s)) + return b +} + +// Unescape is a convenience helper to unescape a JSON value. +// For more control over the unescape behavior and +// to write to a pre-allocated buffer, use AppendUnescape. +func Unescape(s []byte) []byte { + b := make([]byte, 0, len(s)) + return AppendUnescape(b, s, ParseFlags(0)) +} + +// AppendUnescape appends s to b with the string unescaped as a JSON value. 
+// This will remove starting and ending quote characters, and the +// appropriate characters will be escaped correctly as if JSON decoded. +// New space will be reallocated if more space is needed. +func AppendUnescape(b []byte, s []byte, flags ParseFlags) []byte { + d := decoder{flags: flags} + buf := new(string) + d.decodeString(s, unsafe.Pointer(buf)) + return append(b, *buf...) +} + +// Compact is documented at https://golang.org/pkg/encoding/json/#Compact +func Compact(dst *bytes.Buffer, src []byte) error { + return json.Compact(dst, src) +} + +// HTMLEscape is documented at https://golang.org/pkg/encoding/json/#HTMLEscape +func HTMLEscape(dst *bytes.Buffer, src []byte) { + json.HTMLEscape(dst, src) +} + +// Indent is documented at https://golang.org/pkg/encoding/json/#Indent +func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { + return json.Indent(dst, src, prefix, indent) +} + +// Marshal is documented at https://golang.org/pkg/encoding/json/#Marshal +func Marshal(x any) ([]byte, error) { + var err error + buf := encoderBufferPool.Get().(*encoderBuffer) + + if buf.data, err = Append(buf.data[:0], x, EscapeHTML|SortMapKeys); err != nil { + return nil, err + } + + b := make([]byte, len(buf.data)) + copy(b, buf.data) + encoderBufferPool.Put(buf) + return b, nil +} + +// MarshalIndent is documented at https://golang.org/pkg/encoding/json/#MarshalIndent +func MarshalIndent(x any, prefix, indent string) ([]byte, error) { + b, err := Marshal(x) + + if err == nil { + tmp := &bytes.Buffer{} + tmp.Grow(2 * len(b)) + + Indent(tmp, b, prefix, indent) + b = tmp.Bytes() + } + + return b, err +} + +// Unmarshal is documented at https://golang.org/pkg/encoding/json/#Unmarshal +func Unmarshal(b []byte, x any) error { + r, err := Parse(b, x, 0) + if len(r) != 0 { + if _, ok := err.(*SyntaxError); !ok { + // The encoding/json package prioritizes reporting errors caused by + // unexpected trailing bytes over other issues; here we emulate this + // 
behavior by overriding the error. + err = syntaxError(r, "invalid character '%c' after top-level value", r[0]) + } + } + return err +} + +// Parse behaves like Unmarshal but the caller can pass a set of flags to +// configure the parsing behavior. +func Parse(b []byte, x any, flags ParseFlags) ([]byte, error) { + t := reflect.TypeOf(x) + p := (*iface)(unsafe.Pointer(&x)).ptr + + d := decoder{flags: flags | internalParseFlags(b)} + + b = skipSpaces(b) + + if t == nil || p == nil || t.Kind() != reflect.Ptr { + _, r, _, err := d.parseValue(b) + r = skipSpaces(r) + if err != nil { + return r, err + } + return r, &InvalidUnmarshalError{Type: t} + } + t = t.Elem() + + cache := cacheLoad() + c, found := cache[typeid(t)] + + if !found { + c = constructCachedCodec(t, cache) + } + + r, err := c.decode(d, b, p) + return skipSpaces(r), err +} + +// Valid is documented at https://golang.org/pkg/encoding/json/#Valid +func Valid(data []byte) bool { + data = skipSpaces(data) + d := decoder{flags: internalParseFlags(data)} + _, data, _, err := d.parseValue(data) + if err != nil { + return false + } + return len(skipSpaces(data)) == 0 +} + +// Decoder is documented at https://golang.org/pkg/encoding/json/#Decoder +type Decoder struct { + reader io.Reader + buffer []byte + remain []byte + inputOffset int64 + err error + flags ParseFlags +} + +// NewDecoder is documented at https://golang.org/pkg/encoding/json/#NewDecoder +func NewDecoder(r io.Reader) *Decoder { return &Decoder{reader: r} } + +// Buffered is documented at https://golang.org/pkg/encoding/json/#Decoder.Buffered +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.remain) +} + +// Decode is documented at https://golang.org/pkg/encoding/json/#Decoder.Decode +func (dec *Decoder) Decode(v any) error { + raw, err := dec.readValue() + if err != nil { + return err + } + _, err = Parse(raw, v, dec.flags) + return err +} + +const ( + minBufferSize = 32768 + minReadSize = 4096 +) + +// readValue reads one JSON 
value from the buffer and returns its raw bytes. It +// is optimized for the "one JSON value per line" case. +func (dec *Decoder) readValue() (v []byte, err error) { + var n int + var r []byte + d := decoder{flags: dec.flags} + + for { + if len(dec.remain) != 0 { + v, r, _, err = d.parseValue(dec.remain) + if err == nil { + dec.remain, n = skipSpacesN(r) + dec.inputOffset += int64(len(v) + n) + return + } + if len(r) != 0 { + // Parsing of the next JSON value stopped at a position other + // than the end of the input buffer, which indicaates that a + // syntax error was encountered. + return + } + } + + if err = dec.err; err != nil { + if len(dec.remain) != 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return + } + + if dec.buffer == nil { + dec.buffer = make([]byte, 0, minBufferSize) + } else { + dec.buffer = dec.buffer[:copy(dec.buffer[:cap(dec.buffer)], dec.remain)] + dec.remain = nil + } + + if (cap(dec.buffer) - len(dec.buffer)) < minReadSize { + buf := make([]byte, len(dec.buffer), 2*cap(dec.buffer)) + copy(buf, dec.buffer) + dec.buffer = buf + } + + n, err = io.ReadFull(dec.reader, dec.buffer[len(dec.buffer):cap(dec.buffer)]) + if n > 0 { + dec.buffer = dec.buffer[:len(dec.buffer)+n] + if err != nil { + err = nil + } + } else if err == io.ErrUnexpectedEOF { + err = io.EOF + } + dec.remain, n = skipSpacesN(dec.buffer) + d.flags = dec.flags | internalParseFlags(dec.remain) + dec.inputOffset += int64(n) + dec.err = err + } +} + +// DisallowUnknownFields is documented at https://golang.org/pkg/encoding/json/#Decoder.DisallowUnknownFields +func (dec *Decoder) DisallowUnknownFields() { dec.flags |= DisallowUnknownFields } + +// UseNumber is documented at https://golang.org/pkg/encoding/json/#Decoder.UseNumber +func (dec *Decoder) UseNumber() { dec.flags |= UseNumber } + +// DontCopyString is an extension to the standard encoding/json package +// which instructs the decoder to not copy strings loaded from the json +// payloads when possible. 
+func (dec *Decoder) DontCopyString() { dec.flags |= DontCopyString } + +// DontCopyNumber is an extension to the standard encoding/json package +// which instructs the decoder to not copy numbers loaded from the json +// payloads. +func (dec *Decoder) DontCopyNumber() { dec.flags |= DontCopyNumber } + +// DontCopyRawMessage is an extension to the standard encoding/json package +// which instructs the decoder to not allocate RawMessage values in separate +// memory buffers (see the documentation of the DontcopyRawMessage flag for +// more detais). +func (dec *Decoder) DontCopyRawMessage() { dec.flags |= DontCopyRawMessage } + +// DontMatchCaseInsensitiveStructFields is an extension to the standard +// encoding/json package which instructs the decoder to not match object fields +// against struct fields in a case-insensitive way, the field names have to +// match exactly to be decoded into the struct field values. +func (dec *Decoder) DontMatchCaseInsensitiveStructFields() { + dec.flags |= DontMatchCaseInsensitiveStructFields +} + +// ZeroCopy is an extension to the standard encoding/json package which enables +// all the copy optimizations of the decoder. +func (dec *Decoder) ZeroCopy() { dec.flags |= ZeroCopy } + +// InputOffset returns the input stream byte offset of the current decoder position. +// The offset gives the location of the end of the most recently returned token +// and the beginning of the next token. 
+func (dec *Decoder) InputOffset() int64 { + return dec.inputOffset +} + +// Encoder is documented at https://golang.org/pkg/encoding/json/#Encoder +type Encoder struct { + writer io.Writer + prefix string + indent string + buffer *bytes.Buffer + err error + flags AppendFlags +} + +// NewEncoder is documented at https://golang.org/pkg/encoding/json/#NewEncoder +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{writer: w, flags: EscapeHTML | SortMapKeys | appendNewline} +} + +// Encode is documented at https://golang.org/pkg/encoding/json/#Encoder.Encode +func (enc *Encoder) Encode(v any) error { + if enc.err != nil { + return enc.err + } + + var err error + buf := encoderBufferPool.Get().(*encoderBuffer) + + buf.data, err = Append(buf.data[:0], v, enc.flags) + if err != nil { + encoderBufferPool.Put(buf) + return err + } + + if (enc.flags & appendNewline) != 0 { + buf.data = append(buf.data, '\n') + } + b := buf.data + + if enc.prefix != "" || enc.indent != "" { + if enc.buffer == nil { + enc.buffer = new(bytes.Buffer) + enc.buffer.Grow(2 * len(buf.data)) + } else { + enc.buffer.Reset() + } + Indent(enc.buffer, buf.data, enc.prefix, enc.indent) + b = enc.buffer.Bytes() + } + + if _, err := enc.writer.Write(b); err != nil { + enc.err = err + } + + encoderBufferPool.Put(buf) + return err +} + +// SetEscapeHTML is documented at https://golang.org/pkg/encoding/json/#Encoder.SetEscapeHTML +func (enc *Encoder) SetEscapeHTML(on bool) { + if on { + enc.flags |= EscapeHTML + } else { + enc.flags &= ^EscapeHTML + } +} + +// SetIndent is documented at https://golang.org/pkg/encoding/json/#Encoder.SetIndent +func (enc *Encoder) SetIndent(prefix, indent string) { + enc.prefix = prefix + enc.indent = indent +} + +// SetSortMapKeys is an extension to the standard encoding/json package which +// allows the program to toggle sorting of map keys on and off. 
+func (enc *Encoder) SetSortMapKeys(on bool) { + if on { + enc.flags |= SortMapKeys + } else { + enc.flags &= ^SortMapKeys + } +} + +// SetTrustRawMessage skips value checking when encoding a raw json message. It should only +// be used if the values are known to be valid json, e.g. because they were originally created +// by json.Unmarshal. +func (enc *Encoder) SetTrustRawMessage(on bool) { + if on { + enc.flags |= TrustRawMessage + } else { + enc.flags &= ^TrustRawMessage + } +} + +// SetAppendNewline is an extension to the standard encoding/json package which +// allows the program to toggle the addition of a newline in Encode on or off. +func (enc *Encoder) SetAppendNewline(on bool) { + if on { + enc.flags |= appendNewline + } else { + enc.flags &= ^appendNewline + } +} + +var encoderBufferPool = sync.Pool{ + New: func() any { return &encoderBuffer{data: make([]byte, 0, 4096)} }, +} + +type encoderBuffer struct{ data []byte } diff --git a/vendor/github.com/segmentio/encoding/json/parse.go b/vendor/github.com/segmentio/encoding/json/parse.go new file mode 100644 index 00000000..d0ee2214 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/parse.go @@ -0,0 +1,781 @@ +package json + +import ( + "bytes" + "encoding/binary" + "math" + "math/bits" + "reflect" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "github.com/segmentio/encoding/ascii" +) + +// All spaces characters defined in the json specification. 
+const ( + sp = ' ' + ht = '\t' + nl = '\n' + cr = '\r' +) + +func internalParseFlags(b []byte) (flags ParseFlags) { + // Don't consider surrounding whitespace + b = skipSpaces(b) + b = trimTrailingSpaces(b) + if ascii.ValidPrint(b) { + flags |= validAsciiPrint + } + if bytes.IndexByte(b, '\\') == -1 { + flags |= noBackslash + } + return +} + +func skipSpaces(b []byte) []byte { + if len(b) > 0 && b[0] <= 0x20 { + b, _ = skipSpacesN(b) + } + return b +} + +func skipSpacesN(b []byte) ([]byte, int) { + for i := range b { + switch b[i] { + case sp, ht, nl, cr: + default: + return b[i:], i + } + } + return nil, 0 +} + +func trimTrailingSpaces(b []byte) []byte { + if len(b) > 0 && b[len(b)-1] <= 0x20 { + b = trimTrailingSpacesN(b) + } + return b +} + +func trimTrailingSpacesN(b []byte) []byte { + i := len(b) - 1 +loop: + for ; i >= 0; i-- { + switch b[i] { + case sp, ht, nl, cr: + default: + break loop + } + } + return b[:i+1] +} + +// parseInt parses a decimal representation of an int64 from b. +// +// The function is equivalent to calling strconv.ParseInt(string(b), 10, 64) but +// it prevents Go from making a memory allocation for converting a byte slice to +// a string (escape analysis fails due to the error returned by strconv.ParseInt). +// +// Because it only works with base 10 the function is also significantly faster +// than strconv.ParseInt. 
+func (d decoder) parseInt(b []byte, t reflect.Type) (int64, []byte, error) { + var value int64 + var count int + + if len(b) == 0 { + return 0, b, syntaxError(b, "cannot decode integer from an empty input") + } + + if b[0] == '-' { + const max = math.MinInt64 + const lim = max / 10 + + if len(b) == 1 { + return 0, b, syntaxError(b, "cannot decode integer from '-'") + } + + if len(b) > 2 && b[1] == '0' && '0' <= b[2] && b[2] <= '9' { + return 0, b, syntaxError(b, "invalid leading character '0' in integer") + } + + for _, c := range b[1:] { + if c < '0' || c > '9' { + if count == 0 { + b, err := d.inputError(b, t) + return 0, b, err + } + break + } + + if value < lim { + return 0, b, unmarshalOverflow(b, t) + } + + value *= 10 + x := int64(c - '0') + + if value < (max + x) { + return 0, b, unmarshalOverflow(b, t) + } + + value -= x + count++ + } + + count++ + } else { + if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' { + return 0, b, syntaxError(b, "invalid leading character '0' in integer") + } + + for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ { + x := int64(b[count] - '0') + next := value*10 + x + if next < value { + return 0, b, unmarshalOverflow(b, t) + } + value = next + } + + if count == 0 { + b, err := d.inputError(b, t) + return 0, b, err + } + } + + if count < len(b) { + switch b[count] { + case '.', 'e', 'E': // was this actually a float? + v, r, _, err := d.parseNumber(b) + if err != nil { + v, r = b[:count+1], b[count+1:] + } + return 0, r, unmarshalTypeError(v, t) + } + } + + return value, b[count:], nil +} + +// parseUint is like parseInt but for unsigned integers. 
+func (d decoder) parseUint(b []byte, t reflect.Type) (uint64, []byte, error) {
+	var value uint64
+	var count int
+
+	if len(b) == 0 {
+		return 0, b, syntaxError(b, "cannot decode integer value from an empty input")
+	}
+
+	if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' {
+		return 0, b, syntaxError(b, "invalid leading character '0' in integer")
+	}
+
+	for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ {
+		x := uint64(b[count] - '0')
+		next := value*10 + x
+		if next < value {
+			return 0, b, unmarshalOverflow(b, t)
+		}
+		value = next
+	}
+
+	if count == 0 {
+		b, err := d.inputError(b, t)
+		return 0, b, err
+	}
+
+	if count < len(b) {
+		switch b[count] {
+		case '.', 'e', 'E': // was this actually a float?
+			v, r, _, err := d.parseNumber(b)
+			if err != nil {
+				v, r = b[:count+1], b[count+1:]
+			}
+			return 0, r, unmarshalTypeError(v, t)
+		}
+	}
+
+	return value, b[count:], nil
+}
+
+// parseUintHex parses a hexadecimal representation of a uint64 from b.
+//
+// The function is equivalent to calling strconv.ParseUint(string(b), 16, 64) but
+// it prevents Go from making a memory allocation for converting a byte slice to
+// a string (escape analysis fails due to the error returned by strconv.ParseUint).
+//
+// Because it only works with base 16 the function is also significantly faster
+// than strconv.ParseUint.
+func (d decoder) parseUintHex(b []byte) (uint64, []byte, error) { + const max = math.MaxUint64 + const lim = max / 0x10 + + var value uint64 + var count int + + if len(b) == 0 { + return 0, b, syntaxError(b, "cannot decode hexadecimal value from an empty input") + } + +parseLoop: + for i, c := range b { + var x uint64 + + switch { + case c >= '0' && c <= '9': + x = uint64(c - '0') + + case c >= 'A' && c <= 'F': + x = uint64(c-'A') + 0xA + + case c >= 'a' && c <= 'f': + x = uint64(c-'a') + 0xA + + default: + if i == 0 { + return 0, b, syntaxError(b, "expected hexadecimal digit but found '%c'", c) + } + break parseLoop + } + + if value > lim { + return 0, b, syntaxError(b, "hexadecimal value out of range") + } + + if value *= 0x10; value > (max - x) { + return 0, b, syntaxError(b, "hexadecimal value out of range") + } + + value += x + count++ + } + + return value, b[count:], nil +} + +func (d decoder) parseNull(b []byte) ([]byte, []byte, Kind, error) { + if hasNullPrefix(b) { + return b[:4], b[4:], Null, nil + } + if len(b) < 4 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + return nil, b, Undefined, syntaxError(b, "expected 'null' but found invalid token") +} + +func (d decoder) parseTrue(b []byte) ([]byte, []byte, Kind, error) { + if hasTruePrefix(b) { + return b[:4], b[4:], True, nil + } + if len(b) < 4 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + return nil, b, Undefined, syntaxError(b, "expected 'true' but found invalid token") +} + +func (d decoder) parseFalse(b []byte) ([]byte, []byte, Kind, error) { + if hasFalsePrefix(b) { + return b[:5], b[5:], False, nil + } + if len(b) < 5 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + return nil, b, Undefined, syntaxError(b, "expected 'false' but found invalid token") +} + +func (d decoder) parseNumber(b []byte) (v, r []byte, kind Kind, err error) { + if len(b) == 0 { + r, err = b, unexpectedEOF(b) + return + } + + // Assume it's an unsigned integer at first. 
+ kind = Uint + + i := 0 + // sign + if b[i] == '-' { + kind = Int + i++ + } + + if i == len(b) { + r, err = b[i:], syntaxError(b, "missing number value after sign") + return + } + + if b[i] < '0' || b[i] > '9' { + r, err = b[i:], syntaxError(b, "expected digit but got '%c'", b[i]) + return + } + + // integer part + if b[i] == '0' { + i++ + if i == len(b) || (b[i] != '.' && b[i] != 'e' && b[i] != 'E') { + v, r = b[:i], b[i:] + return + } + if '0' <= b[i] && b[i] <= '9' { + r, err = b[i:], syntaxError(b, "cannot decode number with leading '0' character") + return + } + } + + for i < len(b) && '0' <= b[i] && b[i] <= '9' { + i++ + } + + // decimal part + if i < len(b) && b[i] == '.' { + kind = Float + i++ + decimalStart := i + + for i < len(b) { + if c := b[i]; '0' > c || c > '9' { + if i == decimalStart { + r, err = b[i:], syntaxError(b, "expected digit but found '%c'", c) + return + } + break + } + i++ + } + + if i == decimalStart { + r, err = b[i:], syntaxError(b, "expected decimal part after '.'") + return + } + } + + // exponent part + if i < len(b) && (b[i] == 'e' || b[i] == 'E') { + kind = Float + i++ + + if i < len(b) { + if c := b[i]; c == '+' || c == '-' { + i++ + } + } + + if i == len(b) { + r, err = b[i:], syntaxError(b, "missing exponent in number") + return + } + + exponentStart := i + + for i < len(b) { + if c := b[i]; '0' > c || c > '9' { + if i == exponentStart { + err = syntaxError(b, "expected digit but found '%c'", c) + return + } + break + } + i++ + } + } + + v, r = b[:i], b[i:] + return +} + +func (d decoder) parseUnicode(b []byte) (rune, int, error) { + if len(b) < 4 { + return 0, len(b), syntaxError(b, "unicode code point must have at least 4 characters") + } + + u, r, err := d.parseUintHex(b[:4]) + if err != nil { + return 0, 4, syntaxError(b, "parsing unicode code point: %s", err) + } + + if len(r) != 0 { + return 0, 4, syntaxError(b, "invalid unicode code point") + } + + return rune(u), 4, nil +} + +func (d decoder) parseString(b []byte) 
([]byte, []byte, Kind, error) { + if len(b) < 2 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + if b[0] != '"' { + return nil, b, Undefined, syntaxError(b, "expected '\"' at the beginning of a string value") + } + + var n int + if len(b) >= 9 { + // This is an optimization for short strings. We read 8/16 bytes, + // and XOR each with 0x22 (") so that these bytes (and only + // these bytes) are now zero. We use the hasless(u,1) trick + // from https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord + // to determine whether any bytes are zero. Finally, we CTZ + // to find the index of that byte. + const mask1 = 0x2222222222222222 + const mask2 = 0x0101010101010101 + const mask3 = 0x8080808080808080 + u := binary.LittleEndian.Uint64(b[1:]) ^ mask1 + if mask := (u - mask2) & ^u & mask3; mask != 0 { + n = bits.TrailingZeros64(mask)/8 + 2 + goto found + } + if len(b) >= 17 { + u = binary.LittleEndian.Uint64(b[9:]) ^ mask1 + if mask := (u - mask2) & ^u & mask3; mask != 0 { + n = bits.TrailingZeros64(mask)/8 + 10 + goto found + } + } + } + n = bytes.IndexByte(b[1:], '"') + 2 + if n <= 1 { + return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value") + } +found: + if (d.flags.has(noBackslash) || bytes.IndexByte(b[1:n], '\\') < 0) && + (d.flags.has(validAsciiPrint) || ascii.ValidPrint(b[1:n])) { + return b[:n], b[n:], Unescaped, nil + } + + for i := 1; i < len(b); i++ { + switch b[i] { + case '\\': + if i++; i < len(b) { + switch b[i] { + case '"', '\\', '/', 'n', 'r', 't', 'f', 'b': + case 'u': + _, n, err := d.parseUnicode(b[i+1:]) + if err != nil { + return nil, b[i+1+n:], Undefined, err + } + i += n + default: + return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i]) + } + } + + case '"': + return b[:i+1], b[i+1:], String, nil + + default: + if b[i] < 0x20 { + return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i]) + } + } + } + + return 
nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value") +} + +func (d decoder) parseStringUnquote(b []byte, r []byte) ([]byte, []byte, bool, error) { + s, b, k, err := d.parseString(b) + if err != nil { + return s, b, false, err + } + + s = s[1 : len(s)-1] // trim the quotes + + if k == Unescaped { + return s, b, false, nil + } + + if r == nil { + r = make([]byte, 0, len(s)) + } + + for len(s) != 0 { + i := bytes.IndexByte(s, '\\') + + if i < 0 { + r = appendCoerceInvalidUTF8(r, s) + break + } + + r = appendCoerceInvalidUTF8(r, s[:i]) + s = s[i+1:] + + c := s[0] + switch c { + case '"', '\\', '/': + // simple escaped character + case 'n': + c = '\n' + + case 'r': + c = '\r' + + case 't': + c = '\t' + + case 'b': + c = '\b' + + case 'f': + c = '\f' + + case 'u': + s = s[1:] + + r1, n1, err := d.parseUnicode(s) + if err != nil { + return r, b, true, err + } + s = s[n1:] + + if utf16.IsSurrogate(r1) { + if !hasPrefix(s, `\u`) { + r1 = unicode.ReplacementChar + } else { + r2, n2, err := d.parseUnicode(s[2:]) + if err != nil { + return r, b, true, err + } + if r1 = utf16.DecodeRune(r1, r2); r1 != unicode.ReplacementChar { + s = s[2+n2:] + } + } + } + + r = appendRune(r, r1) + continue + + default: // not sure what this escape sequence is + return r, b, false, syntaxError(s, "invalid character '%c' in string escape code", c) + } + + r = append(r, c) + s = s[1:] + } + + return r, b, true, nil +} + +func appendRune(b []byte, r rune) []byte { + n := len(b) + b = append(b, 0, 0, 0, 0) + return b[:n+utf8.EncodeRune(b[n:], r)] +} + +func appendCoerceInvalidUTF8(b []byte, s []byte) []byte { + c := [4]byte{} + + for _, r := range string(s) { + b = append(b, c[:utf8.EncodeRune(c[:], r)]...) 
+ } + + return b +} + +func (d decoder) parseObject(b []byte) ([]byte, []byte, Kind, error) { + if len(b) < 2 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + + if b[0] != '{' { + return nil, b, Undefined, syntaxError(b, "expected '{' at the beginning of an object value") + } + + var err error + a := b + n := len(b) + i := 0 + + b = b[1:] + for { + b = skipSpaces(b) + + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "cannot decode object from empty input") + } + + if b[0] == '}' { + j := (n - len(b)) + 1 + return a[:j], a[j:], Object, nil + } + + if i != 0 { + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field value") + } + if b[0] != ',' { + return nil, b, Undefined, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + if len(b) == 0 { + return nil, b, Undefined, unexpectedEOF(b) + } + if b[0] == '}' { + return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field") + } + } + + _, b, _, err = d.parseString(b) + if err != nil { + return nil, b, Undefined, err + } + b = skipSpaces(b) + + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field key") + } + if b[0] != ':' { + return nil, b, Undefined, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + _, b, _, err = d.parseValue(b) + if err != nil { + return nil, b, Undefined, err + } + + i++ + } +} + +func (d decoder) parseArray(b []byte) ([]byte, []byte, Kind, error) { + if len(b) < 2 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + + if b[0] != '[' { + return nil, b, Undefined, syntaxError(b, "expected '[' at the beginning of array value") + } + + var err error + a := b + n := len(b) + i := 0 + + b = b[1:] + for { + b = skipSpaces(b) + + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "missing closing ']' after array value") + } + + if b[0] == ']' { + 
j := (n - len(b)) + 1 + return a[:j], a[j:], Array, nil + } + + if i != 0 { + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "unexpected EOF after array element") + } + if b[0] != ',' { + return nil, b, Undefined, syntaxError(b, "expected ',' after array element but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + if len(b) == 0 { + return nil, b, Undefined, unexpectedEOF(b) + } + if b[0] == ']' { + return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field") + } + } + + _, b, _, err = d.parseValue(b) + if err != nil { + return nil, b, Undefined, err + } + + i++ + } +} + +func (d decoder) parseValue(b []byte) ([]byte, []byte, Kind, error) { + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "unexpected end of JSON input") + } + + var v []byte + var k Kind + var err error + + switch b[0] { + case '{': + v, b, k, err = d.parseObject(b) + case '[': + v, b, k, err = d.parseArray(b) + case '"': + v, b, k, err = d.parseString(b) + case 'n': + v, b, k, err = d.parseNull(b) + case 't': + v, b, k, err = d.parseTrue(b) + case 'f': + v, b, k, err = d.parseFalse(b) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v, b, k, err = d.parseNumber(b) + default: + err = syntaxError(b, "invalid character '%c' looking for beginning of value", b[0]) + } + + return v, b, k, err +} + +func hasNullPrefix(b []byte) bool { + return len(b) >= 4 && string(b[:4]) == "null" +} + +func hasTruePrefix(b []byte) bool { + return len(b) >= 4 && string(b[:4]) == "true" +} + +func hasFalsePrefix(b []byte) bool { + return len(b) >= 5 && string(b[:5]) == "false" +} + +func hasPrefix(b []byte, s string) bool { + return len(b) >= len(s) && s == string(b[:len(s)]) +} + +func hasLeadingSign(b []byte) bool { + return len(b) > 0 && (b[0] == '+' || b[0] == '-') +} + +func hasLeadingZeroes(b []byte) bool { + if hasLeadingSign(b) { + b = b[1:] + } + return len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' +} + +func appendToLower(b, s []byte) 
[]byte { + if ascii.Valid(s) { // fast path for ascii strings + i := 0 + + for j := range s { + c := s[j] + + if 'A' <= c && c <= 'Z' { + b = append(b, s[i:j]...) + b = append(b, c+('a'-'A')) + i = j + 1 + } + } + + return append(b, s[i:]...) + } + + for _, r := range string(s) { + b = appendRune(b, foldRune(r)) + } + + return b +} + +func foldRune(r rune) rune { + if r = unicode.SimpleFold(r); 'A' <= r && r <= 'Z' { + r = r + ('a' - 'A') + } + return r +} diff --git a/vendor/github.com/segmentio/encoding/json/reflect.go b/vendor/github.com/segmentio/encoding/json/reflect.go new file mode 100644 index 00000000..6edd80e6 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/reflect.go @@ -0,0 +1,20 @@ +//go:build go1.20 +// +build go1.20 + +package json + +import ( + "reflect" + "unsafe" +) + +func extendSlice(t reflect.Type, s *slice, n int) slice { + arrayType := reflect.ArrayOf(n, t.Elem()) + arrayData := reflect.New(arrayType) + reflect.Copy(arrayData.Elem(), reflect.NewAt(t, unsafe.Pointer(s)).Elem()) + return slice{ + data: unsafe.Pointer(arrayData.Pointer()), + len: s.len, + cap: n, + } +} diff --git a/vendor/github.com/segmentio/encoding/json/reflect_optimize.go b/vendor/github.com/segmentio/encoding/json/reflect_optimize.go new file mode 100644 index 00000000..6588433d --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/reflect_optimize.go @@ -0,0 +1,30 @@ +//go:build !go1.20 +// +build !go1.20 + +package json + +import ( + "reflect" + "unsafe" +) + +//go:linkname unsafe_NewArray reflect.unsafe_NewArray +func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer + +//go:linkname typedslicecopy reflect.typedslicecopy +//go:noescape +func typedslicecopy(elemType unsafe.Pointer, dst, src slice) int + +func extendSlice(t reflect.Type, s *slice, n int) slice { + elemTypeRef := t.Elem() + elemTypePtr := ((*iface)(unsafe.Pointer(&elemTypeRef))).ptr + + d := slice{ + data: unsafe_NewArray(elemTypePtr, n), + len: s.len, + cap: n, + } + + 
typedslicecopy(elemTypePtr, d, *s) + return d +} diff --git a/vendor/github.com/segmentio/encoding/json/string.go b/vendor/github.com/segmentio/encoding/json/string.go new file mode 100644 index 00000000..a9a972b6 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/string.go @@ -0,0 +1,89 @@ +package json + +import ( + "math/bits" + "unsafe" +) + +const ( + lsb = 0x0101010101010101 + msb = 0x8080808080808080 +) + +// escapeIndex finds the index of the first char in `s` that requires escaping. +// A char requires escaping if it's outside of the range of [0x20, 0x7F] or if +// it includes a double quote or backslash. If the escapeHTML mode is enabled, +// the chars <, > and & also require escaping. If no chars in `s` require +// escaping, the return value is -1. +func escapeIndex(s string, escapeHTML bool) int { + chunks := stringToUint64(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | below(n, 0x20) | contains(n, '"') | contains(n, '\\') + if escapeHTML { + mask |= contains(n, '<') | contains(n, '>') | contains(n, '&') + } + if (mask & msb) != 0 { + return bits.TrailingZeros64(mask&msb) / 8 + } + } + + for i := len(chunks) * 8; i < len(s); i++ { + c := s[i] + if c < 0x20 || c > 0x7f || c == '"' || c == '\\' || (escapeHTML && (c == '<' || c == '>' || c == '&')) { + return i + } + } + + return -1 +} + +func escapeByteRepr(b byte) byte { + switch b { + case '\\', '"': + return b + case '\b': + return 'b' + case '\f': + return 'f' + case '\n': + return 'n' + case '\r': + return 'r' + case '\t': + return 't' + } + + return 0 +} + +// below return a mask that can be used to determine if any of the bytes +// in `n` are below `b`. If a byte's MSB is set in the mask then that byte was +// below `b`. The result is only valid if `b`, and each byte in `n`, is below +// 0x80. 
+func below(n uint64, b byte) uint64 { + return n - expand(b) +} + +// contains returns a mask that can be used to determine if any of the +// bytes in `n` are equal to `b`. If a byte's MSB is set in the mask then +// that byte is equal to `b`. The result is only valid if `b`, and each +// byte in `n`, is below 0x80. +func contains(n uint64, b byte) uint64 { + return (n ^ expand(b)) - lsb +} + +// expand puts the specified byte into each of the 8 bytes of a uint64. +func expand(b byte) uint64 { + return lsb * uint64(b) +} + +func stringToUint64(s string) []uint64 { + return *(*[]uint64)(unsafe.Pointer(&sliceHeader{ + Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)), + Len: len(s) / 8, + Cap: len(s) / 8, + })) +} diff --git a/vendor/github.com/segmentio/encoding/json/token.go b/vendor/github.com/segmentio/encoding/json/token.go new file mode 100644 index 00000000..ddcd05d3 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/token.go @@ -0,0 +1,426 @@ +package json + +import ( + "strconv" + "sync" + "unsafe" +) + +// Tokenizer is an iterator-style type which can be used to progressively parse +// through a json input. +// +// Tokenizing json is useful to build highly efficient parsing operations, for +// example when doing tranformations on-the-fly where as the program reads the +// input and produces the transformed json to an output buffer. +// +// Here is a common pattern to use a tokenizer: +// +// for t := json.NewTokenizer(b); t.Next(); { +// switch k := t.Kind(); k.Class() { +// case json.Null: +// ... +// case json.Bool: +// ... +// case json.Num: +// ... +// case json.String: +// ... +// case json.Array: +// ... +// case json.Object: +// ... +// } +// } +type Tokenizer struct { + // When the tokenizer is positioned on a json delimiter this field is not + // zero. In this case the possible values are '{', '}', '[', ']', ':', and + // ','. + Delim Delim + + // This field contains the raw json token that the tokenizer is pointing at. 
+ // When Delim is not zero, this field is a single-element byte slice + // continaing the delimiter value. Otherwise, this field holds values like + // null, true, false, numbers, or quoted strings. + Value RawValue + + // When the tokenizer has encountered invalid content this field is not nil. + Err error + + // When the value is in an array or an object, this field contains the depth + // at which it was found. + Depth int + + // When the value is in an array or an object, this field contains the + // position at which it was found. + Index int + + // This field is true when the value is the key of an object. + IsKey bool + + // Tells whether the next value read from the tokenizer is a key. + isKey bool + + // json input for the tokenizer, pointing at data right after the last token + // that was parsed. + json []byte + + // Stack used to track entering and leaving arrays, objects, and keys. + stack *stack + + // Decoder used for parsing. + decoder +} + +// NewTokenizer constructs a new Tokenizer which reads its json input from b. +func NewTokenizer(b []byte) *Tokenizer { + return &Tokenizer{ + json: b, + decoder: decoder{flags: internalParseFlags(b)}, + } +} + +// Reset erases the state of t and re-initializes it with the json input from b. +func (t *Tokenizer) Reset(b []byte) { + if t.stack != nil { + releaseStack(t.stack) + } + // This code is similar to: + // + // *t = Tokenizer{json: b} + // + // However, it does not compile down to an invocation of duff-copy. + t.Delim = 0 + t.Value = nil + t.Err = nil + t.Depth = 0 + t.Index = 0 + t.IsKey = false + t.isKey = false + t.json = b + t.stack = nil + t.decoder = decoder{flags: internalParseFlags(b)} +} + +// Next returns a new tokenizer pointing at the next token, or the zero-value of +// Tokenizer if the end of the json input has been reached. +// +// If the tokenizer encounters malformed json while reading the input the method +// sets t.Err to an error describing the issue, and returns false. 
Once an error +// has been encountered, the tokenizer will always fail until its input is +// cleared by a call to its Reset method. +func (t *Tokenizer) Next() bool { + if t.Err != nil { + return false + } + + // Inlined code of the skipSpaces function, this give a ~15% speed boost. + i := 0 +skipLoop: + for _, c := range t.json { + switch c { + case sp, ht, nl, cr: + i++ + default: + break skipLoop + } + } + + if i > 0 { + t.json = t.json[i:] + } + + if len(t.json) == 0 { + t.Reset(nil) + return false + } + + var kind Kind + switch t.json[0] { + case '"': + t.Delim = 0 + t.Value, t.json, kind, t.Err = t.parseString(t.json) + case 'n': + t.Delim = 0 + t.Value, t.json, kind, t.Err = t.parseNull(t.json) + case 't': + t.Delim = 0 + t.Value, t.json, kind, t.Err = t.parseTrue(t.json) + case 'f': + t.Delim = 0 + t.Value, t.json, kind, t.Err = t.parseFalse(t.json) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + t.Delim = 0 + t.Value, t.json, kind, t.Err = t.parseNumber(t.json) + case '{', '}', '[', ']', ':', ',': + t.Delim, t.Value, t.json = Delim(t.json[0]), t.json[:1], t.json[1:] + switch t.Delim { + case '{': + kind = Object + case '[': + kind = Array + } + default: + t.Delim = 0 + t.Value, t.json, t.Err = t.json[:1], t.json[1:], syntaxError(t.json, "expected token but found '%c'", t.json[0]) + } + + t.Depth = t.depth() + t.Index = t.index() + t.flags = t.flags.withKind(kind) + + if t.Delim == 0 { + t.IsKey = t.isKey + } else { + t.IsKey = false + + switch t.Delim { + case '{': + t.isKey = true + t.push(inObject) + case '[': + t.push(inArray) + case '}': + t.Err = t.pop(inObject) + t.Depth-- + t.Index = t.index() + case ']': + t.Err = t.pop(inArray) + t.Depth-- + t.Index = t.index() + case ':': + t.isKey = false + case ',': + if t.stack == nil || len(t.stack.state) == 0 { + t.Err = syntaxError(t.json, "found unexpected comma") + return false + } + if t.stack.is(inObject) { + t.isKey = true + } + t.stack.state[len(t.stack.state)-1].len++ + } + } + + 
return (t.Delim != 0 || len(t.Value) != 0) && t.Err == nil +} + +func (t *Tokenizer) depth() int { + if t.stack == nil { + return 0 + } + return t.stack.depth() +} + +func (t *Tokenizer) index() int { + if t.stack == nil { + return 0 + } + return t.stack.index() +} + +func (t *Tokenizer) push(typ scope) { + if t.stack == nil { + t.stack = acquireStack() + } + t.stack.push(typ) +} + +func (t *Tokenizer) pop(expect scope) error { + if t.stack == nil || !t.stack.pop(expect) { + return syntaxError(t.json, "found unexpected character while tokenizing json input") + } + return nil +} + +// Kind returns the kind of the value that the tokenizer is currently positioned +// on. +func (t *Tokenizer) Kind() Kind { return t.flags.kind() } + +// Bool returns a bool containing the value of the json boolean that the +// tokenizer is currently pointing at. +// +// This method must only be called after checking the kind of the token via a +// call to Kind. +// +// If the tokenizer is not positioned on a boolean, the behavior is undefined. +func (t *Tokenizer) Bool() bool { return t.flags.kind() == True } + +// Int returns a byte slice containing the value of the json number that the +// tokenizer is currently pointing at. +// +// This method must only be called after checking the kind of the token via a +// call to Kind. +// +// If the tokenizer is not positioned on an integer, the behavior is undefined. +func (t *Tokenizer) Int() int64 { + i, _, _ := t.parseInt(t.Value, int64Type) + return i +} + +// Uint returns a byte slice containing the value of the json number that the +// tokenizer is currently pointing at. +// +// This method must only be called after checking the kind of the token via a +// call to Kind. +// +// If the tokenizer is not positioned on a positive integer, the behavior is +// undefined. 
+func (t *Tokenizer) Uint() uint64 {
+	u, _, _ := t.parseUint(t.Value, uint64Type)
+	return u
+}
+
+// Float returns the float64 value of the json number that the tokenizer is
+// currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on a number, the behavior is undefined.
+func (t *Tokenizer) Float() float64 {
+	f, _ := strconv.ParseFloat(*(*string)(unsafe.Pointer(&t.Value)), 64)
+	return f
+}
+
+// String returns a byte slice containing the value of the json string that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// When possible, the returned byte slice references the backing array of the
+// tokenizer. A new slice is only allocated if the tokenizer needed to unescape
+// the json string.
+//
+// If the tokenizer is not positioned on a string, the behavior is undefined.
+func (t *Tokenizer) String() []byte {
+	if t.flags.kind() == Unescaped && len(t.Value) > 1 {
+		return t.Value[1 : len(t.Value)-1] // unquote
+	}
+	s, _, _, _ := t.parseStringUnquote(t.Value, nil)
+	return s
+}
+
+// Remaining returns the number of bytes left to parse.
+//
+// The position of the tokenizer's current Value within the original byte slice
+// can be calculated like so:
+//
+//	end := len(b) - tok.Remaining()
+//	start := end - len(tok.Value)
+//
+// And slicing b[start:end] will yield the tokenizer's current Value.
+func (t *Tokenizer) Remaining() int {
+	return len(t.json)
+}
+
+// RawValue represents a raw json value, it is intended to carry null, true,
+// false, number, and string values only.
+type RawValue []byte
+
+// String returns true if v contains a string value.
+func (v RawValue) String() bool { return len(v) != 0 && v[0] == '"' }
+
+// Null returns true if v contains a null value.
+func (v RawValue) Null() bool { return len(v) != 0 && v[0] == 'n' } + +// True returns true if v contains a true value. +func (v RawValue) True() bool { return len(v) != 0 && v[0] == 't' } + +// False returns true if v contains a false value. +func (v RawValue) False() bool { return len(v) != 0 && v[0] == 'f' } + +// Number returns true if v contains a number value. +func (v RawValue) Number() bool { + if len(v) != 0 { + switch v[0] { + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + } + } + return false +} + +// AppendUnquote writes the unquoted version of the string value in v into b. +func (v RawValue) AppendUnquote(b []byte) []byte { + d := decoder{} + s, r, _, err := d.parseStringUnquote(v, b) + if err != nil { + panic(err) + } + if len(r) != 0 { + panic(syntaxError(r, "unexpected trailing tokens after json value")) + } + return append(b, s...) +} + +// Unquote returns the unquoted version of the string value in v. +func (v RawValue) Unquote() []byte { + return v.AppendUnquote(nil) +} + +type scope int + +const ( + inArray scope = iota + inObject +) + +type state struct { + typ scope + len int +} + +type stack struct { + state []state +} + +func (s *stack) push(typ scope) { + s.state = append(s.state, state{typ: typ, len: 1}) +} + +func (s *stack) pop(expect scope) bool { + i := len(s.state) - 1 + + if i < 0 { + return false + } + + if found := s.state[i]; expect != found.typ { + return false + } + + s.state = s.state[:i] + return true +} + +func (s *stack) is(typ scope) bool { + return len(s.state) != 0 && s.state[len(s.state)-1].typ == typ +} + +func (s *stack) depth() int { + return len(s.state) +} + +func (s *stack) index() int { + if len(s.state) == 0 { + return 0 + } + return s.state[len(s.state)-1].len - 1 +} + +func acquireStack() *stack { + s, _ := stackPool.Get().(*stack) + if s == nil { + s = &stack{state: make([]state, 0, 4)} + } else { + s.state = s.state[:0] + } + return s +} + +func releaseStack(s *stack) { + 
stackPool.Put(s) +} + +var stackPool sync.Pool // *stack diff --git a/vendor/modules.txt b/vendor/modules.txt index 051e356c..44d712c6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1193,10 +1193,12 @@ github.com/mitchellh/mapstructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk -# github.com/modelcontextprotocol/go-sdk v1.3.0 -## explicit; go 1.23.0 +# github.com/modelcontextprotocol/go-sdk v1.4.1 +## explicit; go 1.25.0 github.com/modelcontextprotocol/go-sdk/auth +github.com/modelcontextprotocol/go-sdk/internal/json github.com/modelcontextprotocol/go-sdk/internal/jsonrpc2 +github.com/modelcontextprotocol/go-sdk/internal/mcpgodebug github.com/modelcontextprotocol/go-sdk/internal/util github.com/modelcontextprotocol/go-sdk/internal/xcontext github.com/modelcontextprotocol/go-sdk/jsonrpc @@ -1398,6 +1400,22 @@ github.com/securego/gosec/v2/analyzers github.com/securego/gosec/v2/cwe github.com/securego/gosec/v2/issue github.com/securego/gosec/v2/rules +# github.com/segmentio/asm v1.1.3 +## explicit; go 1.17 +github.com/segmentio/asm/ascii +github.com/segmentio/asm/base64 +github.com/segmentio/asm/cpu +github.com/segmentio/asm/cpu/arm +github.com/segmentio/asm/cpu/arm64 +github.com/segmentio/asm/cpu/cpuid +github.com/segmentio/asm/cpu/x86 +github.com/segmentio/asm/internal/unsafebytes +github.com/segmentio/asm/keyset +# github.com/segmentio/encoding v0.5.4 +## explicit; go 1.23 +github.com/segmentio/encoding/ascii +github.com/segmentio/encoding/iso8601 +github.com/segmentio/encoding/json # github.com/shirou/gopsutil/v4 v4.25.12 ## explicit; go 1.24.0 github.com/shirou/gopsutil/v4/common