diff --git a/devtools/deployments/multi-tenancy/.env.example b/devtools/deployments/multi-tenancy/.env.example
new file mode 100644
index 0000000000..2736dda3ff
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/.env.example
@@ -0,0 +1,161 @@
+## Basic Settings ##
+# Define the docker compose log driver used.
+# Defaults to local
+LOG_DRIVER=
+# If you're on an internet facing server, comment out following line.
+# It skips certificate validation for various parts of OpenCloud and is
+# needed when self signed certificates are used.
+INSECURE=true
+
+## Features ##
+COMPOSE_FILE=docker-compose.yml:traefik.yml:keycloak.yml:ldap-server.yml
+
+## Traefik Settings ##
+# Note: Traefik is always enabled and can't be disabled.
+# Serve Traefik dashboard.
+# Defaults to "false".
+TRAEFIK_DASHBOARD=
+# Domain of Traefik, where you can find the dashboard.
+# Defaults to "traefik.opencloud.test"
+TRAEFIK_DOMAIN=
+# Basic authentication for the traefik dashboard.
+# Defaults to user "admin" and password "admin" (written as: "admin:$2y$05$KDHu3xq92SPaO3G8Ybkc7edd51pPLJcG1nWk3lmlrIdANQ/B6r5pq").
+# To create user:password pair, it's possible to use this command:
+# echo $(htpasswd -nB user) | sed -e s/\\$/\\$\\$/g
+TRAEFIK_BASIC_AUTH_USERS=
+# Email address for obtaining LetsEncrypt certificates.
+# Needs only be changed if this is a public facing server.
+TRAEFIK_ACME_MAIL=
+# Set to the following for testing to check the certificate process:
+# "https://acme-staging-v02.api.letsencrypt.org/directory"
+# With staging configured, there will be an SSL error in the browser.
+# When the displayed certificates are issued by "Fake LE Intermediate X1",
+# the process went well and the envvar can be reset to empty to get valid certificates.
+TRAEFIK_ACME_CASERVER=
+# Enable the Traefik ACME (Automatic Certificate Management Environment) for automatic SSL certificate management.
+TRAEFIK_SERVICES_TLS_CONFIG="tls.certresolver=letsencrypt"
+# Enable Traefik to use local certificates.
+#TRAEFIK_SERVICES_TLS_CONFIG="tls=true"
+# You also need to provide a config file in ./config/traefik/dynamic/certs.yml
+# Example:
+# cat ./config/traefik/dynamic/certs.yml
+# tls:
+# certificates:
+# - certFile: /certs/opencloud.test.crt
+# keyFile: /certs/opencloud.test.key
+# stores:
+# - default
+#
+# The certificates need to be copied into ./certs/, the absolute path inside the container is /certs/.
+# You can also use TRAEFIK_CERTS_DIR=/path/on/host to set the path to the certificates directory.
+# Enable the access log for Traefik by setting the following variable to true.
+TRAEFIK_ACCESS_LOG=
+# Configure the log level for Traefik.
+# Possible values are "TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL" and "PANIC". Default is "ERROR".
+TRAEFIK_LOG_LEVEL=
+
+
+## OpenCloud Settings ##
+# The opencloud container image.
+# For production releases: "opencloudeu/opencloud"
+# For rolling releases: "opencloudeu/opencloud-rolling"
+# Defaults to production if not set otherwise
+OC_DOCKER_IMAGE=opencloudeu/opencloud-rolling
+# The openCloud container version.
+# Defaults to "latest" and points to the latest stable tag.
+OC_DOCKER_TAG=
+# Domain of openCloud, where you can find the frontend.
+# Defaults to "cloud.opencloud.test"
+OC_DOMAIN=
+# Demo users should not be created on a production instance,
+# because their passwords are public. Defaults to "false".
+# If demo users is set to "true", the following user accounts are created automatically:
+# alan, mary, margaret, dennis and lynn - the password is 'demo' for all.
+DEMO_USERS=
+# Admin Password for the OpenCloud admin user.
+# NOTE: This is only needed when using the built-in LDAP server (idm).
+# If you are using an external LDAP server, the admin password is managed by the LDAP server.
+# NOTE: This variable needs to be set before the first start of OpenCloud. Changes to this variable after the first start will be IGNORED.
+# If not set, opencloud will not work properly. The container will be restarting.
+# After the first initialization, the admin password can only be changed via the OpenCloud User Settings UI or by using the OpenCloud CLI.
+# Documentation: https://docs.opencloud.eu/docs/admin/resources/common-issues#-change-admin-password-set-in-env
+INITIAL_ADMIN_PASSWORD=
+# Define the openCloud loglevel used.
+#
+LOG_LEVEL=
+# Define the kind of logging.
+# The default log can be read by machines.
+# Set this to true to make the log human readable.
+# LOG_PRETTY=true
+#
+# Define the openCloud storage location. Set the paths for config and data to a local path.
+# Ensure that the configuration and data directories are owned by the user and group with ID 1000:1000.
+# This matches the default user inside the container and avoids permission issues when accessing files.
+# Note that especially the data directory can grow big.
+# Leaving it default stores data in docker internal volumes.
+# OC_CONFIG_DIR=/your/local/opencloud/config
+# OC_DATA_DIR=/your/local/opencloud/data
+
+### Compose Configuration ###
+# Path separator for supplemental compose files specified in COMPOSE_FILE.
+COMPOSE_PATH_SEPARATOR=:
+
+### Ldap Settings ###
+# LDAP is always needed for OpenCloud to store user data as there is no relational database.
+# The built-in LDAP server should be used for testing purposes or small installations only.
+# For production installations, it is recommended to use an external LDAP server.
+# We are using OpenLDAP as the default LDAP server because it is proven to be stable and reliable.
+# This LDAP configuration is known to work with OpenCloud and provides a blueprint for
+# configuring an external LDAP server based on other products like Microsoft Active Directory or other LDAP servers.
+#
+# Password of LDAP bind user "cn=admin,dc=opencloud,dc=eu". Defaults to "admin"
+LDAP_BIND_PASSWORD=
+# The LDAP server also creates an openCloud admin user dn: uid=admin,ou=users,dc=opencloud,dc=eu
+# The initial password for this user is "admin"
+# NOTE: This password can only be set once, if you want to change it later, you have to use the OpenCloud User Settings UI.
+# If you changed the password and lost it, you need to execute the following LDAP query to reset it:
+# enter the ldap-server container with `docker compose exec ldap-server sh`
+# and run the following command to change the password:
+# ldappasswd -H ldap://127.0.0.1:1389 -D "cn=admin,dc=opencloud,dc=eu" -W "uid=admin,ou=users,dc=opencloud,dc=eu"
+# You will be prompted for the LDAP bind password.
+# The output should provide you a new password for the admin user.
+
+
+### Keycloak Settings ###
+# Keycloak is an open-source identity and access management solution.
+# We are using Keycloak as the default identity provider on production installations.
+# It can be used to federate authentication with other identity providers like
+# Microsoft Entra ID, ADFS or other SAML/OIDC providers.
+# The use of Keycloak as bridge between OpenCloud and other identity providers creates more control over the
+# authentication process, the allowed clients and the session management.
+# Keycloak also manages the Role Based Access Control (RBAC) for OpenCloud.
+# Keycloak can be used in two different modes:
+# 1. Autoprovisioning: New users are automatically created in openCloud when they log in for the first time.
+# 2. Shared User Directory: Users are created in Keycloak and can be used in OpenCloud immediately
+# because the LDAP server is connected to both Keycloak and OpenCloud.
+# Only use one of the two modes at a time.
+
+## Autoprovisioning Mode ##
+# Use together with idm/external-idp.yml
+# If you want to use a keycloak for local testing, you can use testing/external-keycloak.yml and testing/ldap-manager.yml
+# Domain of your Identity Provider.
+IDP_DOMAIN=
+# IdP Issuer URL, which is used to identify the Identity Provider.
+# We need the complete URL, including the protocol (http or https) and the realm.
+# Example: "https://keycloak.opencloud.test/realms/openCloud"
+IDP_ISSUER_URL=
+# Url of the account edit page from your Identity Provider.
+IDP_ACCOUNT_URL=
+
+## Shared User Directory Mode ##
+# Use together with idm/ldap-keycloak.yml and traefik/ldap-keycloak.yml
+# Domain for Keycloak. Defaults to "keycloak.opencloud.test".
+KEYCLOAK_DOMAIN=
+# Admin user login name. Defaults to "kcadmin".
+KEYCLOAK_ADMIN=
+# Admin user login password. Defaults to "admin".
+KEYCLOAK_ADMIN_PASSWORD=
+# Keycloak Database username. Defaults to "keycloak".
+KC_DB_USERNAME=
+# Keycloak Database password. Defaults to "keycloak".
+KC_DB_PASSWORD=
diff --git a/devtools/deployments/multi-tenancy/README.md b/devtools/deployments/multi-tenancy/README.md
new file mode 100644
index 0000000000..f3c4c91184
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/README.md
@@ -0,0 +1,49 @@
+# Development/Test Deployment for a multi-tenancy setup
+
+The docker compose files in this directory are derived from the
+opencloud-compose project and can be used to deploy a Development or Testing
+environment for a multi-tenancy setup of OpenCloud. It consists of the
+following services:
+
+* `provisioning`: The OpenCloud graph service deployed in a standalone mode. It
+ is configured to provide the libregraph education API for managing tenants
+  and users. The `ldap-server` service (see below) is used to store the tenants
+ and users.
+* `ldap-server`: An OpenLDAP server that is used by the provisioning service to
+ store tenants and users. Used by the OpenCloud services as the user directory
+ (for looking up users and searching for sharees).
+* `keycloak`: The OpenID Connect Provider used for authenticating users. The
+ pre-loaded realm is configured to add `tenantid` claim into the identity and
+  access tokens. It's also currently consuming users from the `ldap-server`
+ (this federation will likely go away in the future and is optional for future
+ configurations).
+* `opencloud`: The OpenCloud instance, configured so that it hides users from different
+ tenants from each other.
+
+To deploy the setup, run:
+
+```bash
+docker compose -f docker-compose.yml -f keycloak.yml -f ldap-server.yml -f traefik.yml up
+```
+
+Once deployed you can use the `initialize_users.go` to create a couple of example
+tenants and some users in each tenant:
+
+* Tenant `Famous Coders` with users `dennis` and `grace`
+* Tenant `Scientists` with users `einstein` and `marie`
+
+The passwords for the users are set to `demo` in Keycloak.
+
+```
+> go run initialize_users.go
+Created tenant: Famous Coders with id fc58e19a-3a2a-4afc-90ec-8f94986db340
+Created user: Dennis Ritchie with id ee1e14e7-b00b-4eec-8b03-a6bf0e29c77c
+Created user: Grace Hopper with id a29f3afd-e4a3-4552-91e8-cc99e26bffce
+Created tenant: Scientists with id 18406c53-e2d6-4e83-98b6-a55880eef195
+Created user: Albert Einstein with id 12023d37-d6ce-4f19-a318-b70866f265ba
+Created user: Marie Curie with id 30c3c825-c37d-4e85-8195-0142e4884872
+Setting password for user: grace
+Setting password for user: marie
+Setting password for user: dennis
+Setting password for user: einstein
+```
diff --git a/devtools/deployments/multi-tenancy/certs/.gitkeep b/devtools/deployments/multi-tenancy/certs/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/devtools/deployments/multi-tenancy/certs/acme.json b/devtools/deployments/multi-tenancy/certs/acme.json
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudAndroid.json b/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudAndroid.json
new file mode 100644
index 0000000000..c21838d67b
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudAndroid.json
@@ -0,0 +1,63 @@
+{
+ "clientId": "OpenCloudAndroid",
+ "name": "OpenCloud Android App",
+ "surrogateAuthRequired": false,
+ "enabled": true,
+ "alwaysDisplayInConsole": false,
+ "clientAuthenticatorType": "client-secret",
+ "redirectUris": [
+ "oc://android.opencloud.eu"
+ ],
+ "webOrigins": [],
+ "notBefore": 0,
+ "bearerOnly": false,
+ "consentRequired": false,
+ "standardFlowEnabled": true,
+ "implicitFlowEnabled": false,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": false,
+ "publicClient": true,
+ "frontchannelLogout": false,
+ "protocol": "openid-connect",
+ "attributes": {
+ "saml.assertion.signature": "false",
+ "saml.force.post.binding": "false",
+ "saml.multivalued.roles": "false",
+ "saml.encrypt": "false",
+ "post.logout.redirect.uris": "oc://android.opencloud.eu",
+ "backchannel.logout.revoke.offline.tokens": "false",
+ "saml.server.signature": "false",
+ "saml.server.signature.keyinfo.ext": "false",
+ "exclude.session.state.from.auth.response": "false",
+ "backchannel.logout.session.required": "true",
+ "client_credentials.use_refresh_token": "false",
+ "saml_force_name_id_format": "false",
+ "saml.client.signature": "false",
+ "tls.client.certificate.bound.access.tokens": "false",
+ "saml.authnstatement": "false",
+ "display.on.consent.screen": "false",
+ "saml.onetimeuse.condition": "false"
+ },
+ "authenticationFlowBindingOverrides": {},
+ "fullScopeAllowed": true,
+ "nodeReRegistrationTimeout": -1,
+ "defaultClientScopes": [
+ "web-origins",
+ "profile",
+ "roles",
+ "groups",
+ "basic",
+ "email"
+ ],
+ "optionalClientScopes": [
+ "address",
+ "phone",
+ "offline_access",
+ "microprofile-jwt"
+ ],
+ "access": {
+ "view": true,
+ "configure": true,
+ "manage": true
+ }
+}
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudDesktop.json b/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudDesktop.json
new file mode 100644
index 0000000000..d17a7cb6cf
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudDesktop.json
@@ -0,0 +1,64 @@
+{
+ "clientId": "OpenCloudDesktop",
+ "name": "OpenCloud Desktop Client",
+ "surrogateAuthRequired": false,
+ "enabled": true,
+ "alwaysDisplayInConsole": false,
+ "clientAuthenticatorType": "client-secret",
+ "redirectUris": [
+ "http://127.0.0.1",
+ "http://localhost"
+ ],
+ "webOrigins": [],
+ "notBefore": 0,
+ "bearerOnly": false,
+ "consentRequired": false,
+ "standardFlowEnabled": true,
+ "implicitFlowEnabled": false,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": false,
+ "publicClient": true,
+ "frontchannelLogout": false,
+ "protocol": "openid-connect",
+ "attributes": {
+ "saml.assertion.signature": "false",
+ "saml.force.post.binding": "false",
+ "saml.multivalued.roles": "false",
+ "saml.encrypt": "false",
+ "post.logout.redirect.uris": "+",
+ "backchannel.logout.revoke.offline.tokens": "false",
+ "saml.server.signature": "false",
+ "saml.server.signature.keyinfo.ext": "false",
+ "exclude.session.state.from.auth.response": "false",
+ "backchannel.logout.session.required": "true",
+ "client_credentials.use_refresh_token": "false",
+ "saml_force_name_id_format": "false",
+ "saml.client.signature": "false",
+ "tls.client.certificate.bound.access.tokens": "false",
+ "saml.authnstatement": "false",
+ "display.on.consent.screen": "false",
+ "saml.onetimeuse.condition": "false"
+ },
+ "authenticationFlowBindingOverrides": {},
+ "fullScopeAllowed": true,
+ "nodeReRegistrationTimeout": -1,
+ "defaultClientScopes": [
+ "web-origins",
+ "profile",
+ "roles",
+ "groups",
+ "basic",
+ "email"
+ ],
+ "optionalClientScopes": [
+ "address",
+ "phone",
+ "offline_access",
+ "microprofile-jwt"
+ ],
+ "access": {
+ "view": true,
+ "configure": true,
+ "manage": true
+ }
+}
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudIOS.json b/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudIOS.json
new file mode 100644
index 0000000000..d09e089570
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/clients/OpenCloudIOS.json
@@ -0,0 +1,63 @@
+{
+ "clientId": "OpenCloudIOS",
+ "name": "OpenCloud iOS App",
+ "surrogateAuthRequired": false,
+ "enabled": true,
+ "alwaysDisplayInConsole": false,
+ "clientAuthenticatorType": "client-secret",
+ "redirectUris": [
+ "oc://ios.opencloud.eu"
+ ],
+ "webOrigins": [],
+ "notBefore": 0,
+ "bearerOnly": false,
+ "consentRequired": false,
+ "standardFlowEnabled": true,
+ "implicitFlowEnabled": false,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": false,
+ "publicClient": true,
+ "frontchannelLogout": false,
+ "protocol": "openid-connect",
+ "attributes": {
+ "saml.assertion.signature": "false",
+ "saml.force.post.binding": "false",
+ "saml.multivalued.roles": "false",
+ "saml.encrypt": "false",
+ "post.logout.redirect.uris": "oc://ios.opencloud.eu",
+ "backchannel.logout.revoke.offline.tokens": "false",
+ "saml.server.signature": "false",
+ "saml.server.signature.keyinfo.ext": "false",
+ "exclude.session.state.from.auth.response": "false",
+ "backchannel.logout.session.required": "true",
+ "client_credentials.use_refresh_token": "false",
+ "saml_force_name_id_format": "false",
+ "saml.client.signature": "false",
+ "tls.client.certificate.bound.access.tokens": "false",
+ "saml.authnstatement": "false",
+ "display.on.consent.screen": "false",
+ "saml.onetimeuse.condition": "false"
+ },
+ "authenticationFlowBindingOverrides": {},
+ "fullScopeAllowed": true,
+ "nodeReRegistrationTimeout": -1,
+ "defaultClientScopes": [
+ "web-origins",
+ "profile",
+ "roles",
+ "groups",
+ "basic",
+ "email"
+ ],
+ "optionalClientScopes": [
+ "address",
+ "phone",
+ "offline_access",
+ "microprofile-jwt"
+ ],
+ "access": {
+ "view": true,
+ "configure": true,
+ "manage": true
+ }
+}
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/clients/cyberduck.json b/devtools/deployments/multi-tenancy/config/keycloak/clients/cyberduck.json
new file mode 100644
index 0000000000..5143323a44
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/clients/cyberduck.json
@@ -0,0 +1,66 @@
+{
+ "clientId": "Cyberduck",
+ "name": "Cyberduck",
+ "description": "File transfer utility client",
+ "surrogateAuthRequired": false,
+ "enabled": true,
+ "alwaysDisplayInConsole": false,
+ "clientAuthenticatorType": "client-secret",
+ "redirectUris": [
+ "x-cyberduck-action:oauth",
+ "x-mountainduck-action:oauth"
+ ],
+ "webOrigins": [],
+ "notBefore": 0,
+ "bearerOnly": false,
+ "consentRequired": false,
+ "standardFlowEnabled": true,
+ "implicitFlowEnabled": false,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": false,
+ "publicClient": true,
+ "frontchannelLogout": false,
+ "protocol": "openid-connect",
+ "attributes": {
+ "saml.assertion.signature": "false",
+ "saml.force.post.binding": "false",
+ "saml.multivalued.roles": "false",
+ "saml.encrypt": "false",
+ "oauth2.device.authorization.grant.enabled": "false",
+ "backchannel.logout.revoke.offline.tokens": "false",
+ "saml.server.signature": "false",
+ "saml.server.signature.keyinfo.ext": "false",
+ "exclude.session.state.from.auth.response": "false",
+ "oidc.ciba.grant.enabled": "false",
+ "backchannel.logout.session.required": "true",
+ "client_credentials.use_refresh_token": "false",
+ "saml_force_name_id_format": "false",
+ "saml.client.signature": "false",
+ "tls.client.certificate.bound.access.tokens": "false",
+ "saml.authnstatement": "false",
+ "display.on.consent.screen": "false",
+ "saml.onetimeuse.condition": "false"
+ },
+ "authenticationFlowBindingOverrides": {},
+ "fullScopeAllowed": true,
+ "nodeReRegistrationTimeout": -1,
+ "defaultClientScopes": [
+ "web-origins",
+ "profile",
+ "roles",
+ "groups",
+ "basic",
+ "email"
+ ],
+ "optionalClientScopes": [
+ "address",
+ "phone",
+ "offline_access",
+ "microprofile-jwt"
+ ],
+ "access": {
+ "view": true,
+ "configure": true,
+ "manage": true
+ }
+}
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/clients/web.json b/devtools/deployments/multi-tenancy/config/keycloak/clients/web.json
new file mode 100644
index 0000000000..9ab819a44e
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/clients/web.json
@@ -0,0 +1,74 @@
+{
+ "clientId": "web",
+ "name": "OpenCloud Web App",
+ "description": "",
+ "rootUrl": "{{OC_URL}}",
+ "adminUrl": "{{OC_URL}}",
+ "baseUrl": "",
+ "surrogateAuthRequired": false,
+ "enabled": true,
+ "alwaysDisplayInConsole": false,
+ "clientAuthenticatorType": "client-secret",
+ "redirectUris": [
+ "{{OC_URL}}/",
+ "{{OC_URL}}/oidc-callback.html",
+ "{{OC_URL}}/oidc-silent-redirect.html"
+ ],
+ "webOrigins": [
+ "{{OC_URL}}"
+ ],
+ "notBefore": 0,
+ "bearerOnly": false,
+ "consentRequired": false,
+ "standardFlowEnabled": true,
+ "implicitFlowEnabled": false,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": false,
+ "publicClient": true,
+ "frontchannelLogout": false,
+ "protocol": "openid-connect",
+ "attributes": {
+ "saml.assertion.signature": "false",
+ "saml.force.post.binding": "false",
+ "saml.multivalued.roles": "false",
+ "saml.encrypt": "false",
+ "post.logout.redirect.uris": "+",
+ "oauth2.device.authorization.grant.enabled": "false",
+ "backchannel.logout.revoke.offline.tokens": "false",
+ "saml.server.signature": "false",
+ "saml.server.signature.keyinfo.ext": "false",
+ "exclude.session.state.from.auth.response": "false",
+ "oidc.ciba.grant.enabled": "false",
+ "backchannel.logout.url": "{{OC_URL}}/backchannel_logout",
+ "backchannel.logout.session.required": "true",
+ "client_credentials.use_refresh_token": "false",
+ "saml_force_name_id_format": "false",
+ "saml.client.signature": "false",
+ "tls.client.certificate.bound.access.tokens": "false",
+ "saml.authnstatement": "false",
+ "display.on.consent.screen": "false",
+ "saml.onetimeuse.condition": "false"
+ },
+ "authenticationFlowBindingOverrides": {},
+ "fullScopeAllowed": true,
+ "nodeReRegistrationTimeout": -1,
+ "defaultClientScopes": [
+ "web-origins",
+ "profile",
+ "roles",
+ "groups",
+ "basic",
+ "email"
+ ],
+ "optionalClientScopes": [
+ "address",
+ "phone",
+ "offline_access",
+ "microprofile-jwt"
+ ],
+ "access": {
+ "view": true,
+ "configure": true,
+ "manage": true
+ }
+}
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/docker-entrypoint-override.sh b/devtools/deployments/multi-tenancy/config/keycloak/docker-entrypoint-override.sh
new file mode 100644
index 0000000000..4809750389
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/docker-entrypoint-override.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+# Substitute the OpenCloud domain and LDAP admin password into the Keycloak realm import.
+mkdir -p /opt/keycloak/data/import
+sed -e "s/cloud.opencloud.test/${OC_DOMAIN}/g" -e "s/ldap-admin-password/${LDAP_ADMIN_PASSWORD:-admin}/g" /opt/keycloak/data/import-dist/openCloud-realm.json > /opt/keycloak/data/import/openCloud-realm.json
+
+# Hand off to the original Keycloak entrypoint; exec so kc.sh becomes PID 1 and receives container stop signals.
+exec /opt/keycloak/bin/kc.sh "$@"
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/openCloud-realm.dist.json b/devtools/deployments/multi-tenancy/config/keycloak/openCloud-realm.dist.json
new file mode 100644
index 0000000000..65bb52306e
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/openCloud-realm.dist.json
@@ -0,0 +1,2419 @@
+{
+ "id" : "openCloud",
+ "realm" : "openCloud",
+ "displayName" : "OpenCloud",
+  "displayNameHtml" : "<div class=\"kc-logo-text\"><span>OpenCloud</span></div>",
+ "notBefore" : 0,
+ "defaultSignatureAlgorithm" : "RS256",
+ "revokeRefreshToken" : false,
+ "refreshTokenMaxReuse" : 0,
+ "accessTokenLifespan" : 300,
+ "accessTokenLifespanForImplicitFlow" : 900,
+ "ssoSessionIdleTimeout" : 1800,
+ "ssoSessionMaxLifespan" : 36000,
+ "ssoSessionIdleTimeoutRememberMe" : 0,
+ "ssoSessionMaxLifespanRememberMe" : 0,
+ "offlineSessionIdleTimeout" : 2592000,
+ "offlineSessionMaxLifespanEnabled" : false,
+ "offlineSessionMaxLifespan" : 5184000,
+ "clientSessionIdleTimeout" : 0,
+ "clientSessionMaxLifespan" : 0,
+ "clientOfflineSessionIdleTimeout" : 0,
+ "clientOfflineSessionMaxLifespan" : 0,
+ "accessCodeLifespan" : 60,
+ "accessCodeLifespanUserAction" : 300,
+ "accessCodeLifespanLogin" : 1800,
+ "actionTokenGeneratedByAdminLifespan" : 43200,
+ "actionTokenGeneratedByUserLifespan" : 300,
+ "oauth2DeviceCodeLifespan" : 600,
+ "oauth2DevicePollingInterval" : 5,
+ "enabled" : true,
+ "sslRequired" : "external",
+ "registrationAllowed" : false,
+ "registrationEmailAsUsername" : false,
+ "rememberMe" : true,
+ "verifyEmail" : false,
+ "loginWithEmailAllowed" : true,
+ "duplicateEmailsAllowed" : false,
+ "resetPasswordAllowed" : true,
+ "editUsernameAllowed" : false,
+ "bruteForceProtected" : true,
+ "permanentLockout" : false,
+ "maxTemporaryLockouts" : 0,
+ "bruteForceStrategy" : "MULTIPLE",
+ "maxFailureWaitSeconds" : 900,
+ "minimumQuickLoginWaitSeconds" : 60,
+ "waitIncrementSeconds" : 60,
+ "quickLoginCheckMilliSeconds" : 1000,
+ "maxDeltaTimeSeconds" : 43200,
+ "failureFactor" : 30,
+ "roles" : {
+ "realm" : [ {
+ "id" : "2d576514-4aae-46aa-9d9c-075f55f4d988",
+ "name" : "uma_authorization",
+ "description" : "${role_uma_authorization}",
+ "composite" : false,
+ "clientRole" : false,
+ "containerId" : "openCloud",
+ "attributes" : { }
+ }, {
+ "id" : "2aadd357-682c-406b-8874-293091995fdd",
+ "name" : "opencloudSpaceAdmin",
+ "description" : "",
+ "composite" : false,
+ "clientRole" : false,
+ "containerId" : "openCloud",
+ "attributes" : { }
+ }, {
+ "id" : "38071a68-456a-4553-846a-fa67bf5596cc",
+ "name" : "opencloudGuest",
+ "description" : "",
+ "composite" : false,
+ "clientRole" : false,
+ "containerId" : "openCloud",
+ "attributes" : { }
+ }, {
+ "id" : "71881883-1768-46bd-a24d-a356a2afdf7f",
+ "name" : "opencloudAdmin",
+ "description" : "",
+ "composite" : false,
+ "clientRole" : false,
+ "containerId" : "openCloud",
+ "attributes" : { }
+ }, {
+ "id" : "e2145b30-bf6f-49fb-af3f-1b40168bfcef",
+ "name" : "offline_access",
+ "description" : "${role_offline-access}",
+ "composite" : false,
+ "clientRole" : false,
+ "containerId" : "openCloud",
+ "attributes" : { }
+ }, {
+ "id" : "82e13ea7-aac4-4d2c-9fc7-cff8333dbe19",
+ "name" : "default-roles-opencloud",
+ "description" : "${role_default-roles}",
+ "composite" : true,
+ "composites" : {
+ "realm" : [ "opencloudGuest", "offline_access", "uma_authorization" ],
+ "client" : {
+ "account" : [ "manage-account", "view-profile" ]
+ }
+ },
+ "clientRole" : false,
+ "containerId" : "openCloud",
+ "attributes" : { }
+ }, {
+ "id" : "d7beeea8-8ff4-406b-8fb6-ab2dd81e6b11",
+ "name" : "opencloudUser",
+ "description" : "",
+ "composite" : false,
+ "clientRole" : false,
+ "containerId" : "openCloud",
+ "attributes" : { }
+ } ],
+ "client" : {
+ "_system" : [ ],
+ "realm-management" : [ {
+ "id" : "979ce053-a671-4b50-81d5-da4bdf7404c9",
+ "name" : "view-clients",
+ "description" : "${role_view-clients}",
+ "composite" : true,
+ "composites" : {
+ "client" : {
+ "realm-management" : [ "query-clients" ]
+ }
+ },
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "4bec4791-e888-4dac-bc95-71720d5981b9",
+ "name" : "query-users",
+ "description" : "${role_query-users}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "955b4406-b04f-432d-a61a-571675874341",
+ "name" : "manage-authorization",
+ "description" : "${role_manage-authorization}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "baa219af-2773-4d59-b06b-485f10fbbab3",
+ "name" : "view-events",
+ "description" : "${role_view-events}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "f280bc03-d079-478d-be06-3590580b25e9",
+ "name" : "manage-users",
+ "description" : "${role_manage-users}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "db698163-84ad-46c9-958f-bb5f80ae78b5",
+ "name" : "query-clients",
+ "description" : "${role_query-clients}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "36c04d89-abf7-4a2c-a808-8efa9aca1435",
+ "name" : "manage-clients",
+ "description" : "${role_manage-clients}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "06eae953-11d5-4344-b089-ffce1e68d5d8",
+ "name" : "query-realms",
+ "description" : "${role_query-realms}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "afe8aa78-2f06-43a5-8c99-cf68a1f5a86a",
+ "name" : "realm-admin",
+ "description" : "${role_realm-admin}",
+ "composite" : true,
+ "composites" : {
+ "client" : {
+ "realm-management" : [ "view-clients", "query-users", "manage-authorization", "view-events", "manage-users", "query-clients", "manage-clients", "query-realms", "impersonation", "manage-realm", "manage-identity-providers", "view-authorization", "create-client", "query-groups", "view-users", "view-realm", "view-identity-providers", "manage-events" ]
+ }
+ },
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "22ee128a-b28e-4c6a-aa8e-ad4136d74e1b",
+ "name" : "impersonation",
+ "description" : "${role_impersonation}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "89d4f119-7f87-44d9-8eef-d207304de778",
+ "name" : "manage-realm",
+ "description" : "${role_manage-realm}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "ebffeff4-6794-4003-a2ab-a79eff7d1baa",
+ "name" : "manage-identity-providers",
+ "description" : "${role_manage-identity-providers}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "2361a7ff-d2b3-43f5-b360-ad0e44fba65c",
+ "name" : "view-authorization",
+ "description" : "${role_view-authorization}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "f7bf6d7a-a861-49c6-8f6f-225c18d0a03a",
+ "name" : "create-client",
+ "description" : "${role_create-client}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "34ccce1c-5a7e-4268-8836-2276545be900",
+ "name" : "query-groups",
+ "description" : "${role_query-groups}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "430f7831-8f22-4518-bd15-2998eae45a51",
+ "name" : "view-users",
+ "description" : "${role_view-users}",
+ "composite" : true,
+ "composites" : {
+ "client" : {
+ "realm-management" : [ "query-groups", "query-users" ]
+ }
+ },
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "371a31e6-4494-4b74-b3ea-d030663423ed",
+ "name" : "view-realm",
+ "description" : "${role_view-realm}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "e875775b-7a3e-4a5d-9e4e-376351b78626",
+ "name" : "view-identity-providers",
+ "description" : "${role_view-identity-providers}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ }, {
+ "id" : "3dce7929-ee1f-40cd-9be1-7addcae92cef",
+ "name" : "manage-events",
+ "description" : "${role_manage-events}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "attributes" : { }
+ } ],
+ "OpenCloudDesktop" : [ ],
+ "web" : [ ],
+ "security-admin-console" : [ ],
+ "OpenCloudIOS" : [ ],
+ "admin-cli" : [ ],
+ "OpenCloudAndroid" : [ ],
+ "account-console" : [ ],
+ "broker" : [ {
+ "id" : "81fad68a-8dd8-4d79-9a8f-206a82460145",
+ "name" : "read-token",
+ "description" : "${role_read-token}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "002faf0a-716c-4230-81c7-ce22d1eb832c",
+ "attributes" : { }
+ } ],
+ "account" : [ {
+ "id" : "c49a49da-8ad0-44cb-b518-6d7d72cbe494",
+ "name" : "manage-account",
+ "description" : "${role_manage-account}",
+ "composite" : true,
+ "composites" : {
+ "client" : {
+ "account" : [ "manage-account-links" ]
+ }
+ },
+ "clientRole" : true,
+ "containerId" : "9850adad-7910-4b67-a790-da6444361618",
+ "attributes" : { }
+ }, {
+ "id" : "9dc2244e-b8a7-44f1-b173-d2b929fedcca",
+ "name" : "view-consent",
+ "description" : "${role_view-consent}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "9850adad-7910-4b67-a790-da6444361618",
+ "attributes" : { }
+ }, {
+ "id" : "ce115327-99c9-44d4-ba7d-820397dc11e6",
+ "name" : "manage-account-links",
+ "description" : "${role_manage-account-links}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "9850adad-7910-4b67-a790-da6444361618",
+ "attributes" : { }
+ }, {
+ "id" : "2ffdf854-084b-467a-91c6-7f07844efc9a",
+ "name" : "view-groups",
+ "description" : "${role_view-groups}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "9850adad-7910-4b67-a790-da6444361618",
+ "attributes" : { }
+ }, {
+ "id" : "8c45ca71-32aa-4547-932d-412da5e371ed",
+ "name" : "view-profile",
+ "description" : "${role_view-profile}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "9850adad-7910-4b67-a790-da6444361618",
+ "attributes" : { }
+ }, {
+ "id" : "cbeecf6d-9af8-4746-877b-74800a894c35",
+ "name" : "view-applications",
+ "description" : "${role_view-applications}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "9850adad-7910-4b67-a790-da6444361618",
+ "attributes" : { }
+ }, {
+ "id" : "ea798f64-b5f8-417f-9fe0-d3cd9172884f",
+ "name" : "delete-account",
+ "description" : "${role_delete-account}",
+ "composite" : false,
+ "clientRole" : true,
+ "containerId" : "9850adad-7910-4b67-a790-da6444361618",
+ "attributes" : { }
+ }, {
+ "id" : "e73aaf6d-e67b-491a-9cc3-78c32c82b42c",
+ "name" : "manage-consent",
+ "description" : "${role_manage-consent}",
+ "composite" : true,
+ "composites" : {
+ "client" : {
+ "account" : [ "view-consent" ]
+ }
+ },
+ "clientRole" : true,
+ "containerId" : "9850adad-7910-4b67-a790-da6444361618",
+ "attributes" : { }
+ } ]
+ }
+ },
+ "groups" : [ {
+ "id" : "ed57d574-3bd9-4b51-9691-5d1b6aef7c68",
+ "name" : "administrators",
+ "path" : "/administrators",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ "opencloudAdmin" ],
+ "clientRoles" : { }
+ }, {
+ "id" : "1bf1a677-302d-4c28-ac53-e83c87372a81",
+ "name" : "apollos",
+ "path" : "/apollos",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ "opencloudSpaceAdmin" ],
+ "clientRoles" : { }
+ }, {
+ "id" : "12611514-517a-4fe6-a85d-41a018e98598",
+ "name" : "basic-haters",
+ "path" : "/basic-haters",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ ],
+ "clientRoles" : { }
+ }, {
+ "id" : "7a56c37d-3b86-487e-abe5-144452678255",
+ "name" : "bible-readers",
+ "path" : "/bible-readers",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ ],
+ "clientRoles" : { }
+ }, {
+ "id" : "03a5d51c-5e55-40ab-b8c8-5b796f77ab34",
+ "name" : "chess-lovers",
+ "path" : "/chess-lovers",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ ],
+ "clientRoles" : { }
+ }, {
+ "id" : "255c4aea-a3c6-41f5-bca5-0dec59cca3ee",
+ "name" : "machine-lovers",
+ "path" : "/machine-lovers",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ ],
+ "clientRoles" : { }
+ }, {
+ "id" : "d1d0f574-6a8e-49da-981b-bd0f667ec650",
+ "name" : "programmers",
+ "path" : "/programmers",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ ],
+ "clientRoles" : { }
+ }, {
+ "id" : "f8ab7ab3-e44b-445b-86a7-26d61259a6a6",
+ "name" : "unix-lovers",
+ "path" : "/unix-lovers",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ "opencloudAdmin" ],
+ "clientRoles" : { }
+ }, {
+ "id" : "c3e5bec3-a125-4043-b872-5c38dc659a13",
+ "name" : "users",
+ "path" : "/users",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ "opencloudUser" ],
+ "clientRoles" : { }
+ }, {
+ "id" : "ed1c5f92-4938-494f-83d3-a73be393a270",
+ "name" : "vlsi-lovers",
+ "path" : "/vlsi-lovers",
+ "subGroups" : [ ],
+ "attributes" : { },
+ "realmRoles" : [ ],
+ "clientRoles" : { }
+ } ],
+ "defaultRole" : {
+ "id" : "82e13ea7-aac4-4d2c-9fc7-cff8333dbe19",
+ "name" : "default-roles-opencloud",
+ "description" : "${role_default-roles}",
+ "composite" : true,
+ "clientRole" : false,
+ "containerId" : "openCloud"
+ },
+ "requiredCredentials" : [ "password" ],
+ "otpPolicyType" : "totp",
+ "otpPolicyAlgorithm" : "HmacSHA1",
+ "otpPolicyInitialCounter" : 0,
+ "otpPolicyDigits" : 6,
+ "otpPolicyLookAheadWindow" : 1,
+ "otpPolicyPeriod" : 30,
+ "otpPolicyCodeReusable" : false,
+ "otpSupportedApplications" : [ "totpAppFreeOTPName", "totpAppGoogleName", "totpAppMicrosoftAuthenticatorName" ],
+ "localizationTexts" : { },
+ "webAuthnPolicyRpEntityName" : "keycloak",
+ "webAuthnPolicySignatureAlgorithms" : [ "ES256" ],
+ "webAuthnPolicyRpId" : "",
+ "webAuthnPolicyAttestationConveyancePreference" : "not specified",
+ "webAuthnPolicyAuthenticatorAttachment" : "not specified",
+ "webAuthnPolicyRequireResidentKey" : "not specified",
+ "webAuthnPolicyUserVerificationRequirement" : "not specified",
+ "webAuthnPolicyCreateTimeout" : 0,
+ "webAuthnPolicyAvoidSameAuthenticatorRegister" : false,
+ "webAuthnPolicyAcceptableAaguids" : [ ],
+ "webAuthnPolicyExtraOrigins" : [ ],
+ "webAuthnPolicyPasswordlessRpEntityName" : "keycloak",
+ "webAuthnPolicyPasswordlessSignatureAlgorithms" : [ "ES256" ],
+ "webAuthnPolicyPasswordlessRpId" : "",
+ "webAuthnPolicyPasswordlessAttestationConveyancePreference" : "not specified",
+ "webAuthnPolicyPasswordlessAuthenticatorAttachment" : "not specified",
+ "webAuthnPolicyPasswordlessRequireResidentKey" : "not specified",
+ "webAuthnPolicyPasswordlessUserVerificationRequirement" : "not specified",
+ "webAuthnPolicyPasswordlessCreateTimeout" : 0,
+ "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister" : false,
+ "webAuthnPolicyPasswordlessAcceptableAaguids" : [ ],
+ "webAuthnPolicyPasswordlessExtraOrigins" : [ ],
+ "scopeMappings" : [ {
+ "clientScope" : "offline_access",
+ "roles" : [ "offline_access" ]
+ }, {
+ "clientScope" : "roles",
+ "roles" : [ "opencloudGuest", "opencloudAdmin", "opencloudSpaceAdmin", "opencloudUser" ]
+ } ],
+ "clientScopeMappings" : {
+ "account" : [ {
+ "client" : "account-console",
+ "roles" : [ "manage-account", "view-groups" ]
+ } ]
+ },
+ "clients" : [ {
+ "id" : "c8367556-1d13-4979-b4f6-5e2cff1f82ae",
+ "clientId" : "OpenCloudAndroid",
+ "name" : "OpenCloud Android App",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "redirectUris" : [ "oc://android.opencloud.eu" ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : true,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : true,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "saml.assertion.signature" : "false",
+ "saml.force.post.binding" : "false",
+ "saml.multivalued.roles" : "false",
+ "saml.encrypt" : "false",
+ "post.logout.redirect.uris" : "oc://android.opencloud.eu",
+ "backchannel.logout.revoke.offline.tokens" : "false",
+ "saml.server.signature" : "false",
+ "saml.server.signature.keyinfo.ext" : "false",
+ "exclude.session.state.from.auth.response" : "false",
+ "realm_client" : "false",
+ "backchannel.logout.session.required" : "true",
+ "client_credentials.use_refresh_token" : "false",
+ "saml_force_name_id_format" : "false",
+ "saml.client.signature" : "false",
+ "tls.client.certificate.bound.access.tokens" : "false",
+ "saml.authnstatement" : "false",
+ "display.on.consent.screen" : "false",
+ "saml.onetimeuse.condition" : "false"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : true,
+ "nodeReRegistrationTimeout" : -1,
+ "defaultClientScopes" : [ "web-origins", "profile", "roles", "groups", "basic", "email" ],
+ "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ]
+ }, {
+ "id" : "fc7d8a8e-cb92-4cb0-b404-d723c07d8d4f",
+ "clientId" : "OpenCloudDesktop",
+ "name" : "OpenCloud Desktop Client",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "redirectUris" : [ "http://127.0.0.1", "http://localhost" ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : true,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : true,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "saml.assertion.signature" : "false",
+ "saml.force.post.binding" : "false",
+ "saml.multivalued.roles" : "false",
+ "saml.encrypt" : "false",
+ "post.logout.redirect.uris" : "+",
+ "backchannel.logout.revoke.offline.tokens" : "false",
+ "saml.server.signature" : "false",
+ "saml.server.signature.keyinfo.ext" : "false",
+ "exclude.session.state.from.auth.response" : "false",
+ "realm_client" : "false",
+ "backchannel.logout.session.required" : "true",
+ "client_credentials.use_refresh_token" : "false",
+ "saml_force_name_id_format" : "false",
+ "saml.client.signature" : "false",
+ "tls.client.certificate.bound.access.tokens" : "false",
+ "saml.authnstatement" : "false",
+ "display.on.consent.screen" : "false",
+ "saml.onetimeuse.condition" : "false"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : true,
+ "nodeReRegistrationTimeout" : -1,
+ "defaultClientScopes" : [ "web-origins", "profile", "roles", "groups", "OpenCloudUnique_ID", "basic", "email" ],
+ "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ]
+ }, {
+ "id" : "6ae0e3da-38ff-47a4-a76e-b59eec0a2de9",
+ "clientId" : "OpenCloudIOS",
+ "name" : "OpenCloud iOS App",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "redirectUris" : [ "oc://ios.opencloud.eu" ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : true,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : true,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "saml.assertion.signature" : "false",
+ "saml.force.post.binding" : "false",
+ "saml.multivalued.roles" : "false",
+ "saml.encrypt" : "false",
+ "post.logout.redirect.uris" : "oc://ios.opencloud.eu",
+ "backchannel.logout.revoke.offline.tokens" : "false",
+ "saml.server.signature" : "false",
+ "saml.server.signature.keyinfo.ext" : "false",
+ "exclude.session.state.from.auth.response" : "false",
+ "realm_client" : "false",
+ "backchannel.logout.session.required" : "true",
+ "client_credentials.use_refresh_token" : "false",
+ "saml_force_name_id_format" : "false",
+ "saml.client.signature" : "false",
+ "tls.client.certificate.bound.access.tokens" : "false",
+ "saml.authnstatement" : "false",
+ "display.on.consent.screen" : "false",
+ "saml.onetimeuse.condition" : "false"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : true,
+ "nodeReRegistrationTimeout" : -1,
+ "defaultClientScopes" : [ "web-origins", "profile", "roles", "groups", "OpenCloudUnique_ID", "basic", "email" ],
+ "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ]
+ }, {
+ "id" : "294b6cf4-b646-4f6c-bab2-616546ec3167",
+ "clientId" : "_system",
+ "name" : "_system",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "secret" : "**********",
+ "redirectUris" : [ ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : false,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : false,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "realm_client" : "false",
+ "client.secret.creation.time" : "1718778122",
+ "post.logout.redirect.uris" : "+"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : false,
+ "nodeReRegistrationTimeout" : 0,
+ "defaultClientScopes" : [ "web-origins", "profile", "roles", "basic", "email" ],
+ "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ]
+ }, {
+ "id" : "9850adad-7910-4b67-a790-da6444361618",
+ "clientId" : "account",
+ "name" : "${client_account}",
+ "rootUrl" : "${authBaseUrl}",
+ "baseUrl" : "/realms/openCloud/account/",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "secret" : "**********",
+ "redirectUris" : [ "/realms/openCloud/account/*" ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : false,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : false,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "realm_client" : "false",
+ "client.secret.creation.time" : "1718778122",
+ "post.logout.redirect.uris" : "+"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : false,
+ "nodeReRegistrationTimeout" : 0,
+ "defaultClientScopes" : [ "basic" ],
+ "optionalClientScopes" : [ ]
+ }, {
+ "id" : "55bb4cdc-045b-422a-8830-61245949d6aa",
+ "clientId" : "account-console",
+ "name" : "${client_account-console}",
+ "rootUrl" : "${authBaseUrl}",
+ "baseUrl" : "/realms/openCloud/account/",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "redirectUris" : [ "/realms/openCloud/account/*" ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : false,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : true,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "realm_client" : "false",
+ "post.logout.redirect.uris" : "+",
+ "pkce.code.challenge.method" : "S256"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : false,
+ "nodeReRegistrationTimeout" : 0,
+ "protocolMappers" : [ {
+ "id" : "9bf413ed-402f-438d-a72c-033f3c45dab2",
+ "name" : "audience resolve",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-audience-resolve-mapper",
+ "consentRequired" : false,
+ "config" : { }
+ } ],
+ "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "basic", "email" ],
+ "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ]
+ }, {
+ "id" : "2969b8ff-2ab3-4907-aaa7-091a7a627ccb",
+ "clientId" : "admin-cli",
+ "name" : "${client_admin-cli}",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "redirectUris" : [ ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : false,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : true,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : true,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "realm_client" : "false",
+ "client.use.lightweight.access.token.enabled" : "true",
+ "post.logout.redirect.uris" : "+"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : true,
+ "nodeReRegistrationTimeout" : 0,
+ "defaultClientScopes" : [ "basic" ],
+ "optionalClientScopes" : [ ]
+ }, {
+ "id" : "002faf0a-716c-4230-81c7-ce22d1eb832c",
+ "clientId" : "broker",
+ "name" : "${client_broker}",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "secret" : "**********",
+ "redirectUris" : [ ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : false,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : false,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "realm_client" : "true",
+ "client.secret.creation.time" : "1718778122",
+ "post.logout.redirect.uris" : "+"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : false,
+ "nodeReRegistrationTimeout" : 0,
+ "defaultClientScopes" : [ "basic" ],
+ "optionalClientScopes" : [ ]
+ }, {
+ "id" : "7848ee94-cc9b-40db-946f-a86ac73dc9b7",
+ "clientId" : "realm-management",
+ "name" : "${client_realm-management}",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "redirectUris" : [ ],
+ "webOrigins" : [ ],
+ "notBefore" : 0,
+ "bearerOnly" : true,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : false,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : false,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "realm_client" : "true",
+ "post.logout.redirect.uris" : "+"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : false,
+ "nodeReRegistrationTimeout" : 0,
+ "defaultClientScopes" : [ ],
+ "optionalClientScopes" : [ ]
+ }, {
+ "id" : "97264f49-a8c1-4585-99b6-e706339c62f8",
+ "clientId" : "security-admin-console",
+ "name" : "${client_security-admin-console}",
+ "rootUrl" : "${authAdminUrl}",
+ "baseUrl" : "/admin/openCloud/console/",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "redirectUris" : [ "/admin/openCloud/console/*" ],
+ "webOrigins" : [ "+" ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : false,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : true,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "realm_client" : "false",
+ "client.use.lightweight.access.token.enabled" : "true",
+ "post.logout.redirect.uris" : "+",
+ "pkce.code.challenge.method" : "S256"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : true,
+ "nodeReRegistrationTimeout" : 0,
+ "protocolMappers" : [ {
+ "id" : "96092024-21dd-4d31-a004-2c5b96031da3",
+ "name" : "locale",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "locale",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "locale",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ } ],
+ "defaultClientScopes" : [ "basic" ],
+ "optionalClientScopes" : [ ]
+ }, {
+ "id" : "54b18eca-cf79-4263-9db9-2d79f8a1c831",
+ "clientId" : "web",
+ "name" : "OpenCloud Web App",
+ "description" : "",
+ "rootUrl" : "https://cloud.opencloud.test",
+ "adminUrl" : "https://cloud.opencloud.test",
+ "baseUrl" : "",
+ "surrogateAuthRequired" : false,
+ "enabled" : true,
+ "alwaysDisplayInConsole" : false,
+ "clientAuthenticatorType" : "client-secret",
+ "redirectUris" : [ "https://cloud.opencloud.test/", "https://cloud.opencloud.test/oidc-silent-redirect.html", "https://cloud.opencloud.test/oidc-callback.html" ],
+ "webOrigins" : [ "https://cloud.opencloud.test" ],
+ "notBefore" : 0,
+ "bearerOnly" : false,
+ "consentRequired" : false,
+ "standardFlowEnabled" : true,
+ "implicitFlowEnabled" : false,
+ "directAccessGrantsEnabled" : true,
+ "serviceAccountsEnabled" : false,
+ "publicClient" : true,
+ "frontchannelLogout" : false,
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "saml.assertion.signature" : "false",
+ "saml.force.post.binding" : "false",
+ "saml.multivalued.roles" : "false",
+ "saml.encrypt" : "false",
+ "post.logout.redirect.uris" : "+",
+ "oauth2.device.authorization.grant.enabled" : "false",
+ "backchannel.logout.revoke.offline.tokens" : "false",
+ "saml.server.signature" : "false",
+ "saml.server.signature.keyinfo.ext" : "false",
+ "exclude.session.state.from.auth.response" : "false",
+ "realm_client" : "false",
+ "oidc.ciba.grant.enabled" : "false",
+ "backchannel.logout.session.required" : "true",
+ "backchannel.logout.url" : "https://cloud.opencloud.test/backchannel_logout",
+ "client_credentials.use_refresh_token" : "false",
+ "saml_force_name_id_format" : "false",
+ "saml.client.signature" : "false",
+ "tls.client.certificate.bound.access.tokens" : "false",
+ "saml.authnstatement" : "false",
+ "display.on.consent.screen" : "false",
+ "saml.onetimeuse.condition" : "false"
+ },
+ "authenticationFlowBindingOverrides" : { },
+ "fullScopeAllowed" : true,
+ "nodeReRegistrationTimeout" : -1,
+ "defaultClientScopes" : [ "web-origins", "profile", "roles", "tenantid", "groups", "OpenCloudUnique_ID", "basic", "email" ],
+ "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ]
+ } ],
+ "clientScopes" : [ {
+ "id" : "258e56a8-1eeb-49ea-957b-aff8df4656ba",
+ "name" : "email",
+ "description" : "OpenID Connect built-in scope: email",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "true",
+ "consent.screen.text" : "${emailScopeConsentText}",
+ "display.on.consent.screen" : "true"
+ },
+ "protocolMappers" : [ {
+ "id" : "068bcfb6-4a17-4c20-b083-ae542a7f76c8",
+ "name" : "email verified",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-property-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "emailVerified",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "email_verified",
+ "jsonType.label" : "boolean",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "c00d6c21-2fd1-435f-9ee9-87e011048cbe",
+ "name" : "email",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-property-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "email",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "email",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ } ]
+ }, {
+ "id" : "b3e1e47e-3912-4b55-ba89-b0198e767682",
+ "name" : "address",
+ "description" : "OpenID Connect built-in scope: address",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "true",
+ "consent.screen.text" : "${addressScopeConsentText}",
+ "display.on.consent.screen" : "true"
+ },
+ "protocolMappers" : [ {
+ "id" : "876baab9-39d1-4845-abb4-561a58aa152d",
+ "name" : "address",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-address-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute.formatted" : "formatted",
+ "user.attribute.country" : "country",
+ "user.attribute.postal_code" : "postal_code",
+ "userinfo.token.claim" : "true",
+ "user.attribute.street" : "street",
+ "id.token.claim" : "true",
+ "user.attribute.region" : "region",
+ "access.token.claim" : "true",
+ "user.attribute.locality" : "locality"
+ }
+ } ]
+ }, {
+ "id" : "9cae7ced-e7d9-4f7b-8e54-7402125f6ead",
+ "name" : "offline_access",
+ "description" : "OpenID Connect built-in scope: offline_access",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "consent.screen.text" : "${offlineAccessScopeConsentText}",
+ "display.on.consent.screen" : "true"
+ }
+ }, {
+ "id" : "79713daf-89ca-4ed4-ad97-a88b13ee9a18",
+ "name" : "phone",
+ "description" : "OpenID Connect built-in scope: phone",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "true",
+ "consent.screen.text" : "${phoneScopeConsentText}",
+ "display.on.consent.screen" : "true"
+ },
+ "protocolMappers" : [ {
+ "id" : "b5f4f5ed-1008-42ba-8b3b-7d8851a2a680",
+ "name" : "phone number",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "phoneNumber",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "phone_number",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "08a246f1-2b4c-4def-af5c-aefc31b4820d",
+ "name" : "phone number verified",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "phoneNumberVerified",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "phone_number_verified",
+ "jsonType.label" : "boolean",
+ "userinfo.token.claim" : "true"
+ }
+ } ]
+ }, {
+ "id" : "ad7d7bed-d74d-4582-8bae-c9ef9b02f717",
+ "name" : "tenantid",
+ "description" : "",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "true",
+ "display.on.consent.screen" : "true",
+ "gui.order" : "",
+ "consent.screen.text" : ""
+ },
+ "protocolMappers" : [ {
+ "id" : "22ff6103-80bf-40a5-a557-262e341b5e57",
+ "name" : "tenantid",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "introspection.token.claim" : "true",
+ "userinfo.token.claim" : "true",
+ "user.attribute" : "tenantId",
+ "id.token.claim" : "true",
+ "lightweight.claim" : "false",
+ "access.token.claim" : "true",
+ "claim.name" : "tenantid",
+ "jsonType.label" : "String"
+ }
+ } ]
+ }, {
+ "id" : "5ce87358-3bca-4874-a6f0-6dccae6209a8",
+ "name" : "web-origins",
+ "description" : "OpenID Connect scope for add allowed web origins to the access token",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "false",
+ "consent.screen.text" : "",
+ "display.on.consent.screen" : "false"
+ },
+ "protocolMappers" : [ {
+ "id" : "bbd23c51-918d-4ea6-9ac0-db68b512fb0a",
+ "name" : "allowed web origins",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-allowed-origins-mapper",
+ "consentRequired" : false,
+ "config" : { }
+ } ]
+ }, {
+ "id" : "bdb3e320-76c8-4ad7-9d0f-a08efc060101",
+ "name" : "microprofile-jwt",
+ "description" : "Microprofile - JWT built-in scope",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "true",
+ "display.on.consent.screen" : "false"
+ },
+ "protocolMappers" : [ {
+ "id" : "1d08316c-493b-42ab-afa3-66f621860661",
+ "name" : "groups",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-realm-role-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "multivalued" : "true",
+ "userinfo.token.claim" : "true",
+ "user.attribute" : "foo",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "groups",
+ "jsonType.label" : "String"
+ }
+ }, {
+ "id" : "52061d2d-7a41-4f1d-ba1b-3c4a53e739e4",
+ "name" : "upn",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-property-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "username",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "upn",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ } ]
+ }, {
+ "id" : "57eeeee9-99fd-4ac0-b997-87b006989486",
+ "name" : "service_account",
+ "description" : "Specific scope for a client enabled for service accounts",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "false",
+ "display.on.consent.screen" : "false"
+ },
+ "protocolMappers" : [ {
+ "id" : "9891dc9a-c843-4f4d-968d-0d7866f6c572",
+ "name" : "Client Host",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usersessionmodel-note-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.session.note" : "clientHost",
+ "introspection.token.claim" : "true",
+ "userinfo.token.claim" : "true",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "clientHost",
+ "jsonType.label" : "String"
+ }
+ }, {
+ "id" : "63be4abb-12ac-43dd-b6a0-6d8f7be5b2b1",
+ "name" : "Client ID",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usersessionmodel-note-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.session.note" : "client_id",
+ "introspection.token.claim" : "true",
+ "userinfo.token.claim" : "true",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "client_id",
+ "jsonType.label" : "String"
+ }
+ }, {
+ "id" : "789cb39b-848a-40b4-9ba5-269e3154c99e",
+ "name" : "Client IP Address",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usersessionmodel-note-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.session.note" : "clientAddress",
+ "introspection.token.claim" : "true",
+ "userinfo.token.claim" : "true",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "clientAddress",
+ "jsonType.label" : "String"
+ }
+ } ]
+ }, {
+ "id" : "d68b434c-c589-45d4-ada1-5370b7e65cae",
+ "name" : "OpenCloudUnique_ID",
+ "description" : "Id to be used in OpenCloud and Keycloak",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "false",
+ "display.on.consent.screen" : "false",
+ "gui.order" : "",
+ "consent.screen.text" : ""
+ },
+ "protocolMappers" : [ {
+ "id" : "d84f29ea-54d4-477e-9b36-cf5d7bec8f0c",
+ "name" : "Unique ID",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "introspection.token.claim" : "true",
+ "userinfo.token.claim" : "true",
+ "user.attribute" : "uuid",
+ "id.token.claim" : "true",
+ "lightweight.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "uuid",
+ "jsonType.label" : "String"
+ }
+ } ]
+ }, {
+ "id" : "8eb1f69b-b941-4185-bca1-f916953f7cf5",
+ "name" : "role_list",
+ "description" : "SAML role list",
+ "protocol" : "saml",
+ "attributes" : {
+ "consent.screen.text" : "${samlRoleListScopeConsentText}",
+ "display.on.consent.screen" : "true"
+ },
+ "protocolMappers" : [ {
+ "id" : "fb587847-806f-4443-bab0-501efc0f0b46",
+ "name" : "role list",
+ "protocol" : "saml",
+ "protocolMapper" : "saml-role-list-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "single" : "false",
+ "attribute.nameformat" : "Basic",
+ "attribute.name" : "Role"
+ }
+ } ]
+ }, {
+ "id" : "947da1ff-f614-48fc-9ecb-c98cbcfd3390",
+ "name" : "profile",
+ "description" : "OpenID Connect built-in scope: profile",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "true",
+ "consent.screen.text" : "${profileScopeConsentText}",
+ "display.on.consent.screen" : "true"
+ },
+ "protocolMappers" : [ {
+ "id" : "46fec552-2f92-408a-84cf-ba98bf8e35fd",
+ "name" : "family name",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-property-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "lastName",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "family_name",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "c7ed5458-4d32-423e-8ea1-d112c45045d4",
+ "name" : "middle name",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "middleName",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "middle_name",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "e18d1ce4-3969-4ec1-9941-a27fd7555245",
+ "name" : "picture",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "picture",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "picture",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "dab85a5e-9af8-4fcd-88e4-9d3ae50dd5b6",
+ "name" : "locale",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "locale",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "locale",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "7484f47e-3bb1-48d0-ba64-e8330dcefe6e",
+ "name" : "profile",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "profile",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "profile",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "fcd00995-9693-4803-8f41-c84044be83ed",
+ "name" : "website",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "website",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "website",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "f09e7268-5284-449b-849b-cf8225523584",
+ "name" : "full name",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-full-name-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "0317f4b3-3f7b-47ab-88d3-5d6f604d944d",
+ "name" : "nickname",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "nickname",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "nickname",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "db81244c-e739-461b-8822-52ceaa11bdf4",
+ "name" : "updated at",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "updatedAt",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "updated_at",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "c6a16bf9-9370-4dff-a718-be53131bb238",
+ "name" : "gender",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "gender",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "gender",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "32d76647-b542-484c-9062-edc34eb350e0",
+ "name" : "birthdate",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "birthdate",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "birthdate",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "ac6530db-6463-446b-99da-32d5298b5fa0",
+ "name" : "zoneinfo",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-attribute-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "zoneinfo",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "zoneinfo",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "ed10983b-8700-415e-933e-226ce3f397a6",
+ "name" : "given name",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-property-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "firstName",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "given_name",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ }, {
+ "id" : "8205ccd0-1266-4060-b5df-3a6eb229d91e",
+ "name" : "username",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-property-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "username",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "preferred_username",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true"
+ }
+ } ]
+ }, {
+ "id" : "c3a6224b-49aa-4a25-953d-7e326d66893d",
+ "name" : "basic",
+ "description" : "OpenID Connect scope for add all basic claims to the token",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "false",
+ "display.on.consent.screen" : "false"
+ },
+ "protocolMappers" : [ {
+ "id" : "2d4f3f17-1ab7-429e-88e1-cdf08d3533c6",
+ "name" : "auth_time",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usersessionmodel-note-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.session.note" : "AUTH_TIME",
+ "introspection.token.claim" : "true",
+ "userinfo.token.claim" : "true",
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "auth_time",
+ "jsonType.label" : "long"
+ }
+ }, {
+ "id" : "3e7da934-3de3-4bd1-a565-8ac62419c138",
+ "name" : "sub",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-sub-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "introspection.token.claim" : "true",
+ "access.token.claim" : "true"
+ }
+ } ]
+ }, {
+ "id" : "0c72b80b-28d5-48d8-b593-c99030aab58d",
+ "name" : "roles",
+ "description" : "OpenID Connect scope for add user roles to the access token",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "false",
+ "consent.screen.text" : "${rolesScopeConsentText}",
+ "display.on.consent.screen" : "true"
+ },
+ "protocolMappers" : [ {
+ "id" : "bc7f015e-329f-4e99-be6b-72382f4310c7",
+ "name" : "client roles",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-client-role-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "user.attribute" : "foo",
+ "access.token.claim" : "true",
+ "claim.name" : "resource_access.${client_id}.roles",
+ "jsonType.label" : "String",
+ "multivalued" : "true"
+ }
+ }, {
+ "id" : "215f645f-ad0b-4523-9ece-f09f69ead5c4",
+ "name" : "audience resolve",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-audience-resolve-mapper",
+ "consentRequired" : false,
+ "config" : { }
+ }, {
+ "id" : "4a10b958-d34d-413a-b349-1415d02cdcde",
+ "name" : "realm roles",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-usermodel-realm-role-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "claim.name" : "roles",
+ "jsonType.label" : "String",
+ "userinfo.token.claim" : "true",
+ "multivalued" : "true"
+ }
+ } ]
+ }, {
+ "id" : "7438d93e-b07a-4913-9419-3273be364c4b",
+ "name" : "groups",
+ "description" : "OpenID Connect scope for add user groups to the access token",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "false",
+ "display.on.consent.screen" : "true",
+ "gui.order" : "",
+ "consent.screen.text" : ""
+ },
+ "protocolMappers" : [ {
+ "id" : "5349faf2-64a6-481f-b207-39ffef2cd597",
+ "name" : "groups",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-group-membership-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "full.path" : "false",
+ "introspection.token.claim" : "true",
+ "multivalued" : "true",
+ "userinfo.token.claim" : "true",
+ "id.token.claim" : "true",
+ "lightweight.claim" : "false",
+ "access.token.claim" : "true",
+ "claim.name" : "groups"
+ }
+ } ]
+ }, {
+ "id" : "86883395-e439-4cab-9d8d-31d71389969c",
+ "name" : "acr",
+ "description" : "OpenID Connect scope for add acr (authentication context class reference) to the token",
+ "protocol" : "openid-connect",
+ "attributes" : {
+ "include.in.token.scope" : "false",
+ "display.on.consent.screen" : "false"
+ },
+ "protocolMappers" : [ {
+ "id" : "b849b14b-7c9c-4b7b-9329-c56debefb47c",
+ "name" : "acr loa level",
+ "protocol" : "openid-connect",
+ "protocolMapper" : "oidc-acr-mapper",
+ "consentRequired" : false,
+ "config" : {
+ "id.token.claim" : "true",
+ "access.token.claim" : "true",
+ "userinfo.token.claim" : "true"
+ }
+ } ]
+ } ],
+ "defaultDefaultClientScopes" : [ "role_list", "profile", "email", "roles", "web-origins", "acr", "basic", "groups", "OpenCloudUnique_ID", "tenantid" ],
+ "defaultOptionalClientScopes" : [ "offline_access", "address", "phone", "microprofile-jwt" ],
+ "browserSecurityHeaders" : {
+ "contentSecurityPolicyReportOnly" : "",
+ "xContentTypeOptions" : "nosniff",
+ "referrerPolicy" : "no-referrer",
+ "xRobotsTag" : "none",
+ "xFrameOptions" : "SAMEORIGIN",
+ "contentSecurityPolicy" : "frame-src 'self'; frame-ancestors 'self'; object-src 'none';",
+ "xXSSProtection" : "1; mode=block",
+ "strictTransportSecurity" : "max-age=31536000; includeSubDomains"
+ },
+ "smtpServer" : { },
+ "loginTheme" : "opencloud",
+ "eventsEnabled" : false,
+ "eventsListeners" : [ "jboss-logging" ],
+ "enabledEventTypes" : [ "SEND_RESET_PASSWORD", "UPDATE_CONSENT_ERROR", "GRANT_CONSENT", "VERIFY_PROFILE_ERROR", "REMOVE_TOTP", "REVOKE_GRANT", "UPDATE_TOTP", "LOGIN_ERROR", "CLIENT_LOGIN", "RESET_PASSWORD_ERROR", "IMPERSONATE_ERROR", "CODE_TO_TOKEN_ERROR", "CUSTOM_REQUIRED_ACTION", "OAUTH2_DEVICE_CODE_TO_TOKEN_ERROR", "RESTART_AUTHENTICATION", "IMPERSONATE", "UPDATE_PROFILE_ERROR", "LOGIN", "OAUTH2_DEVICE_VERIFY_USER_CODE", "UPDATE_PASSWORD_ERROR", "CLIENT_INITIATED_ACCOUNT_LINKING", "OAUTH2_EXTENSION_GRANT", "USER_DISABLED_BY_PERMANENT_LOCKOUT", "TOKEN_EXCHANGE", "AUTHREQID_TO_TOKEN", "LOGOUT", "REGISTER", "DELETE_ACCOUNT_ERROR", "CLIENT_REGISTER", "IDENTITY_PROVIDER_LINK_ACCOUNT", "USER_DISABLED_BY_TEMPORARY_LOCKOUT", "DELETE_ACCOUNT", "UPDATE_PASSWORD", "CLIENT_DELETE", "FEDERATED_IDENTITY_LINK_ERROR", "IDENTITY_PROVIDER_FIRST_LOGIN", "CLIENT_DELETE_ERROR", "VERIFY_EMAIL", "CLIENT_LOGIN_ERROR", "RESTART_AUTHENTICATION_ERROR", "EXECUTE_ACTIONS", "REMOVE_FEDERATED_IDENTITY_ERROR", "TOKEN_EXCHANGE_ERROR", "PERMISSION_TOKEN", "FEDERATED_IDENTITY_OVERRIDE_LINK", "SEND_IDENTITY_PROVIDER_LINK_ERROR", "EXECUTE_ACTION_TOKEN_ERROR", "OAUTH2_EXTENSION_GRANT_ERROR", "SEND_VERIFY_EMAIL", "OAUTH2_DEVICE_AUTH", "EXECUTE_ACTIONS_ERROR", "REMOVE_FEDERATED_IDENTITY", "OAUTH2_DEVICE_CODE_TO_TOKEN", "IDENTITY_PROVIDER_POST_LOGIN", "IDENTITY_PROVIDER_LINK_ACCOUNT_ERROR", "FEDERATED_IDENTITY_OVERRIDE_LINK_ERROR", "OAUTH2_DEVICE_VERIFY_USER_CODE_ERROR", "UPDATE_EMAIL", "REGISTER_ERROR", "REVOKE_GRANT_ERROR", "EXECUTE_ACTION_TOKEN", "LOGOUT_ERROR", "UPDATE_EMAIL_ERROR", "CLIENT_UPDATE_ERROR", "AUTHREQID_TO_TOKEN_ERROR", "INVITE_ORG_ERROR", "UPDATE_PROFILE", "CLIENT_REGISTER_ERROR", "FEDERATED_IDENTITY_LINK", "INVITE_ORG", "SEND_IDENTITY_PROVIDER_LINK", "SEND_VERIFY_EMAIL_ERROR", "RESET_PASSWORD", "CLIENT_INITIATED_ACCOUNT_LINKING_ERROR", "OAUTH2_DEVICE_AUTH_ERROR", "UPDATE_CONSENT", "REMOVE_TOTP_ERROR", "VERIFY_EMAIL_ERROR", "SEND_RESET_PASSWORD_ERROR", "CLIENT_UPDATE", 
"CUSTOM_REQUIRED_ACTION_ERROR", "IDENTITY_PROVIDER_POST_LOGIN_ERROR", "UPDATE_TOTP_ERROR", "CODE_TO_TOKEN", "VERIFY_PROFILE", "GRANT_CONSENT_ERROR", "IDENTITY_PROVIDER_FIRST_LOGIN_ERROR" ],
+ "adminEventsEnabled" : false,
+ "adminEventsDetailsEnabled" : false,
+ "identityProviders" : [ ],
+ "identityProviderMappers" : [ ],
+ "components" : {
+ "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy" : [ {
+ "id" : "4682fe74-f3a9-445a-a7ab-557fb532fe6b",
+ "name" : "Consent Required",
+ "providerId" : "consent-required",
+ "subType" : "anonymous",
+ "subComponents" : { },
+ "config" : { }
+ }, {
+ "id" : "c46009e5-c8b5-4051-bf7f-7b1481a9aa86",
+ "name" : "Max Clients Limit",
+ "providerId" : "max-clients",
+ "subType" : "anonymous",
+ "subComponents" : { },
+ "config" : {
+ "max-clients" : [ "200" ]
+ }
+ }, {
+ "id" : "43edf979-28d2-46c8-9f93-48b3de185570",
+ "name" : "Allowed Protocol Mapper Types",
+ "providerId" : "allowed-protocol-mappers",
+ "subType" : "anonymous",
+ "subComponents" : { },
+ "config" : {
+ "allowed-protocol-mapper-types" : [ "saml-user-property-mapper", "saml-user-attribute-mapper", "oidc-usermodel-property-mapper", "oidc-sha256-pairwise-sub-mapper", "oidc-address-mapper", "oidc-usermodel-attribute-mapper", "oidc-full-name-mapper", "saml-role-list-mapper" ]
+ }
+ }, {
+ "id" : "6fc7d765-7da8-4985-ba0b-e83827b04bd3",
+ "name" : "Allowed Client Scopes",
+ "providerId" : "allowed-client-templates",
+ "subType" : "anonymous",
+ "subComponents" : { },
+ "config" : {
+ "allow-default-scopes" : [ "true" ]
+ }
+ }, {
+ "id" : "5a9aef85-98a6-4e90-b30f-8aa715e1f5e6",
+ "name" : "Allowed Protocol Mapper Types",
+ "providerId" : "allowed-protocol-mappers",
+ "subType" : "authenticated",
+ "subComponents" : { },
+ "config" : {
+ "allowed-protocol-mapper-types" : [ "saml-user-property-mapper", "oidc-sha256-pairwise-sub-mapper", "oidc-usermodel-property-mapper", "oidc-usermodel-attribute-mapper", "saml-user-attribute-mapper", "oidc-full-name-mapper", "oidc-address-mapper", "saml-role-list-mapper" ]
+ }
+ }, {
+ "id" : "e3eadb04-8862-4567-869c-a76485268159",
+ "name" : "Allowed Client Scopes",
+ "providerId" : "allowed-client-templates",
+ "subType" : "authenticated",
+ "subComponents" : { },
+ "config" : {
+ "allow-default-scopes" : [ "true" ]
+ }
+ }, {
+ "id" : "c788e6bf-2f57-4a82-b32e-ac8d48a4f676",
+ "name" : "Full Scope Disabled",
+ "providerId" : "scope",
+ "subType" : "anonymous",
+ "subComponents" : { },
+ "config" : { }
+ } ],
+ "org.keycloak.userprofile.UserProfileProvider" : [ {
+ "id" : "28d6b4ce-33d4-40c0-adef-b27e35b7e122",
+ "providerId" : "declarative-user-profile",
+ "subComponents" : { },
+ "config" : {
+ "kc.user.profile.config" : [ "{\"attributes\":[{\"name\":\"username\",\"displayName\":\"${username}\",\"validations\":{\"length\":{\"min\":3,\"max\":255},\"username-prohibited-characters\":{},\"up-username-not-idn-homograph\":{}},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]},\"multivalued\":false},{\"name\":\"email\",\"displayName\":\"${email}\",\"validations\":{\"email\":{},\"length\":{\"max\":255}},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]},\"multivalued\":false},{\"name\":\"firstName\",\"displayName\":\"${firstName}\",\"validations\":{\"length\":{\"max\":255},\"person-name-prohibited-characters\":{}},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]},\"multivalued\":false},{\"name\":\"lastName\",\"displayName\":\"${lastName}\",\"validations\":{\"length\":{\"max\":255},\"person-name-prohibited-characters\":{}},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]},\"multivalued\":false},{\"name\":\"uuid\",\"displayName\":\"Unique LDAP ID\",\"validations\":{},\"annotations\":{},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[]},\"multivalued\":false}],\"groups\":[{\"name\":\"user-metadata\",\"displayHeader\":\"User metadata\",\"displayDescription\":\"Attributes, which refer to user metadata\"}]}" ]
+ }
+ } ],
+ "org.keycloak.storage.UserStorageProvider" : [ {
+ "id" : "91da1f3b-b9f4-4759-aed4-50124d9b9f3e",
+ "name" : "ldap",
+ "providerId" : "ldap",
+ "subComponents" : {
+ "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" : [ {
+ "id" : "5ce17b39-efb7-45a2-b600-55c57d3a3021",
+ "name" : "email",
+ "providerId" : "user-attribute-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "ldap.attribute" : [ "mail" ],
+ "is.mandatory.in.ldap" : [ "false" ],
+ "always.read.value.from.ldap" : [ "false" ],
+ "read.only" : [ "false" ],
+ "user.model.attribute" : [ "email" ]
+ }
+ }, {
+ "id" : "f913ed32-5025-4d26-9870-7dffeba1093d",
+ "name" : "tenantId",
+ "providerId" : "user-attribute-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "ldap.attribute" : [ "opencloudMemberOfSchool" ],
+ "is.mandatory.in.ldap" : [ "true" ],
+ "attribute.force.default" : [ "true" ],
+ "is.binary.attribute" : [ "false" ],
+ "always.read.value.from.ldap" : [ "true" ],
+ "read.only" : [ "true" ],
+ "user.model.attribute" : [ "tenantId" ]
+ }
+ }, {
+ "id" : "7bf4374a-10d6-4192-9fb5-119ebf87f9f6",
+ "name" : "modify date",
+ "providerId" : "user-attribute-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "ldap.attribute" : [ "modifyTimestamp" ],
+ "is.mandatory.in.ldap" : [ "false" ],
+ "read.only" : [ "true" ],
+ "always.read.value.from.ldap" : [ "true" ],
+ "user.model.attribute" : [ "modifyTimestamp" ]
+ }
+ }, {
+ "id" : "9fe968b5-c338-4419-93c4-f339cbad5ef8",
+ "name" : "username",
+ "providerId" : "user-attribute-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "ldap.attribute" : [ "uid" ],
+ "is.mandatory.in.ldap" : [ "true" ],
+ "read.only" : [ "false" ],
+ "always.read.value.from.ldap" : [ "false" ],
+ "user.model.attribute" : [ "username" ]
+ }
+ }, {
+ "id" : "330d06e9-3e12-492e-af5e-53b1b950a122",
+ "name" : "uuid",
+ "providerId" : "user-attribute-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "ldap.attribute" : [ "openCloudUUID" ],
+ "attribute.force.default" : [ "false" ],
+ "is.mandatory.in.ldap" : [ "false" ],
+ "is.binary.attribute" : [ "false" ],
+ "read.only" : [ "true" ],
+ "always.read.value.from.ldap" : [ "true" ],
+ "user.model.attribute" : [ "uuid" ]
+ }
+ }, {
+ "id" : "79e83df0-9406-4609-b78f-b27de957bc41",
+ "name" : "last name",
+ "providerId" : "user-attribute-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "ldap.attribute" : [ "sn" ],
+ "is.mandatory.in.ldap" : [ "true" ],
+ "always.read.value.from.ldap" : [ "true" ],
+ "read.only" : [ "false" ],
+ "user.model.attribute" : [ "lastName" ]
+ }
+ }, {
+ "id" : "96bc2621-a714-4f15-ac1d-bc32df94382d",
+ "name" : "display name",
+ "providerId" : "full-name-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "read.only" : [ "false" ],
+ "write.only" : [ "true" ],
+ "ldap.full.name.attribute" : [ "displayName" ]
+ }
+ }, {
+ "id" : "cab8b569-0f50-4e13-b2a5-d24ee513cd8b",
+ "name" : "first name",
+ "providerId" : "user-attribute-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "ldap.attribute" : [ "cn" ],
+ "is.mandatory.in.ldap" : [ "true" ],
+ "read.only" : [ "false" ],
+ "always.read.value.from.ldap" : [ "true" ],
+ "user.model.attribute" : [ "firstName" ]
+ }
+ }, {
+ "id" : "30cec76e-539e-4ac4-9816-8a0ae4a49798",
+ "name" : "creation date",
+ "providerId" : "user-attribute-ldap-mapper",
+ "subComponents" : { },
+ "config" : {
+ "ldap.attribute" : [ "createTimestamp" ],
+ "is.mandatory.in.ldap" : [ "false" ],
+ "always.read.value.from.ldap" : [ "true" ],
+ "read.only" : [ "true" ],
+ "user.model.attribute" : [ "createTimestamp" ]
+ }
+ } ]
+ },
+ "config" : {
+ "fullSyncPeriod" : [ "86400" ],
+ "pagination" : [ "false" ],
+ "startTls" : [ "false" ],
+ "usersDn" : [ "ou=users,dc=opencloud,dc=eu" ],
+ "connectionPooling" : [ "false" ],
+ "cachePolicy" : [ "EVICT_DAILY" ],
+ "useKerberosForPasswordAuthentication" : [ "false" ],
+ "evictionHour" : [ "0" ],
+ "importEnabled" : [ "true" ],
+ "enabled" : [ "true" ],
+ "bindDn" : [ "cn=admin,dc=opencloud,dc=eu" ],
+ "bindCredential" : [ "admin" ],
+ "changedSyncPeriod" : [ "3600" ],
+ "usernameLDAPAttribute" : [ "uid" ],
+ "lastSync" : [ "1759930583" ],
+ "vendor" : [ "other" ],
+ "uuidLDAPAttribute" : [ "entryUUID" ],
+ "connectionUrl" : [ "ldap://ldap-server:1389" ],
+ "allowKerberosAuthentication" : [ "false" ],
+ "syncRegistrations" : [ "true" ],
+ "authType" : [ "simple" ],
+ "krbPrincipalAttribute" : [ "krb5PrincipalName" ],
+ "customUserSearchFilter" : [ "(objectclass=inetOrgPerson)" ],
+ "searchScope" : [ "1" ],
+ "useTruststoreSpi" : [ "always" ],
+ "usePasswordModifyExtendedOp" : [ "false" ],
+ "trustEmail" : [ "false" ],
+ "userObjectClasses" : [ "top,person,organizationalPerson,inetOrgPerson" ],
+ "evictionMinute" : [ "0" ],
+ "rdnLDAPAttribute" : [ "uid" ],
+ "editMode" : [ "WRITABLE" ],
+ "validatePasswordPolicy" : [ "false" ]
+ }
+ } ],
+ "org.keycloak.keys.KeyProvider" : [ {
+ "id" : "0e3d0048-cb16-49c3-8a9a-05d83f0daeca",
+ "name" : "rsa-generated",
+ "providerId" : "rsa-generated",
+ "subComponents" : { },
+ "config" : {
+ "privateKey" : [ "MIIEogIBAAKCAQEA08cSBcaKZRgS0uPFg3G0Mz17lmFzhgWR3DSbcDWhEIT5aYCJlALFT/7WDAc/x/xyYAh0yjp8iEEWkCPZY0Im8ORsEz1rmobO6qRjuHA41fXClz0S8PpqB8TsVgHGThCAYOkQHOfikfiDgaBxvgXokYJzYk1m9ULS/VH5SiO2ycVQQa/i7pgF1UQ/kX8AV3gqjmZI8qIp8HvaK33c6Za5gP0YROVG3JN/6CgWqmjitLeY2bItXIdjYoh2u19H5hsh3OxmfRBYzDQ2L/nU4u2/2lViNPOwCnmFQL1twKrSbSeUBQhQPmLmaixVJ/oePlWNhiTCFT+x0Dd8tqJuy0lh8wIDAQABAoIBAAYiG8mDCHsJCXKGGjBGWLurKisew2O8QY8pDt9fDj0WPMrraBCbS++XJ8fNZfF6xC030XoR241upSKjhyYywkHzu5WLD9S31sJA5Ipw5ZR8u5f5stS7y9pRv651+rJEFBO6bDcKGCu2EMjuRp0EOsoMBx9vsm2j88xWVWvqQN7zBG2uMVvISkPjLLhXTT1kkG1AtWmqxHqAhwGeXsdWIXQPs86sTWDYjnpYsvpOalps8L24fRCJ4Ko5j3wGEnfRgfD/lJjBtcqcWbeGeJd+GseP4ZiWXvrAt9cAcUlA2XdkYNKcDMWSe1aROsr70Kub6cGjY5zyRXIrfBEzx+MfvrECgYEA+POxWYuxFrYV3lEthBVTpSjy6I8lqtb/I4IYK5PJVQjTd91YsptXM1cKzWc4VAGQf7qdCsLT6Bx8gsTjVnn3egVKiPYJxI2fZAVWmVTUSIjk2wd7Qy2DfmUZqSl/yhni3vR/JPHrhhvV3aDHIrkKLygNUT1YLHrCmjGXI2BY75UCgYEA2cXz4EBrpf5Ypl/gg3K5uKzIh7j5pPbSkBEReYjvAXFDRXIODkzYClq7/qx6HL+oJ5li/Tw85UZJOifo29tIYAS/DcTCQafEhhyVup2myEpWNJGOEHRV+wbDSm5qmScMkZfWIi2LfDgkG8gg9JWrwff3+un+ZbZxnaOMF4CRyWcCgYAkQhHdcdv7688pjvcrGpQRJPKj7l1VEXUdbdApYJ/dE6kA9yr48Gj14EKBgfwROB0cjUhcDd4hGy0DOrvFl4P0sJqXcgYgK2RAhAqF0HYOjmmOgPtx4EobeYwQOCwVekmxoTh3YAimgp1WZxk5tGuITLQL1fxmlgaLwJc7nse1NQKBgDPEfbuyNKTJ+JLcOceljwaadDTnQNj5H4bFt+y3I3GpVf99JGA73iNf/E6G+6Q33yZCrShbmHI8Kg4cDJuu3d6NYklIm5D1656hzuoEre/w09s9OpMoxIOe0O+xnb94a+F0QZ/wSbcmtVaxVTwvFBxQ8O8Bjsro+sspZJT1qDJDAoGASBrXDqiP1gPXIDtdIpK/HH7UEG52WrLfFO2nZxJMN13to2HEq9D4vTHz4qtgKWyvrm9khfKK/j8c6YBmJChypJ+M+0qoutXKkLZveqn56e9v1CtgDx9fkMxYxwY13vEPy/ArJQ6Q0QSKRNP/4Is6kqaGPelfr3TL1ddoQIXODic=" ],
+ "certificate" : [ "MIICoTCCAYkCBgGZw0iH2TANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAlvcGVuQ2xvdWQwHhcNMjUxMDA4MTAwMzU0WhcNMzUxMDA4MTAwNTM0WjAUMRIwEAYDVQQDDAlvcGVuQ2xvdWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTxxIFxoplGBLS48WDcbQzPXuWYXOGBZHcNJtwNaEQhPlpgImUAsVP/tYMBz/H/HJgCHTKOnyIQRaQI9ljQibw5GwTPWuahs7qpGO4cDjV9cKXPRLw+moHxOxWAcZOEIBg6RAc5+KR+IOBoHG+BeiRgnNiTWb1QtL9UflKI7bJxVBBr+LumAXVRD+RfwBXeCqOZkjyoinwe9orfdzplrmA/RhE5Ubck3/oKBaqaOK0t5jZsi1ch2NiiHa7X0fmGyHc7GZ9EFjMNDYv+dTi7b/aVWI087AKeYVAvW3AqtJtJ5QFCFA+YuZqLFUn+h4+VY2GJMIVP7HQN3y2om7LSWHzAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAF0MZ4/8JtCfBGcCGx4ak2mEZQiUTfMlfIRMS4eK3z52Hv6BBidEIg3KN0SHpYa5buTNGwRvrY1zrC5kNU1QJmYxykV50h9M1+QyXaX9HDfj0z5nbsLg8gSPC1vnp8SaBkRqHYvMWHWT9n1aPK+wfl2Yv5DPeL12oL+HqQNS0lkm10/1JLlWUgq4R3zF85M+5TOyLN8PSZuO9dqpChtOgY1xJbzpyZ9suGC9dd5mkK6Je4MA0oHBO4ABzAFICZMRAgnMoz7VA7rLGEgAoWiCei5b8a675XVwiSioH/b/6asmA+ItCnTe5nTKrBldYRgE90BqGPyB2bTf62Wmcml9MjU=" ],
+ "priority" : [ "100" ]
+ }
+ }, {
+ "id" : "f92ecf31-c3c7-4c3b-af20-839fc05bcf99",
+ "name" : "hmac-generated",
+ "providerId" : "hmac-generated",
+ "subComponents" : { },
+ "config" : {
+ "kid" : [ "1dad7095-af6e-4e90-8f25-1d76f021ac06" ],
+ "secret" : [ "aY4Yn4iSA7Yc_njM0iouDg_6b3M-NMPiAytkS1ZsLBFJIHNyyF684u9U5mW_lJ_C-BFBJYiJg5_af9ix9KKYwE1RVSABtBDLItKB5v5WX8ypLj4ViNXHJLvbqgBf382bI4IfH3ZoJZ8kr2t7nPDHymKVInC9MPiL0Ub9K39I8bw" ],
+ "priority" : [ "100" ],
+ "algorithm" : [ "HS256" ]
+ }
+ }, {
+ "id" : "a137a686-5876-4faf-8d1e-e3a59f55095e",
+ "name" : "hmac-generated-hs512",
+ "providerId" : "hmac-generated",
+ "subComponents" : { },
+ "config" : {
+ "kid" : [ "97869da6-f625-4e74-90c9-f86586844bc5" ],
+ "secret" : [ "5uKd4nFtOsdqqIDe2x5qSDq9klHWYAHZb75YP3sEsmWmn4uGt0Y9j7fVaw5XpY_9sE1H9OFiBPSMme0jmDPuhhjhdzYrf8RvoT4hPJC1bx8K6K8wcbWeJHYNwurFQPZFiKTVJN4U_SeXcO0RgGsma7jq7pUMGuiikygMc7jwKDE" ],
+ "priority" : [ "100" ],
+ "algorithm" : [ "HS512" ]
+ }
+ }, {
+ "id" : "992dcc80-dc41-4b00-bab8-6ec1c839f3a4",
+ "name" : "aes-generated",
+ "providerId" : "aes-generated",
+ "subComponents" : { },
+ "config" : {
+ "kid" : [ "bf83b483-9971-4d99-ba41-a3e6487d5c2f" ],
+ "secret" : [ "1Ku-5g39B0XOFE9XRCQqsw" ],
+ "priority" : [ "100" ]
+ }
+ } ]
+ },
+ "internationalizationEnabled" : false,
+ "authenticationFlows" : [ {
+ "id" : "8964f931-b866-4a05-ab1c-89331a566887",
+ "alias" : "Account verification options",
 +         "description" : "Method with which to verify the existing account",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "idp-email-verification",
+ "authenticatorFlow" : false,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 20,
+ "autheticatorFlow" : true,
+ "flowAlias" : "Verify Existing Account by Re-authentication",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "123e5711-1ee5-4f7e-ac9c-64c644daaea9",
+ "alias" : "Browser - Conditional OTP",
+ "description" : "Flow to determine if the OTP is required for the authentication",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "conditional-user-configured",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "auth-otp-form",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "be73b7f5-9a66-487c-b7dd-80e0f7ac0c7c",
+ "alias" : "Direct Grant - Conditional OTP",
+ "description" : "Flow to determine if the OTP is required for the authentication",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "conditional-user-configured",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "direct-grant-validate-otp",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "597ca917-91fc-4898-a279-cd592af286e3",
+ "alias" : "First broker login - Conditional OTP",
+ "description" : "Flow to determine if the OTP is required for the authentication",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "conditional-user-configured",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "auth-otp-form",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "3daadb6b-4d63-4be1-a89e-ec8e41e72afa",
+ "alias" : "Handle Existing Account",
+ "description" : "Handle what to do if there is existing account with same email/username like authenticated identity provider",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "idp-confirm-link",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : true,
+ "flowAlias" : "Account verification options",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "5942598c-d7e9-4941-b13e-4a8a75e2c2a3",
+ "alias" : "Reset - Conditional OTP",
+ "description" : "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "conditional-user-configured",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "reset-otp",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "6e4b336e-eb5f-423c-8d32-4ab94d1122e6",
+ "alias" : "User creation or linking",
+ "description" : "Flow for the existing/non-existing user alternatives",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticatorConfig" : "create unique user config",
+ "authenticator" : "idp-create-user-if-unique",
+ "authenticatorFlow" : false,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 20,
+ "autheticatorFlow" : true,
+ "flowAlias" : "Handle Existing Account",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "35ac1997-b6af-44ff-ab27-c34f9be32e56",
+ "alias" : "Verify Existing Account by Re-authentication",
+ "description" : "Reauthentication of existing account",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "idp-username-password-form",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "CONDITIONAL",
+ "priority" : 20,
+ "autheticatorFlow" : true,
+ "flowAlias" : "First broker login - Conditional OTP",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "a3473070-fe69-4de1-a0b2-dd54b8a769d5",
+ "alias" : "browser",
+ "description" : "browser based authentication",
+ "providerId" : "basic-flow",
+ "topLevel" : true,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "auth-cookie",
+ "authenticatorFlow" : false,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "auth-spnego",
+ "authenticatorFlow" : false,
+ "requirement" : "DISABLED",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "identity-provider-redirector",
+ "authenticatorFlow" : false,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 25,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 30,
+ "autheticatorFlow" : true,
+ "flowAlias" : "forms",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "cc714857-b114-4df6-9030-b464bbb3964d",
+ "alias" : "clients",
+ "description" : "Base authentication for clients",
+ "providerId" : "client-flow",
+ "topLevel" : true,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "client-secret",
+ "authenticatorFlow" : false,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "client-jwt",
+ "authenticatorFlow" : false,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "client-secret-jwt",
+ "authenticatorFlow" : false,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 30,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "client-x509",
+ "authenticatorFlow" : false,
+ "requirement" : "ALTERNATIVE",
+ "priority" : 40,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "0ebe891c-1a72-4842-bf29-a9abe9c2a4d2",
+ "alias" : "direct grant",
+ "description" : "OpenID Connect Resource Owner Grant",
+ "providerId" : "basic-flow",
+ "topLevel" : true,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "direct-grant-validate-username",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "direct-grant-validate-password",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "CONDITIONAL",
+ "priority" : 30,
+ "autheticatorFlow" : true,
+ "flowAlias" : "Direct Grant - Conditional OTP",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "d97d5579-b3d4-49c4-a60e-0e1e6b1c9d79",
+ "alias" : "docker auth",
+ "description" : "Used by Docker clients to authenticate against the IDP",
+ "providerId" : "basic-flow",
+ "topLevel" : true,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "docker-http-basic-authenticator",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "009f7c28-0f41-4237-9911-9091c3d751b7",
+ "alias" : "first broker login",
+ "description" : "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account",
+ "providerId" : "basic-flow",
+ "topLevel" : true,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticatorConfig" : "review profile config",
+ "authenticator" : "idp-review-profile",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : true,
+ "flowAlias" : "User creation or linking",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "f9911022-b3cf-4d96-9a96-51bc53c437eb",
+ "alias" : "forms",
+ "description" : "Username, password, otp and other auth forms.",
+ "providerId" : "basic-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "auth-username-password-form",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "CONDITIONAL",
+ "priority" : 20,
+ "autheticatorFlow" : true,
+ "flowAlias" : "Browser - Conditional OTP",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "c53eb19d-49e9-4252-8a10-4d5c6a12e61b",
+ "alias" : "registration",
+ "description" : "registration flow",
+ "providerId" : "basic-flow",
+ "topLevel" : true,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "registration-page-form",
+ "authenticatorFlow" : true,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : true,
+ "flowAlias" : "registration form",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "3b4f48d3-1706-4630-80e0-e0542780a1f7",
+ "alias" : "registration form",
+ "description" : "registration form",
+ "providerId" : "form-flow",
+ "topLevel" : false,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "registration-user-creation",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "registration-password-action",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 50,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "registration-recaptcha-action",
+ "authenticatorFlow" : false,
+ "requirement" : "DISABLED",
+ "priority" : 60,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "5520aa89-cd76-438a-abae-7ccd3a2d7615",
+ "alias" : "reset credentials",
+ "description" : "Reset credentials for a user if they forgot their password or something",
+ "providerId" : "basic-flow",
+ "topLevel" : true,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "reset-credentials-choose-user",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "reset-credential-email",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 20,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticator" : "reset-password",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 30,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ }, {
+ "authenticatorFlow" : true,
+ "requirement" : "CONDITIONAL",
+ "priority" : 40,
+ "autheticatorFlow" : true,
+ "flowAlias" : "Reset - Conditional OTP",
+ "userSetupAllowed" : false
+ } ]
+ }, {
+ "id" : "cce548d6-9bef-4449-88ea-99b949488fe7",
+ "alias" : "saml ecp",
+ "description" : "SAML ECP Profile Authentication Flow",
+ "providerId" : "basic-flow",
+ "topLevel" : true,
+ "builtIn" : true,
+ "authenticationExecutions" : [ {
+ "authenticator" : "http-basic-authenticator",
+ "authenticatorFlow" : false,
+ "requirement" : "REQUIRED",
+ "priority" : 10,
+ "autheticatorFlow" : false,
+ "userSetupAllowed" : false
+ } ]
+ } ],
+ "authenticatorConfig" : [ {
+ "id" : "0848606c-7510-4b09-ba0e-4dc2ef3d63f8",
+ "alias" : "create unique user config",
+ "config" : {
+ "require.password.update.after.registration" : "false"
+ }
+ }, {
+ "id" : "91a8dee7-c679-4202-866e-234eb4164cfd",
+ "alias" : "review profile config",
+ "config" : {
+ "update.profile.on.first.login" : "missing"
+ }
+ } ],
+ "requiredActions" : [ {
+ "alias" : "CONFIGURE_TOTP",
+ "name" : "Configure OTP",
+ "providerId" : "CONFIGURE_TOTP",
+ "enabled" : true,
+ "defaultAction" : false,
+ "priority" : 10,
+ "config" : { }
+ }, {
+ "alias" : "TERMS_AND_CONDITIONS",
+ "name" : "Terms and Conditions",
+ "providerId" : "TERMS_AND_CONDITIONS",
+ "enabled" : false,
+ "defaultAction" : false,
+ "priority" : 20,
+ "config" : { }
+ }, {
+ "alias" : "UPDATE_PASSWORD",
+ "name" : "Update Password",
+ "providerId" : "UPDATE_PASSWORD",
+ "enabled" : true,
+ "defaultAction" : false,
+ "priority" : 30,
+ "config" : { }
+ }, {
+ "alias" : "UPDATE_PROFILE",
+ "name" : "Update Profile",
+ "providerId" : "UPDATE_PROFILE",
+ "enabled" : true,
+ "defaultAction" : false,
+ "priority" : 40,
+ "config" : { }
+ }, {
+ "alias" : "VERIFY_EMAIL",
+ "name" : "Verify Email",
+ "providerId" : "VERIFY_EMAIL",
+ "enabled" : true,
+ "defaultAction" : false,
+ "priority" : 50,
+ "config" : { }
+ }, {
+ "alias" : "delete_account",
+ "name" : "Delete Account",
+ "providerId" : "delete_account",
+ "enabled" : false,
+ "defaultAction" : false,
+ "priority" : 60,
+ "config" : { }
+ }, {
+ "alias" : "delete_credential",
+ "name" : "Delete Credential",
+ "providerId" : "delete_credential",
+ "enabled" : true,
+ "defaultAction" : false,
+ "priority" : 100,
+ "config" : { }
+ }, {
+ "alias" : "idp_link",
+ "name" : "Linking Identity Provider",
+ "providerId" : "idp_link",
+ "enabled" : true,
+ "defaultAction" : false,
+ "priority" : 120,
+ "config" : { }
+ }, {
+ "alias" : "update_user_locale",
+ "name" : "Update User Locale",
+ "providerId" : "update_user_locale",
+ "enabled" : true,
+ "defaultAction" : false,
+ "priority" : 1000,
+ "config" : { }
+ } ],
+ "browserFlow" : "browser",
+ "registrationFlow" : "registration",
+ "directGrantFlow" : "direct grant",
+ "resetCredentialsFlow" : "reset credentials",
+ "clientAuthenticationFlow" : "clients",
+ "dockerAuthenticationFlow" : "docker auth",
+ "firstBrokerLoginFlow" : "first broker login",
+ "attributes" : {
+ "cibaBackchannelTokenDeliveryMode" : "poll",
+ "cibaExpiresIn" : "120",
+ "cibaAuthRequestedUserHint" : "login_hint",
+ "oauth2DeviceCodeLifespan" : "600",
+ "clientOfflineSessionMaxLifespan" : "0",
+ "oauth2DevicePollingInterval" : "5",
+ "clientSessionIdleTimeout" : "0",
+ "parRequestUriLifespan" : "60",
+ "clientSessionMaxLifespan" : "0",
+ "clientOfflineSessionIdleTimeout" : "0",
+ "cibaInterval" : "5",
+ "realmReusableOtpCode" : "false"
+ },
+ "keycloakVersion" : "26.4.0",
+ "userManagedAccessAllowed" : false,
+ "organizationsEnabled" : false,
+ "verifiableCredentialsEnabled" : false,
+ "adminPermissionsEnabled" : false,
+ "clientProfiles" : {
+ "profiles" : [ ]
+ },
+ "clientPolicies" : {
+ "policies" : [ ]
+ }
+}
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/css/theme.css b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/css/theme.css
new file mode 100644
index 0000000000..4a529475c6
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/css/theme.css
@@ -0,0 +1,43 @@
+/* OpenCloud branding overrides for the Keycloak login theme. */
+:root {
+  /* PatternFly color variables used by the base Keycloak login theme */
+  --pf-global--primary-color--100: #e2baff;
+  --pf-global--primary-color--200: #e2baff;
+  --pf-global--primary-color--dark-100: #e2baff;
+  --pf-global--Color--light-100: #20434f;
+}
+
+@font-face {
+  font-family: OpenCloud;
+  src: url('../fonts/OpenCloud500-Regular.woff2') format('woff2');
+  font-weight: normal;
+  font-style: normal;
+}
+
+@font-face {
+  font-family: OpenCloud;
+  src: url('../fonts/OpenCloud750-Bold.woff2') format('woff2');
+  font-weight: bold;
+  font-style: normal;
+}
+
+body {
+  font-family: "OpenCloud", "Open Sans", Helvetica, Arial, sans-serif;
+  /* !important overrides the base theme's own background rules */
+  background: url(../img/background.png) no-repeat center !important;
+  background-size: cover !important;
+}
+
+/* Replace the Keycloak logo text with the OpenCloud logo */
+.kc-logo-text {
+  background-image: url(../img/logo.svg) !important;
+  background-size: contain;
+  width: 400px;
+  margin: 0 !important;
+}
+
+/* Center the logo horizontally */
+#kc-header-wrapper{
+  display: flex;
+  justify-content: center;
+}
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/fonts/OpenCloud500-Regular.woff2 b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/fonts/OpenCloud500-Regular.woff2
new file mode 100644
index 0000000000..219abd872f
Binary files /dev/null and b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/fonts/OpenCloud500-Regular.woff2 differ
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/fonts/OpenCloud750-Bold.woff2 b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/fonts/OpenCloud750-Bold.woff2
new file mode 100644
index 0000000000..1bc7076851
Binary files /dev/null and b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/fonts/OpenCloud750-Bold.woff2 differ
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/img/background.png b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/img/background.png
new file mode 100644
index 0000000000..9eaad2c550
Binary files /dev/null and b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/img/background.png differ
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/img/logo.svg b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/img/logo.svg
new file mode 100644
index 0000000000..c8e40ea364
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/img/logo.svg
@@ -0,0 +1,14 @@
+
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/js/script.js b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/js/script.js
new file mode 100644
index 0000000000..9c1d5165d8
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/resources/js/script.js
@@ -0,0 +1,19 @@
+document.addEventListener("DOMContentLoaded", function () {
+ const setLogoUrl = (url) => {
+ const logoTextSelector = document.querySelector(".kc-logo-text");
+
+ if (!logoTextSelector) {
+ return
+ }
+
+ const link = document.createElement("a");
+ link.href = url;
+ link.target = "_blank";
+
+ const parent = logoTextSelector.parentNode;
+ parent.insertBefore(link, logoTextSelector);
+ link.appendChild(logoTextSelector);
+ }
+
+ setLogoUrl('https://opencloud.eu')
+});
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/theme.properties b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/theme.properties
new file mode 100644
index 0000000000..767efe4528
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/keycloak/themes/opencloud/login/theme.properties
@@ -0,0 +1,5 @@
+parent=keycloak
+import=common/keycloak
+
+styles=css/login.css css/theme.css
+scripts=js/script.js
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/config/ldap/docker-entrypoint-override.sh b/devtools/deployments/multi-tenancy/config/ldap/docker-entrypoint-override.sh
new file mode 100644
index 0000000000..67dc850a63
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/ldap/docker-entrypoint-override.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Wrapper around the Bitnami OpenLDAP entrypoint that provisions a
+# self-signed TLS certificate on first start.
+set -e
+echo "Running custom LDAP entrypoint script..."
+
+# Generate key + certificate only once; subsequent restarts reuse them.
+if [ ! -f /opt/bitnami/openldap/share/openldap.key ]
+then
+  openssl req -x509 -newkey rsa:4096 -keyout /opt/bitnami/openldap/share/openldap.key -out /opt/bitnami/openldap/share/openldap.crt -sha256 -days 365 -batch -nodes
+fi
+# exec the original entrypoint so it stays PID 1 and receives container signals
+exec /opt/bitnami/scripts/openldap/entrypoint.sh "$@"
diff --git a/devtools/deployments/multi-tenancy/config/ldap/ldif/10_base.ldif b/devtools/deployments/multi-tenancy/config/ldap/ldif/10_base.ldif
new file mode 100644
index 0000000000..650a5f3785
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/ldap/ldif/10_base.ldif
@@ -0,0 +1,25 @@
+# Base LDAP tree: organization root plus OUs for users, groups and tenants.
+dn: dc=opencloud,dc=eu
+objectClass: organization
+objectClass: dcObject
+dc: opencloud
+o: openCloud
+
+dn: ou=users,dc=opencloud,dc=eu
+objectClass: organizationalUnit
+ou: users
+
+dn: ou=groups,dc=opencloud,dc=eu
+objectClass: organizationalUnit
+ou: groups
+
+dn: cn=admin,dc=opencloud,dc=eu
+objectClass: inetOrgPerson
+objectClass: person
+cn: admin
+sn: admin
+uid: ldapadmin
+
+dn: ou=tenants,dc=opencloud,dc=eu
+objectClass: organizationalUnit
+ou: tenants
diff --git a/devtools/deployments/multi-tenancy/config/ldap/ldif/20_admin.ldif b/devtools/deployments/multi-tenancy/config/ldap/ldif/20_admin.ldif
new file mode 100644
index 0000000000..966cbeafdf
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/ldap/ldif/20_admin.ldif
@@ -0,0 +1,22 @@
+# Demo admin user. The '::' syntax means the value is base64-encoded;
+# the decoded value is an {SSHA} password hash.
+dn: uid=admin,ou=users,dc=opencloud,dc=eu
+objectClass: inetOrgPerson
+objectClass: organizationalPerson
+objectClass: person
+objectClass: top
+uid: admin
+givenName: Admin
+sn: Admin
+cn: admin
+displayName: Admin
+description: An admin for this OpenCloud instance.
+mail: admin@example.org
+userPassword:: e1NTSEF9UWhmaFB3dERydTUydURoWFFObDRMbzVIckI3TkI5Nmo=
+
+dn: cn=administrators,ou=groups,dc=opencloud,dc=eu
+objectClass: groupOfNames
+objectClass: top
+cn: administrators
+description: OpenCloud Administrators
+member: uid=admin,ou=users,dc=opencloud,dc=eu
diff --git a/devtools/deployments/multi-tenancy/config/opencloud/apps/.gitkeep b/devtools/deployments/multi-tenancy/config/opencloud/apps/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/devtools/deployments/multi-tenancy/config/opencloud/csp.yaml b/devtools/deployments/multi-tenancy/config/opencloud/csp.yaml
new file mode 100644
index 0000000000..9314c97fb4
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/opencloud/csp.yaml
@@ -0,0 +1,44 @@
+directives:
+ child-src:
+ - '''self'''
+ connect-src:
+ - '''self'''
+ - 'blob:'
+ - 'https://${COMPANION_DOMAIN|companion.opencloud.test}/'
+ - 'wss://${COMPANION_DOMAIN|companion.opencloud.test}/'
+ - 'https://raw.githubusercontent.com/opencloud-eu/awesome-apps/'
+ - 'https://${IDP_DOMAIN|keycloak.opencloud.test}/'
+ default-src:
+ - '''none'''
+ font-src:
+ - '''self'''
+ frame-ancestors:
+ - '''self'''
+ frame-src:
+ - '''self'''
+ - 'blob:'
+ - 'https://embed.diagrams.net/'
+    # Contrary to bash and docker, the default value is given after the | character
+ - 'https://${COLLABORA_DOMAIN|collabora.opencloud.test}/'
+ # This is needed for the external-sites web extension when embedding sites
+ - 'https://docs.opencloud.eu'
+ img-src:
+ - '''self'''
+ - 'data:'
+ - 'blob:'
+ - 'https://raw.githubusercontent.com/opencloud-eu/awesome-apps/'
+    # Contrary to bash and docker, the default value is given after the | character
+ - 'https://${COLLABORA_DOMAIN|collabora.opencloud.test}/'
+ manifest-src:
+ - '''self'''
+ media-src:
+ - '''self'''
+ object-src:
+ - '''self'''
+ - 'blob:'
+ script-src:
+ - '''self'''
+ - '''unsafe-inline'''
+ style-src:
+ - '''self'''
+ - '''unsafe-inline'''
diff --git a/devtools/deployments/multi-tenancy/config/opencloud/proxy.yaml b/devtools/deployments/multi-tenancy/config/opencloud/proxy.yaml
new file mode 100644
index 0000000000..93d34af675
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/opencloud/proxy.yaml
@@ -0,0 +1,40 @@
+# This adds four additional routes to the proxy, forwarding
+# requests on '/carddav/', '/caldav/' and the respective '/.well-known'
+# endpoints to the radicale container and setting the required headers.
+additional_policies:
+ - name: default
+ routes:
+ - endpoint: /caldav/
+ backend: http://radicale:5232
+ remote_user_header: X-Remote-User
+ skip_x_access_token: true
+ additional_headers:
+ - X-Script-Name: /caldav
+ - endpoint: /.well-known/caldav
+ backend: http://radicale:5232
+ remote_user_header: X-Remote-User
+ skip_x_access_token: true
+ additional_headers:
+ - X-Script-Name: /caldav
+ - endpoint: /carddav/
+ backend: http://radicale:5232
+ remote_user_header: X-Remote-User
+ skip_x_access_token: true
+ additional_headers:
+ - X-Script-Name: /carddav
+ - endpoint: /.well-known/carddav
+ backend: http://radicale:5232
+ remote_user_header: X-Remote-User
+ skip_x_access_token: true
+ additional_headers:
+ - X-Script-Name: /carddav
+# To enable the radicale web UI add this rule.
+# "unprotected" is true because the Web UI itself asks for
+# the password.
+# Also set "type" to "internal" in the config/radicale/config
+# - endpoint: /caldav/.web/
+# backend: http://radicale:5232/
+# unprotected: true
+# skip_x_access_token: true
+# additional_headers:
+# - X-Script-Name: /caldav
diff --git a/devtools/deployments/multi-tenancy/config/traefik/docker-entrypoint-override.sh b/devtools/deployments/multi-tenancy/config/traefik/docker-entrypoint-override.sh
new file mode 100644
index 0000000000..456a62df46
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/config/traefik/docker-entrypoint-override.sh
@@ -0,0 +1,73 @@
+#!/bin/sh
+# Entrypoint wrapper: assemble the Traefik command line from environment
+# variables, then exec Traefik. Secrets are intentionally NOT printed here.
+set -e
+
+# Append one argument to the Traefik command line.
+add_arg() {
+  TRAEFIK_CMD="$TRAEFIK_CMD $1"
+}
+
+# Initialize the base command
+TRAEFIK_CMD="traefik"
+
+# Base Traefik arguments (from your existing configuration)
+add_arg "--log.level=${TRAEFIK_LOG_LEVEL:-ERROR}"
+# enable dashboard
+add_arg "--api.dashboard=true"
+# define entrypoints
+add_arg "--entryPoints.http.address=:80"
+add_arg "--entryPoints.http.http.redirections.entryPoint.to=https"
+add_arg "--entryPoints.http.http.redirections.entryPoint.scheme=https"
+add_arg "--entryPoints.https.address=:443"
+# change default timeouts for long-running requests
+# this is needed for webdav clients that do not support the TUS protocol
+add_arg "--entryPoints.https.transport.respondingTimeouts.readTimeout=12h"
+add_arg "--entryPoints.https.transport.respondingTimeouts.writeTimeout=12h"
+add_arg "--entryPoints.https.transport.respondingTimeouts.idleTimeout=3m"
+# docker provider (get configuration from container labels)
+add_arg "--providers.docker.endpoint=unix:///var/run/docker.sock"
+add_arg "--providers.docker.exposedByDefault=false"
+# access log
+add_arg "--accessLog=${TRAEFIK_ACCESS_LOG:-false}"
+add_arg "--accessLog.format=json"
+add_arg "--accessLog.fields.headers.names.X-Request-Id=keep"
+
+# Add Let's Encrypt configuration if enabled
+if [ "${TRAEFIK_SERVICES_TLS_CONFIG}" = "tls.certresolver=letsencrypt" ]; then
+  echo "Configuring Traefik with Let's Encrypt..."
+  add_arg "--certificatesResolvers.letsencrypt.acme.email=${TRAEFIK_ACME_MAIL:-example@example.org}"
+  add_arg "--certificatesResolvers.letsencrypt.acme.storage=/certs/acme.json"
+  add_arg "--certificatesResolvers.letsencrypt.acme.httpChallenge.entryPoint=http"
+  add_arg "--certificatesResolvers.letsencrypt.acme.caserver=${TRAEFIK_ACME_CASERVER:-https://acme-v02.api.letsencrypt.org/directory}"
+fi
+
+# Add local certificate configuration if enabled
+if [ "${TRAEFIK_SERVICES_TLS_CONFIG}" = "tls=true" ]; then
+  echo "Configuring Traefik with local certificates..."
+  add_arg "--providers.file.directory=/etc/traefik/dynamic"
+  add_arg "--providers.file.watch=true"
+fi
+
+# Warning if neither certificate method is enabled
+if [ "${TRAEFIK_SERVICES_TLS_CONFIG}" != "tls=true" ] && [ "${TRAEFIK_SERVICES_TLS_CONFIG}" != "tls.certresolver=letsencrypt" ]; then
+  echo "WARNING: Neither Let's Encrypt nor local certificates are enabled."
+  echo "HTTPS will not work properly without certificate configuration."
+fi
+
+# Add any custom arguments from environment variable
+if [ -n "${TRAEFIK_CUSTOM_ARGS}" ]; then
+  echo "Adding custom Traefik arguments: ${TRAEFIK_CUSTOM_ARGS}"
+  TRAEFIK_CMD="$TRAEFIK_CMD $TRAEFIK_CUSTOM_ARGS"
+fi
+
+# Add any additional arguments passed to the script
+for arg in "$@"; do
+  add_arg "$arg"
+done
+
+# Print the final command for debugging
+echo "Starting Traefik with command:"
+echo "$TRAEFIK_CMD"
+
+# Execute Traefik; word splitting of $TRAEFIK_CMD is intentional here.
+exec $TRAEFIK_CMD
diff --git a/devtools/deployments/multi-tenancy/config/traefik/dynamic/.gitkeep b/devtools/deployments/multi-tenancy/config/traefik/dynamic/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/devtools/deployments/multi-tenancy/docker-compose.yml b/devtools/deployments/multi-tenancy/docker-compose.yml
new file mode 100644
index 0000000000..dcba81d794
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/docker-compose.yml
@@ -0,0 +1,119 @@
+---
+services:
+ # OpenCloud instance configured for multi-tenancy using keycloak as identity provider
+ # The graph service is setup to consume users via the CS3 API.
+ opencloud:
+ image: ${OC_DOCKER_IMAGE:-opencloudeu/opencloud-rolling}:${OC_DOCKER_TAG:-latest}
+ # changelog: https://github.com/opencloud-eu/opencloud/tree/main/changelog
+ # release notes: https://docs.opencloud.eu/opencloud_release_notes.html
+ networks:
+ opencloud-net:
+ entrypoint:
+ - /bin/sh
+ # run opencloud init to initialize a configuration file with random secrets
+ # it will fail on subsequent runs, because the config file already exists
+ # therefore we ignore the error and then start the opencloud server
+ command: ["-c", "opencloud init || true; opencloud server"]
+ environment:
+ OC_MULTI_TENANT_ENABLED: "true"
+ # enable services that are not started automatically
+ OC_URL: https://${OC_DOMAIN:-cloud.opencloud.test}
+ OC_LOG_LEVEL: ${LOG_LEVEL:-info}
+ OC_LOG_COLOR: "${LOG_PRETTY:-false}"
+ OC_LOG_PRETTY: "${LOG_PRETTY:-false}"
+ OC_EXCLUDE_RUN_SERVICES: idm,idp
+ PROXY_ROLE_ASSIGNMENT_DRIVER: "oidc"
+ OC_OIDC_ISSUER: https://${KEYCLOAK_DOMAIN:-keycloak.opencloud.test}/realms/openCloud
+ PROXY_OIDC_REWRITE_WELLKNOWN: "true"
+ WEB_OIDC_CLIENT_ID: ${OC_OIDC_CLIENT_ID:-web}
+ PROXY_USER_OIDC_CLAIM: "uuid"
+ PROXY_USER_CS3_CLAIM: "userid"
+ WEB_OPTION_ACCOUNT_EDIT_LINK_HREF: "https://${KEYCLOAK_DOMAIN:-keycloak.opencloud.test}/realms/openCloud/account"
+ # admin and demo accounts must be created in Keycloak
+ OC_ADMIN_USER_ID: ""
+ SETTINGS_SETUP_DEFAULT_ASSIGNMENTS: "false"
+ GRAPH_ASSIGN_DEFAULT_USER_ROLE: "false"
+ GRAPH_USERNAME_MATCH: "none"
+ GROUPS_DRIVER: "null"
+ # This is needed to set the correct CSP rules for OpenCloud
+ IDP_DOMAIN: ${KEYCLOAK_DOMAIN:-keycloak.opencloud.test}
+ # do not use SSL between the reverse proxy and OpenCloud
+ PROXY_TLS: "false"
+ # INSECURE: needed if OpenCloud / reverse proxy is using self generated certificates
+ OC_INSECURE: "${INSECURE:-false}"
+ # basic auth (not recommended, but needed for eg. WebDav clients that do not support OpenID Connect)
+ PROXY_ENABLE_BASIC_AUTH: "false"
+ GRAPH_IDENTITY_BACKEND: "cs3"
+ PROXY_CSP_CONFIG_FILE_LOCATION: /etc/opencloud/csp.yaml
+ OC_LDAP_URI: ldaps://ldap-server:1636
+ OC_LDAP_INSECURE: "true"
+ OC_LDAP_BIND_DN: "cn=admin,dc=opencloud,dc=eu"
+ OC_LDAP_BIND_PASSWORD: ${LDAP_BIND_PASSWORD:-admin}
+ OC_LDAP_USER_BASE_DN: "ou=users,dc=opencloud,dc=eu"
+ OC_LDAP_USER_SCHEMA_TENANT_ID: "openCloudMemberOfSchool"
+ PROXY_LOG_LEVEL: "debug"
+ volumes:
+ - ./config/opencloud/csp.yaml:/etc/opencloud/csp.yaml
+ # configure the .env file to use own paths instead of docker internal volumes
+ - ${OC_CONFIG_DIR:-opencloud-config}:/etc/opencloud
+ - ${OC_DATA_DIR:-opencloud-data}:/var/lib/opencloud
+ logging:
+ driver: ${LOG_DRIVER:-local}
+ restart: always
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.opencloud.entrypoints=https"
+ - "traefik.http.routers.opencloud.rule=Host(`${OC_DOMAIN:-cloud.opencloud.test}`)"
+ - "traefik.http.routers.opencloud.service=opencloud"
+ - "traefik.http.services.opencloud.loadbalancer.server.port=9200"
+ - "traefik.http.routers.opencloud.${TRAEFIK_SERVICES_TLS_CONFIG}"
+  # Stand-alone instance of the 'graph' service to serve the provisioning API
+  provisioning: # NOTE: key was misspelled "provsioning"
+    image: ${OC_DOCKER_IMAGE:-opencloudeu/opencloud-rolling}:${OC_DOCKER_TAG:-latest}
+    networks:
+      opencloud-net:
+    entrypoint:
+      - /bin/sh
+    # run opencloud init to initialize a configuration file with random secrets
+    # it will fail on subsequent runs, because the config file already exists
+    # therefore we ignore the error and then start the opencloud server
+    command: ["-c", "opencloud init || true; opencloud graph server"]
+    environment:
+      OC_LOG_LEVEL: "debug"
+      OC_LOG_COLOR: "${LOG_PRETTY:-false}"
+      OC_LOG_PRETTY: "${LOG_PRETTY:-false}"
+      # This just runs the standalone graph service we don't need access to the registry
+      MICRO_REGISTRY: "memory"
+      # INSECURE: needed if OpenCloud / reverse proxy is using self generated certificates
+      OC_INSECURE: "${INSECURE:-false}"
+      GRAPH_HTTP_ADDR: "0.0.0.0:9120"
+      GRAPH_HTTP_API_TOKEN: "${PROVISIONING_API_TOKEN:-changeme}"
+      # disable listening for events
+      GRAPH_EVENTS_ENDPOINT: ""
+      GRAPH_STORE_NODES: ""
+      GRAPH_ASSIGN_DEFAULT_USER_ROLE: "false"
+      GRAPH_USERNAME_MATCH: "none"
+      GRAPH_LDAP_EDUCATION_RESOURCES_ENABLED: "true"
+      GRAPH_LDAP_SCHOOL_BASE_DN: "ou=tenants,dc=opencloud,dc=eu"
+      OC_LDAP_URI: ldaps://ldap-server:1636
+      OC_LDAP_INSECURE: "true"
+      OC_LDAP_BIND_DN: "cn=admin,dc=opencloud,dc=eu"
+      OC_LDAP_BIND_PASSWORD: ${LDAP_BIND_PASSWORD:-admin}
+      OC_LDAP_USER_BASE_DN: "ou=users,dc=opencloud,dc=eu"
+      OC_LDAP_USER_FILTER: "(objectclass=inetOrgPerson)"
+    volumes:
+      # configure the .env file to use own paths instead of docker internal volumes
+      - ${PROVISIONING_CONFIG_DIR:-provisioning-config}:/etc/opencloud
+    logging:
+      driver: ${LOG_DRIVER:-local}
+    restart: always
+    ports:
+      - "9120:9120"
+
+volumes:
+ opencloud-config:
+ opencloud-data:
+ provisioning-config:
+
+networks:
+ opencloud-net:
diff --git a/devtools/deployments/multi-tenancy/initialize_users.go b/devtools/deployments/multi-tenancy/initialize_users.go
new file mode 100644
index 0000000000..ebbbbb1073
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/initialize_users.go
@@ -0,0 +1,169 @@
+package main
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+
+ "github.com/Nerzal/gocloak/v13"
+ "github.com/go-resty/resty/v2"
+ libregraph "github.com/opencloud-eu/libre-graph-api-go"
+)
+
+// Connection settings for the stand-alone provisioning graph service.
+// provisioningAuthToken must match GRAPH_HTTP_API_TOKEN
+// (PROVISIONING_API_TOKEN, default "changeme") in docker-compose.yml,
+// which publishes the service on port 9120.
+const (
+	provisioningAPIURL    = "http://localhost:9120/graph"
+	provisioningAuthToken = "changeme"
+)
+
+// tenantWithUsers groups a tenant (modeled as a libregraph EducationSchool)
+// with the education users to be created inside it.
+type tenantWithUsers struct {
+	tenant libregraph.EducationSchool
+	users  []libregraph.EducationUser
+}
+
+// demoTenants is the demo data set provisioned by main: two tenants with
+// two users each.
+var demoTenants = []tenantWithUsers{
+	{
+		tenant: libregraph.EducationSchool{
+			DisplayName: libregraph.PtrString("Famous Coders"),
+		},
+		users: []libregraph.EducationUser{
+			{
+				DisplayName:              libregraph.PtrString("Dennis Ritchie"),
+				OnPremisesSamAccountName: libregraph.PtrString("dennis"),
+				Mail:                     libregraph.PtrString("dennis@example.org"),
+			},
+			{
+				DisplayName:              libregraph.PtrString("Grace Hopper"),
+				OnPremisesSamAccountName: libregraph.PtrString("grace"),
+				Mail:                     libregraph.PtrString("grace@example.org"),
+			},
+		},
+	},
+	{
+		tenant: libregraph.EducationSchool{
+			DisplayName: libregraph.PtrString("Scientists"),
+		},
+		users: []libregraph.EducationUser{
+			{
+				DisplayName:              libregraph.PtrString("Albert Einstein"),
+				OnPremisesSamAccountName: libregraph.PtrString("einstein"),
+				Mail:                     libregraph.PtrString("einstein@example.org"),
+			},
+			{
+				DisplayName:              libregraph.PtrString("Marie Curie"),
+				OnPremisesSamAccountName: libregraph.PtrString("marie"),
+				Mail:                     libregraph.PtrString("marie@example.org"),
+			},
+		},
+	},
+}
+
+func main() {
+	// Configure a libregraph client pointed at the stand-alone provisioning
+	// graph service, authenticated with the static bearer token.
+	lgconf := libregraph.NewConfiguration()
+	lgconf.Servers = libregraph.ServerConfigurations{
+		{
+			URL: provisioningAPIURL,
+		},
+	}
+	lgconf.DefaultHeader = map[string]string{"Authorization": "Bearer " + provisioningAuthToken}
+	lgclient := libregraph.NewAPIClient(lgconf)
+
+	// Create each demo tenant, then its users, then link each user to the
+	// tenant. Failures are logged and the remaining items are processed.
+	for _, tenant := range demoTenants {
+		tenantid, err := createTenant(lgclient, tenant.tenant)
+		if err != nil {
+			fmt.Printf("Failed to create tenant: %s\n", err)
+			continue
+		}
+		for _, user := range tenant.users {
+			userid1, err := createUser(lgclient, user)
+			if err != nil {
+				fmt.Printf("Failed to create user: %s\n", err)
+				continue
+			}
+			_, err = lgclient.EducationSchoolApi.AddUserToSchool(context.TODO(), tenantid).EducationUserReference(libregraph.EducationUserReference{
+				OdataId: libregraph.PtrString(fmt.Sprintf("%s/education/users/%s", provisioningAPIURL, userid1)),
+			}).Execute()
+			if err != nil {
+				fmt.Printf("Failed to add user to tenant: %s\n", err)
+				continue
+			}
+		}
+	}
+
+	// Finish Keycloak-side setup: demo passwords and the opencloudUser role.
+	resetAllUserPasswords()
+	setUserRoles()
+}
+
+// createUser creates an education user via the provisioning API and returns
+// the id of the newly created user.
+func createUser(client *libregraph.APIClient, user libregraph.EducationUser) (string, error) {
+	newUser, _, err := client.EducationUserApi.CreateEducationUser(context.TODO()).EducationUser(user).Execute()
+	if err != nil {
+		// The caller logs the failure; don't print the same error twice.
+		return "", err
+	}
+	fmt.Printf("Created user: %s with id %s\n", newUser.GetDisplayName(), newUser.GetId())
+	return newUser.GetId(), nil
+}
+
+// createTenant creates a tenant (EducationSchool) via the provisioning API
+// and returns the id of the newly created tenant.
+func createTenant(client *libregraph.APIClient, tenant libregraph.EducationSchool) (string, error) {
+	newTenant, _, err := client.EducationSchoolApi.CreateSchool(context.TODO()).EducationSchool(tenant).Execute()
+	if err != nil {
+		// The caller logs the failure; don't print the same error twice.
+		return "", err
+	}
+	fmt.Printf("Created tenant: %s with id %s\n", newTenant.GetDisplayName(), newTenant.GetId())
+	return newTenant.GetId(), nil
+}
+
+// setUserRoles assigns the "opencloudUser" realm role to every user in the
+// openCloud realm via the Keycloak admin API.
+func setUserRoles() {
+	tlsConfig := tls.Config{InsecureSkipVerify: true}
+	restyClient := resty.New().SetTLSClientConfig(&tlsConfig)
+	client := gocloak.NewClient("https://keycloak.opencloud.test")
+	client.SetRestyClient(restyClient)
+	ctx := context.Background()
+	token, err := client.LoginAdmin(ctx, "kcadmin", "admin", "master")
+	if err != nil {
+		fmt.Printf("Failed to login: %s\n", err)
+		panic("Something wrong with the credentials or url")
+	}
+
+	// The lookup error was previously ignored; dereferencing a nil role
+	// below would panic with an opaque runtime error. Fail clearly instead.
+	role, err := client.GetRealmRole(ctx, token.AccessToken, "openCloud", "opencloudUser")
+	if err != nil {
+		fmt.Printf("Failed to look up realm role: %s\n", err)
+		panic("Realm role 'opencloudUser' not found")
+	}
+	users, err := client.GetUsers(ctx, token.AccessToken, "openCloud", gocloak.GetUsersParams{})
+	if err != nil {
+		fmt.Printf("%s\n", err)
+		panic("Oh no!, failed to list users :(")
+	}
+	for _, user := range users {
+		err := client.AddRealmRoleToUser(ctx, token.AccessToken, "openCloud", *user.ID, []gocloak.Role{
+			*role,
+		})
+		if err != nil {
+			fmt.Printf("Failed to assign role to user %s: %s\n", *user.Username, err)
+		}
+	}
+}
+
+// resetAllUserPasswords sets the password of every user in the openCloud
+// realm to the non-temporary demo password "demo".
+func resetAllUserPasswords() {
+	tlsConfig := tls.Config{InsecureSkipVerify: true}
+	restyClient := resty.New().SetTLSClientConfig(&tlsConfig)
+	client := gocloak.NewClient("https://keycloak.opencloud.test")
+	client.SetRestyClient(restyClient)
+	ctx := context.Background()
+
+	token, err := client.LoginAdmin(ctx, "kcadmin", "admin", "master")
+	if err != nil {
+		fmt.Printf("Failed to login: %s\n", err)
+		panic("Something wrong with the credentials or url")
+	}
+
+	users, err := client.GetUsers(ctx, token.AccessToken, "openCloud", gocloak.GetUsersParams{})
+	if err != nil {
+		fmt.Printf("%s\n", err)
+		panic("Oh no!, failed to list users :(")
+	}
+
+	for _, user := range users {
+		fmt.Printf("Setting password for user: %s\n", *user.Username)
+		if err := client.SetPassword(ctx, token.AccessToken, *user.ID, "openCloud", "demo", false); err != nil {
+			fmt.Printf("Failed to set password for user %s: %s\n", *user.Username, err)
+		}
+	}
+}
diff --git a/devtools/deployments/multi-tenancy/keycloak.yml b/devtools/deployments/multi-tenancy/keycloak.yml
new file mode 100644
index 0000000000..ecc55479aa
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/keycloak.yml
@@ -0,0 +1,55 @@
+---
+services:
+ opencloud:
+ environment:
+ postgres:
+ image: postgres:alpine
+ networks:
+ opencloud-net:
+ volumes:
+ - keycloak_postgres_data:/var/lib/postgresql/data
+ environment:
+ POSTGRES_DB: keycloak
+ POSTGRES_USER: ${KC_DB_USERNAME:-keycloak}
+ POSTGRES_PASSWORD: ${KC_DB_PASSWORD:-keycloak}
+ logging:
+ driver: ${LOG_DRIVER:-local}
+ restart: always
+
+ keycloak:
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.keycloak.entrypoints=https"
+ - "traefik.http.routers.keycloak.rule=Host(`${KEYCLOAK_DOMAIN:-keycloak.opencloud.test}`)"
+ - "traefik.http.routers.keycloak.${TRAEFIK_SERVICES_TLS_CONFIG}"
+ - "traefik.http.routers.keycloak.service=keycloak"
+ - "traefik.http.services.keycloak.loadbalancer.server.port=8080"
+ image: quay.io/keycloak/keycloak:26.4
+ networks:
+ opencloud-net:
+ command: [ "start", "--spi-connections-http-client-default-disable-trust-manager=${INSECURE:-false}", "--import-realm" ]
+ entrypoint: [ "/bin/sh", "/opt/keycloak/bin/docker-entrypoint-override.sh" ]
+ volumes:
+ - "./config/keycloak/docker-entrypoint-override.sh:/opt/keycloak/bin/docker-entrypoint-override.sh"
+ - "./config/keycloak/openCloud-realm.dist.json:/opt/keycloak/data/import-dist/openCloud-realm.json"
+ - "./config/keycloak/themes/opencloud:/opt/keycloak/themes/opencloud"
+ environment:
+ OC_DOMAIN: ${OC_DOMAIN:-cloud.opencloud.test}
+ KC_HOSTNAME: ${KEYCLOAK_DOMAIN:-keycloak.opencloud.test}
+ KC_DB: postgres
+ KC_DB_URL: "jdbc:postgresql://postgres:5432/keycloak"
+ KC_DB_USERNAME: ${KC_DB_USERNAME:-keycloak}
+ KC_DB_PASSWORD: ${KC_DB_PASSWORD:-keycloak}
+ KC_FEATURES: impersonation
+ KC_PROXY_HEADERS: xforwarded
+ KC_HTTP_ENABLED: true
+ KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN:-kcadmin}
+ KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD:-admin}
+ depends_on:
+ - postgres
+ logging:
+ driver: ${LOG_DRIVER:-local}
+ restart: always
+
+volumes:
+ keycloak_postgres_data:
diff --git a/devtools/deployments/multi-tenancy/ldap-server.yml b/devtools/deployments/multi-tenancy/ldap-server.yml
new file mode 100644
index 0000000000..3aebf80390
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/ldap-server.yml
@@ -0,0 +1,32 @@
+---
+services:
+ ldap-server:
+ image: bitnamilegacy/openldap:2.6
+ networks:
+ opencloud-net:
+ entrypoint: [ "/bin/sh", "/opt/bitnami/scripts/openldap/docker-entrypoint-override.sh", "/opt/bitnami/scripts/openldap/run.sh" ]
+ environment:
+ BITNAMI_DEBUG: true
+ LDAP_TLS_VERIFY_CLIENT: never
+ LDAP_ENABLE_TLS: "yes"
+ LDAP_TLS_CA_FILE: /opt/bitnami/openldap/share/openldap.crt
+ LDAP_TLS_CERT_FILE: /opt/bitnami/openldap/share/openldap.crt
+ LDAP_TLS_KEY_FILE: /opt/bitnami/openldap/share/openldap.key
+ LDAP_ROOT: "dc=opencloud,dc=eu"
+ LDAP_ADMIN_PASSWORD: ${LDAP_BIND_PASSWORD:-admin}
+ ports:
+ - "127.0.0.1:389:1389"
+ - "127.0.0.1:636:1636"
+ volumes:
+ # Only use the base ldif file to create the base structure
+ - ./config/ldap/ldif/10_base.ldif:/ldifs/10_base.ldif
+ # Use the custom schema from opencloud because we are in full control of the ldap server
+ - ../shared/config/ldap/schemas/10_opencloud_schema.ldif:/schemas/10_opencloud_schema.ldif
+ - ../shared/config/ldap/schemas/20_opencloud_education_schema.ldif:/schemas/20_opencloud_education_schema.ldif
+ - ./config/ldap/docker-entrypoint-override.sh:/opt/bitnami/scripts/openldap/docker-entrypoint-override.sh
+ - ${LDAP_CERTS_DIR:-ldap-certs}:/opt/bitnami/openldap/share
+ - ${LDAP_DATA_DIR:-ldap-data}:/bitnami/openldap
+
+volumes:
+ ldap-certs:
+ ldap-data:
diff --git a/devtools/deployments/multi-tenancy/testing/ldap-manager.yml b/devtools/deployments/multi-tenancy/testing/ldap-manager.yml
new file mode 100644
index 0000000000..2374ffe574
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/testing/ldap-manager.yml
@@ -0,0 +1,24 @@
+---
+# This file can be added to the opencloud_full example
+# to browse the LDAP server with a web interface.
+# This is not a production-ready setup.
+services:
+ ldap-manager:
+ image: phpldapadmin/phpldapadmin:latest
+ networks:
+ opencloud-net:
+ environment:
+ LDAP_HOST: ldap-server
+ LDAP_PORT: 1389
+ LDAP_LOGIN_OBJECTCLASS: "inetOrgPerson"
+ APP_URL: "https://${LDAP_MANAGER_DOMAIN:-ldap.opencloud.test}"
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.ldap-manager.entrypoints=https"
+ - "traefik.http.routers.ldap-manager.rule=Host(`${LDAP_MANAGER_DOMAIN:-ldap.opencloud.test}`)"
+ - "traefik.http.routers.ldap-manager.${TRAEFIK_SERVICES_TLS_CONFIG}"
+ - "traefik.http.routers.ldap-manager.service=ldap-manager"
+ - "traefik.http.services.ldap-manager.loadbalancer.server.port=8080"
+ logging:
+ driver: ${LOG_DRIVER:-local}
+ restart: always
\ No newline at end of file
diff --git a/devtools/deployments/multi-tenancy/traefik.yml b/devtools/deployments/multi-tenancy/traefik.yml
new file mode 100644
index 0000000000..528cc09b6a
--- /dev/null
+++ b/devtools/deployments/multi-tenancy/traefik.yml
@@ -0,0 +1,45 @@
+---
+services:
+ opencloud:
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.opencloud.entrypoints=https"
+ - "traefik.http.routers.opencloud.rule=Host(`${OC_DOMAIN:-cloud.opencloud.test}`)"
+ - "traefik.http.routers.opencloud.service=opencloud"
+ - "traefik.http.services.opencloud.loadbalancer.server.port=9200"
+ - "traefik.http.routers.opencloud.${TRAEFIK_SERVICES_TLS_CONFIG}"
+ traefik:
+ image: traefik:v3.3.1
+ # release notes: https://github.com/traefik/traefik/releases
+ networks:
+ opencloud-net:
+ aliases:
+ - ${OC_DOMAIN:-cloud.opencloud.test}
+ - ${KEYCLOAK_DOMAIN:-keycloak.opencloud.test}
+ entrypoint: [ "/bin/sh", "/opt/traefik/bin/docker-entrypoint-override.sh"]
+ environment:
+ - "TRAEFIK_SERVICES_TLS_CONFIG=${TRAEFIK_SERVICES_TLS_CONFIG:-tls.certresolver=letsencrypt}"
+ - "TRAEFIK_ACME_MAIL=${TRAEFIK_ACME_MAIL:-example@example.org}"
+ - "TRAEFIK_ACME_CASERVER=${TRAEFIK_ACME_CASERVER:-https://acme-v02.api.letsencrypt.org/directory}"
+ - "TRAEFIK_LOG_LEVEL=${TRAEFIK_LOG_LEVEL:-ERROR}"
+ - "TRAEFIK_ACCESS_LOG=${TRAEFIK_ACCESS_LOG:-false}"
+ ports:
+ - "80:80"
+ - "443:443"
+ volumes:
+ - "${DOCKER_SOCKET_PATH:-/var/run/docker.sock}:/var/run/docker.sock:ro"
+ - "./config/traefik/docker-entrypoint-override.sh:/opt/traefik/bin/docker-entrypoint-override.sh"
+ - "${TRAEFIK_CERTS_DIR:-./certs}:/certs"
+ - "./config/traefik/dynamic:/etc/traefik/dynamic"
+ labels:
+ - "traefik.enable=${TRAEFIK_DASHBOARD:-false}"
+ # defaults to admin:admin
+ - "traefik.http.middlewares.traefik-auth.basicauth.users=${TRAEFIK_BASIC_AUTH_USERS:-admin:$$apr1$$4vqie50r$$YQAmQdtmz5n9rEALhxJ4l.}"
+ - "traefik.http.routers.traefik.entrypoints=https"
+ - "traefik.http.routers.traefik.rule=Host(`${TRAEFIK_DOMAIN:-traefik.opencloud.test}`)"
+ - "traefik.http.routers.traefik.middlewares=traefik-auth"
+ - "traefik.http.routers.traefik.${TRAEFIK_SERVICES_TLS_CONFIG}"
+ - "traefik.http.routers.traefik.service=api@internal"
+ logging:
+ driver: ${LOG_DRIVER:-local}
+ restart: always
diff --git a/go.mod b/go.mod
index e1c945f868..2851558005 100644
--- a/go.mod
+++ b/go.mod
@@ -34,6 +34,7 @@ require (
github.com/go-micro/plugins/v4/wrapper/monitoring/prometheus v1.2.0
github.com/go-micro/plugins/v4/wrapper/trace/opentelemetry v1.2.0
github.com/go-playground/validator/v10 v10.30.1
+ github.com/go-resty/resty/v2 v2.7.0
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/golang/protobuf v1.5.4
github.com/google/go-cmp v0.7.0
@@ -64,7 +65,7 @@ require (
github.com/open-policy-agent/opa v1.11.1
github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76
- github.com/opencloud-eu/reva/v2 v2.41.1-0.20260107152322-93760b632993
+ github.com/opencloud-eu/reva/v2 v2.41.1-0.20260120144836-2769c3c07a19
github.com/opensearch-project/opensearch-go/v4 v4.6.0
github.com/orcaman/concurrent-map v1.0.0
github.com/pkg/errors v0.9.1
@@ -124,7 +125,7 @@ require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
- github.com/BurntSushi/toml v1.5.0 // indirect
+ github.com/BurntSushi/toml v1.6.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
@@ -221,7 +222,6 @@ require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-redis/redis/v8 v8.11.5 // indirect
- github.com/go-resty/resty/v2 v2.7.0 // indirect
github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
@@ -250,6 +250,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-plugin v1.7.0 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/iancoleman/strcase v0.3.0 // indirect
@@ -288,10 +289,10 @@ require (
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 // indirect
github.com/miekg/dns v1.1.57 // indirect
github.com/mileusna/useragent v1.3.5 // indirect
- github.com/minio/crc64nvme v1.1.0 // indirect
+ github.com/minio/crc64nvme v1.1.1 // indirect
github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
- github.com/minio/minio-go/v7 v7.0.97 // indirect
+ github.com/minio/minio-go/v7 v7.0.98 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
@@ -327,7 +328,7 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/pquerna/cachecontrol v0.2.0 // indirect
- github.com/prometheus/alertmanager v0.30.0 // indirect
+ github.com/prometheus/alertmanager v0.30.1 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.4 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
@@ -342,7 +343,7 @@ require (
github.com/samber/slog-common v0.19.0 // indirect
github.com/samber/slog-zerolog/v2 v2.9.0 // indirect
github.com/segmentio/asm v1.2.1 // indirect
- github.com/segmentio/kafka-go v0.4.49 // indirect
+ github.com/segmentio/kafka-go v0.4.50 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
github.com/sergi/go-diff v1.4.0 // indirect
@@ -362,7 +363,7 @@ require (
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
- github.com/tinylib/msgp v1.3.0 // indirect
+ github.com/tinylib/msgp v1.6.1 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
diff --git a/go.sum b/go.sum
index b73161bc65..164485bb8e 100644
--- a/go.sum
+++ b/go.sum
@@ -65,8 +65,8 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
-github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
+github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/CiscoM31/godata v1.0.11 h1:w7y8twuW02LdH6mak3/GJ5i0GrCv2IoZUJVqa/g5Yeo=
github.com/CiscoM31/godata v1.0.11/go.mod h1:ZMiT6JuD3Rm83HEtiTx4JEChsd25YCrxchKGag/sdTc=
@@ -651,6 +651,8 @@ github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@@ -851,14 +853,14 @@ github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
github.com/mileusna/useragent v1.3.5 h1:SJM5NzBmh/hO+4LGeATKpaEX9+b4vcGg2qXGLiNGDws=
github.com/mileusna/useragent v1.3.5/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc=
-github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q=
-github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
+github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
+github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk=
github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ=
-github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk=
+github.com/minio/minio-go/v7 v7.0.98 h1:MeAVKjLVz+XJ28zFcuYyImNSAh8Mq725uNW4beRisi0=
+github.com/minio/minio-go/v7 v7.0.98/go.mod h1:cY0Y+W7yozf0mdIclrttzo1Iiu7mEf9y7nk2uXqMOvM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@@ -967,8 +969,8 @@ github.com/opencloud-eu/inotifywaitgo v0.0.0-20251111171128-a390bae3c5e9 h1:dIft
github.com/opencloud-eu/inotifywaitgo v0.0.0-20251111171128-a390bae3c5e9/go.mod h1:JWyDC6H+5oZRdUJUgKuaye+8Ph5hEs6HVzVoPKzWSGI=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76 h1:vD/EdfDUrv4omSFjrinT8Mvf+8D7f9g4vgQ2oiDrVUI=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76/go.mod h1:pzatilMEHZFT3qV7C/X3MqOa3NlRQuYhlRhZTL+hN6Q=
-github.com/opencloud-eu/reva/v2 v2.41.1-0.20260107152322-93760b632993 h1:qWU0bKhD1wqQIq6giMTvUUbG1IlaT/lzchLDSjuedi0=
-github.com/opencloud-eu/reva/v2 v2.41.1-0.20260107152322-93760b632993/go.mod h1:foXaMxugUi4TTRsK3AAXRAb/kyFd4A9k2+wNv+p+vbU=
+github.com/opencloud-eu/reva/v2 v2.41.1-0.20260120144836-2769c3c07a19 h1:8loHHe7FYd7zgIcGTlbHwre+bU/AAwREEYVd4SWM9/s=
+github.com/opencloud-eu/reva/v2 v2.41.1-0.20260120144836-2769c3c07a19/go.mod h1:pv+w23JG0/qJweZbTzNNev//YEvlUML1L/2iXgKGkkg=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -1020,8 +1022,8 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om
github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k=
github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
-github.com/prometheus/alertmanager v0.30.0 h1:E4dnxSFXK8V2Bb8iqudlisTmaIrF3hRJSWnliG08tBM=
-github.com/prometheus/alertmanager v0.30.0/go.mod h1:93PBumcTLr/gNtNtM0m7BcCffbvYP5bKuLBWiOnISaA=
+github.com/prometheus/alertmanager v0.30.1 h1:427prmCHuy1rMmV7fl/TVQFh5A/78XQ/Mp+TsswZNGM=
+github.com/prometheus/alertmanager v0.30.1/go.mod h1:93PBumcTLr/gNtNtM0m7BcCffbvYP5bKuLBWiOnISaA=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
@@ -1111,8 +1113,8 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210127161313-bd30bebeac4f/
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
-github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk=
-github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
+github.com/segmentio/kafka-go v0.4.50 h1:mcyC3tT5WeyWzrFbd6O374t+hmcu1NKt2Pu1L3QaXmc=
+github.com/segmentio/kafka-go v0.4.50/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY=
@@ -1224,8 +1226,8 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
-github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww=
-github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
+github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
+github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
diff --git a/services/storage-system/pkg/revaconfig/config.go b/services/storage-system/pkg/revaconfig/config.go
index e5183a7a5a..cb1293fd00 100644
--- a/services/storage-system/pkg/revaconfig/config.go
+++ b/services/storage-system/pkg/revaconfig/config.go
@@ -158,6 +158,7 @@ func metadataDrivers(localEndpoint string, cfg *config.Config) map[string]interf
"permissionssvc": localEndpoint,
"max_acquire_lock_cycles": cfg.Drivers.Decomposed.MaxAcquireLockCycles,
"lock_cycle_duration_factor": cfg.Drivers.Decomposed.LockCycleDurationFactor,
+ "multi_tenant_enabled": false, // storage-system doesn't use tenants, even if it's enabled for storage-users
"disable_versioning": true,
"statcache": map[string]interface{}{
"cache_store": "noop",
diff --git a/services/storage-users/pkg/revaconfig/drivers.go b/services/storage-users/pkg/revaconfig/drivers.go
index 56fa18bf0f..a8c1976222 100644
--- a/services/storage-users/pkg/revaconfig/drivers.go
+++ b/services/storage-users/pkg/revaconfig/drivers.go
@@ -100,6 +100,7 @@ func Posix(cfg *config.Config, enableFSScan, enableFSWatch bool) map[string]inte
"scan_debounce_delay": cfg.Drivers.Posix.ScanDebounceDelay,
"max_quota": cfg.Drivers.Posix.MaxQuota,
"disable_versioning": cfg.Drivers.Posix.DisableVersioning,
+ "multi_tenant_enabled": cfg.Commons.MultiTenantEnabled,
"propagator": cfg.Drivers.Posix.Propagator,
"async_propagator_options": map[string]interface{}{
"propagation_delay": cfg.Drivers.Posix.AsyncPropagatorOptions.PropagationDelay,
@@ -203,6 +204,7 @@ func Decomposed(cfg *config.Config) map[string]interface{} {
"asyncfileuploads": cfg.Drivers.Decomposed.AsyncUploads,
"max_quota": cfg.Drivers.Decomposed.MaxQuota,
"disable_versioning": cfg.Drivers.Decomposed.DisableVersioning,
+ "multi_tenant_enabled": cfg.Commons.MultiTenantEnabled,
"filemetadatacache": map[string]interface{}{
"cache_store": cfg.FilemetadataCache.Store,
"cache_nodes": cfg.FilemetadataCache.Nodes,
@@ -257,6 +259,7 @@ func DecomposedNoEvents(cfg *config.Config) map[string]interface{} {
"max_concurrency": cfg.Drivers.Decomposed.MaxConcurrency,
"max_quota": cfg.Drivers.Decomposed.MaxQuota,
"disable_versioning": cfg.Drivers.Decomposed.DisableVersioning,
+ "multi_tenant_enabled": cfg.Commons.MultiTenantEnabled,
"filemetadatacache": map[string]interface{}{
"cache_store": cfg.FilemetadataCache.Store,
"cache_nodes": cfg.FilemetadataCache.Nodes,
@@ -312,6 +315,7 @@ func DecomposedS3(cfg *config.Config) map[string]interface{} {
"lock_cycle_duration_factor": cfg.Drivers.DecomposedS3.LockCycleDurationFactor,
"max_concurrency": cfg.Drivers.DecomposedS3.MaxConcurrency,
"disable_versioning": cfg.Drivers.DecomposedS3.DisableVersioning,
+ "multi_tenant_enabled": cfg.Commons.MultiTenantEnabled,
"asyncfileuploads": cfg.Drivers.DecomposedS3.AsyncUploads,
"filemetadatacache": map[string]interface{}{
"cache_store": cfg.FilemetadataCache.Store,
@@ -370,6 +374,7 @@ func DecomposedS3NoEvents(cfg *config.Config) map[string]interface{} {
"max_acquire_lock_cycles": cfg.Drivers.DecomposedS3.MaxAcquireLockCycles,
"max_concurrency": cfg.Drivers.DecomposedS3.MaxConcurrency,
"disable_versioning": cfg.Drivers.DecomposedS3.DisableVersioning,
+ "multi_tenant_enabled": cfg.Commons.MultiTenantEnabled,
"lock_cycle_duration_factor": cfg.Drivers.DecomposedS3.LockCycleDurationFactor,
"filemetadatacache": map[string]interface{}{
"cache_store": cfg.FilemetadataCache.Store,
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 235496eeb2..1101d206d4 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -1,7 +1,7 @@
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml` packages.
-Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
+Compatible with TOML version [v1.1.0](https://toml.io/en/v1.1.0).
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index 3fa516caa2..ed884840fb 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -206,6 +206,13 @@ func markDecodedRecursive(md *MetaData, tmap map[string]any) {
markDecodedRecursive(md, tmap)
md.context = md.context[0 : len(md.context)-1]
}
+ if tarr, ok := tmap[key].([]map[string]any); ok {
+ for _, elm := range tarr {
+ md.context = append(md.context, key)
+ markDecodedRecursive(md, elm)
+ md.context = md.context[0 : len(md.context)-1]
+ }
+ }
}
}
@@ -423,7 +430,7 @@ func (md *MetaData) unifyString(data any, rv reflect.Value) error {
if i, ok := data.(int64); ok {
rv.SetString(strconv.FormatInt(i, 10))
} else if f, ok := data.(float64); ok {
- rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
+ rv.SetString(strconv.FormatFloat(f, 'g', -1, 64))
} else {
return md.badtype("string", data)
}
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index ac196e7df8..bd7aa18655 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -228,9 +228,9 @@ func (enc *Encoder) eElement(rv reflect.Value) {
}
switch v.Location() {
default:
- enc.wf(v.Format(format))
+ enc.write(v.Format(format))
case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
- enc.wf(v.In(time.UTC).Format(format))
+ enc.write(v.In(time.UTC).Format(format))
}
return
case Marshaler:
@@ -279,40 +279,40 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.String:
enc.writeQuoted(rv.String())
case reflect.Bool:
- enc.wf(strconv.FormatBool(rv.Bool()))
+ enc.write(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- enc.wf(strconv.FormatInt(rv.Int(), 10))
+ enc.write(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- enc.wf(strconv.FormatUint(rv.Uint(), 10))
+ enc.write(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
if math.Signbit(f) {
- enc.wf("-")
+ enc.write("-")
}
- enc.wf("nan")
+ enc.write("nan")
} else if math.IsInf(f, 0) {
if math.Signbit(f) {
- enc.wf("-")
+ enc.write("-")
}
- enc.wf("inf")
+ enc.write("inf")
} else {
- enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
+ enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
if math.Signbit(f) {
- enc.wf("-")
+ enc.write("-")
}
- enc.wf("nan")
+ enc.write("nan")
} else if math.IsInf(f, 0) {
if math.Signbit(f) {
- enc.wf("-")
+ enc.write("-")
}
- enc.wf("inf")
+ enc.write("inf")
} else {
- enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
+ enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 64)))
}
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
@@ -330,27 +330,32 @@ func (enc *Encoder) eElement(rv reflect.Value) {
// By the TOML spec, all floats must have a decimal with at least one number on
// either side.
func floatAddDecimal(fstr string) string {
- if !strings.Contains(fstr, ".") {
- return fstr + ".0"
+ for _, c := range fstr {
+ if c == 'e' { // Exponent syntax
+ return fstr
+ }
+ if c == '.' {
+ return fstr
+ }
}
- return fstr
+ return fstr + ".0"
}
func (enc *Encoder) writeQuoted(s string) {
- enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
+ enc.write(`"` + dblQuotedReplacer.Replace(s) + `"`)
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
- enc.wf("[")
+ enc.write("[")
for i := 0; i < length; i++ {
elem := eindirect(rv.Index(i))
enc.eElement(elem)
if i != length-1 {
- enc.wf(", ")
+ enc.write(", ")
}
}
- enc.wf("]")
+ enc.write("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
@@ -363,7 +368,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
continue
}
enc.newline()
- enc.wf("%s[[%s]]", enc.indentStr(key), key)
+ enc.writef("%s[[%s]]", enc.indentStr(key), key)
enc.newline()
enc.eMapOrStruct(key, trv, false)
}
@@ -376,7 +381,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
enc.newline()
}
if len(key) > 0 {
- enc.wf("%s[%s]", enc.indentStr(key), key)
+ enc.writef("%s[%s]", enc.indentStr(key), key)
enc.newline()
}
enc.eMapOrStruct(key, rv, false)
@@ -422,7 +427,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
- enc.wf(", ")
+ enc.write(", ")
}
} else {
enc.encode(key.add(mapKey.String()), val)
@@ -431,12 +436,12 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
if inline {
- enc.wf("{")
+ enc.write("{")
}
writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
writeMapKeys(mapKeysSub, false)
if inline {
- enc.wf("}")
+ enc.write("}")
}
}
@@ -534,7 +539,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != totalFields-1 {
- enc.wf(", ")
+ enc.write(", ")
}
} else {
enc.encode(key.add(keyName), fieldVal)
@@ -543,14 +548,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
if inline {
- enc.wf("{")
+ enc.write("{")
}
l := len(fieldsDirect) + len(fieldsSub)
writeFields(fieldsDirect, l)
writeFields(fieldsSub, l)
if inline {
- enc.wf("}")
+ enc.write("}")
}
}
@@ -700,7 +705,7 @@ func isEmpty(rv reflect.Value) bool {
func (enc *Encoder) newline() {
if enc.hasWritten {
- enc.wf("\n")
+ enc.write("\n")
}
}
@@ -722,14 +727,22 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
enc.eElement(val)
return
}
- enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+ enc.writef("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
if !inline {
enc.newline()
}
}
-func (enc *Encoder) wf(format string, v ...any) {
+func (enc *Encoder) write(s string) {
+ _, err := enc.w.WriteString(s)
+ if err != nil {
+ encPanic(err)
+ }
+ enc.hasWritten = true
+}
+
+func (enc *Encoder) writef(format string, v ...any) {
_, err := fmt.Fprintf(enc.w, format, v...)
if err != nil {
encPanic(err)
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index 1c3b477029..9f4396a0f7 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -13,7 +13,6 @@ type itemType int
const (
itemError itemType = iota
- itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
@@ -47,14 +46,13 @@ func (p Position) String() string {
}
type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
- tomlNext bool
- esc bool
+ input string
+ start int
+ pos int
+ line int
+ state stateFn
+ items chan item
+ esc bool
// Allow for backing up up to 4 runes. This is necessary because TOML
// contains 3-rune tokens (""" and ''').
@@ -90,14 +88,13 @@ func (lx *lexer) nextItem() item {
}
}
-func lex(input string, tomlNext bool) *lexer {
+func lex(input string) *lexer {
lx := &lexer{
- input: input,
- state: lexTop,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- line: 1,
- tomlNext: tomlNext,
+ input: input,
+ state: lexTop,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ line: 1,
}
return lx
}
@@ -108,7 +105,7 @@ func (lx *lexer) push(state stateFn) {
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
- return lx.errorf("BUG in lexer: no states to pop")
+ panic("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
@@ -305,6 +302,8 @@ func lexTop(lx *lexer) stateFn {
return lexTableStart
case eof:
if lx.pos > lx.start {
+ // TODO: never reached? I think this can only occur on a bug in the
+ // lexer(?)
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
@@ -392,8 +391,6 @@ func lexTableNameStart(lx *lexer) stateFn {
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
- case isWhitespace(r):
- return lexTableNameEnd
case r == '.':
lx.ignore()
return lexTableNameStart
@@ -412,7 +409,7 @@ func lexTableNameEnd(lx *lexer) stateFn {
// Lexes only one part, e.g. only 'a' inside 'a.b'.
func lexBareName(lx *lexer) stateFn {
r := lx.next()
- if isBareKeyChar(r, lx.tomlNext) {
+ if isBareKeyChar(r) {
return lexBareName
}
lx.backup()
@@ -420,23 +417,23 @@ func lexBareName(lx *lexer) stateFn {
return lx.pop()
}
-// lexBareName lexes one part of a key or table.
-//
-// It assumes that at least one valid character for the table has already been
-// read.
+// lexQuotedName lexes one part of a quoted key or table name. It assumes that
+// it starts lexing at the quote itself (" or ').
//
// Lexes only one part, e.g. only '"a"' inside '"a".b'.
func lexQuotedName(lx *lexer) stateFn {
r := lx.next()
switch {
- case isWhitespace(r):
- return lexSkip(lx, lexValue)
case r == '"':
lx.ignore() // ignore the '"'
return lexString
case r == '\'':
lx.ignore() // ignore the "'"
return lexRawString
+
+ // TODO: I don't think any of the below conditions can ever be reached?
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
case r == eof:
return lx.errorf("unexpected EOF; expected value")
default:
@@ -464,17 +461,19 @@ func lexKeyStart(lx *lexer) stateFn {
func lexKeyNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
- case r == '=' || r == eof:
- return lx.errorf("unexpected '='")
- case r == '.':
- return lx.errorf("unexpected '.'")
+ default:
+ lx.push(lexKeyEnd)
+ return lexBareName
case r == '"' || r == '\'':
lx.ignore()
lx.push(lexKeyEnd)
return lexQuotedName
- default:
- lx.push(lexKeyEnd)
- return lexBareName
+
+ // TODO: I think these can never be reached?
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '='")
+ case r == '.':
+ return lx.errorf("unexpected '.'")
}
}
@@ -485,7 +484,7 @@ func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
- case r == eof:
+ case r == eof: // TODO: never reached
return lx.errorf("unexpected EOF; expected key separator '='")
case r == '.':
lx.ignore()
@@ -628,10 +627,7 @@ func lexInlineTableValue(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
- if lx.tomlNext {
- return lexSkip(lx, lexInlineTableValue)
- }
- return lx.errorPrevLine(errLexInlineTableNL{})
+ return lexSkip(lx, lexInlineTableValue)
case r == '#':
lx.push(lexInlineTableValue)
return lexCommentStart
@@ -653,10 +649,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
- if lx.tomlNext {
- return lexSkip(lx, lexInlineTableValueEnd)
- }
- return lx.errorPrevLine(errLexInlineTableNL{})
+ return lexSkip(lx, lexInlineTableValueEnd)
case r == '#':
lx.push(lexInlineTableValueEnd)
return lexCommentStart
@@ -664,10 +657,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
- if lx.tomlNext {
- return lexInlineTableValueEnd
- }
- return lx.errorf("trailing comma not allowed in inline tables")
+ return lexInlineTableValueEnd
}
return lexInlineTableValue
case r == '}':
@@ -855,9 +845,6 @@ func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'e':
- if !lx.tomlNext {
- return lx.error(errLexEscape{r})
- }
fallthrough
case 'b':
fallthrough
@@ -878,9 +865,6 @@ func lexStringEscape(lx *lexer) stateFn {
case '\\':
return lx.pop()
case 'x':
- if !lx.tomlNext {
- return lx.error(errLexEscape{r})
- }
return lexHexEscape
case 'u':
return lexShortUnicodeEscape
@@ -928,19 +912,9 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
// lexBaseNumberOrDate can differentiate base prefixed integers from other
// types.
func lexNumberOrDateStart(lx *lexer) stateFn {
- r := lx.next()
- switch r {
- case '0':
+ if lx.next() == '0' {
return lexBaseNumberOrDate
}
-
- if !isDigit(r) {
- // The only way to reach this state is if the value starts
- // with a digit, so specifically treat anything else as an
- // error.
- return lx.errorf("expected a digit but got %q", r)
- }
-
return lexNumberOrDate
}
@@ -1196,13 +1170,13 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn {
}
func (s stateFn) String() string {
+ if s == nil {
+ return ""
+ }
name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
if i := strings.LastIndexByte(name, '.'); i > -1 {
name = name[i+1:]
}
- if s == nil {
- name = ""
- }
return name + "()"
}
@@ -1210,8 +1184,6 @@ func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
- case itemNIL:
- return "NIL"
case itemEOF:
return "EOF"
case itemText:
@@ -1226,18 +1198,22 @@ func (itype itemType) String() string {
return "Float"
case itemDatetime:
return "DateTime"
+ case itemArray:
+ return "Array"
+ case itemArrayEnd:
+ return "ArrayEnd"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
+ case itemArrayTableStart:
+ return "ArrayTableStart"
+ case itemArrayTableEnd:
+ return "ArrayTableEnd"
case itemKeyStart:
return "KeyStart"
case itemKeyEnd:
return "KeyEnd"
- case itemArray:
- return "Array"
- case itemArrayEnd:
- return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
case itemInlineTableStart:
@@ -1266,7 +1242,7 @@ func isDigit(r rune) bool { return r >= '0' && r <= '9' }
func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
-func isBareKeyChar(r rune, tomlNext bool) bool {
+func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') || r == '_' || r == '-'
}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index e3ea8a9a2d..b474247ae2 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -3,7 +3,6 @@ package toml
import (
"fmt"
"math"
- "os"
"strconv"
"strings"
"time"
@@ -17,7 +16,6 @@ type parser struct {
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
pos Position // Current position in the TOML file.
- tomlNext bool
ordered []Key // List of keys in the order that they appear in the TOML data.
@@ -32,8 +30,6 @@ type keyInfo struct {
}
func parse(data string) (p *parser, err error) {
- _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
-
defer func() {
if r := recover(); r != nil {
if pErr, ok := r.(ParseError); ok {
@@ -73,10 +69,9 @@ func parse(data string) (p *parser, err error) {
p = &parser{
keyInfo: make(map[string]keyInfo),
mapping: make(map[string]any),
- lx: lex(data, tomlNext),
+ lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]struct{}),
- tomlNext: tomlNext,
}
for {
item := p.next()
@@ -350,17 +345,14 @@ func (p *parser) valueFloat(it item) (any, tomlType) {
var dtTypes = []struct {
fmt string
zone *time.Location
- next bool
}{
- {time.RFC3339Nano, time.Local, false},
- {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
- {"2006-01-02", internal.LocalDate, false},
- {"15:04:05.999999999", internal.LocalTime, false},
-
- // tomlNext
- {"2006-01-02T15:04Z07:00", time.Local, true},
- {"2006-01-02T15:04", internal.LocalDatetime, true},
- {"15:04", internal.LocalTime, true},
+ {time.RFC3339Nano, time.Local},
+ {"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
+ {"2006-01-02", internal.LocalDate},
+ {"15:04:05.999999999", internal.LocalTime},
+ {"2006-01-02T15:04Z07:00", time.Local},
+ {"2006-01-02T15:04", internal.LocalDatetime},
+ {"15:04", internal.LocalTime},
}
func (p *parser) valueDatetime(it item) (any, tomlType) {
@@ -371,9 +363,6 @@ func (p *parser) valueDatetime(it item) (any, tomlType) {
err error
)
for _, dt := range dtTypes {
- if dt.next && !p.tomlNext {
- continue
- }
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
if err == nil {
if missingLeadingZero(it.val, dt.fmt) {
@@ -644,6 +633,11 @@ func (p *parser) setValue(key string, value any) {
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isArray(keyContext) {
+ if !p.isImplicit(keyContext) {
+ if _, ok := hash[key]; ok {
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ }
p.removeImplicit(keyContext)
hash[key] = value
return
@@ -802,10 +796,8 @@ func (p *parser) replaceEscapes(it item, str string) string {
b.WriteByte(0x0d)
skip = 1
case 'e':
- if p.tomlNext {
- b.WriteByte(0x1b)
- skip = 1
- }
+ b.WriteByte(0x1b)
+ skip = 1
case '"':
b.WriteByte(0x22)
skip = 1
@@ -815,11 +807,9 @@ func (p *parser) replaceEscapes(it item, str string) string {
// The lexer guarantees the correct number of characters are present;
// don't need to check here.
case 'x':
- if p.tomlNext {
- escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
- b.WriteRune(escaped)
- skip = 3
- }
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
+ b.WriteRune(escaped)
+ skip = 3
case 'u':
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
b.WriteRune(escaped)
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.gitignore b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore
new file mode 100644
index 0000000000..836562412f
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml
new file mode 100644
index 0000000000..7e7b8a9627
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml
@@ -0,0 +1,46 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+linters:
+ fast: false
+ disable-all: true
+ enable:
+ - revive
+ - megacheck
+ - govet
+ - unconvert
+ - gas
+ - gocyclo
+ - dupl
+ - misspell
+ - unparam
+ - unused
+ - typecheck
+ - ineffassign
+ # - stylecheck
+ - exportloopref
+ - gocritic
+ - nakedret
+ - gosimple
+ - prealloc
+
+# golangci-lint configuration file
+linters-settings:
+ revive:
+ ignore-generated-header: true
+ severity: warning
+ rules:
+ - name: package-comments
+ severity: warning
+ disabled: true
+ - name: exported
+ severity: warning
+ disabled: false
+ arguments: ["checkPrivateReceivers", "disableStutteringCheck"]
+
+issues:
+ exclude-use-default: false
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - dupl
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/2q.go b/vendor/github.com/hashicorp/golang-lru/v2/2q.go
new file mode 100644
index 0000000000..8c95252b6f
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/2q.go
@@ -0,0 +1,267 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package lru
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/hashicorp/golang-lru/v2/simplelru"
+)
+
+const (
+ // Default2QRecentRatio is the ratio of the 2Q cache dedicated
+ // to recently added entries that have only been accessed once.
+ Default2QRecentRatio = 0.25
+
+ // Default2QGhostEntries is the default ratio of ghost
+ // entries kept to track entries recently evicted
+ Default2QGhostEntries = 0.50
+)
+
+// TwoQueueCache is a thread-safe fixed size 2Q cache.
+// 2Q is an enhancement over the standard LRU cache
+// in that it tracks both frequently and recently used
+// entries separately. This avoids a burst in access to new
+// entries from evicting frequently used entries. It adds some
+// additional tracking overhead to the standard LRU cache, and is
+// computationally about 2x the cost, and adds some metadata over
+// head. The ARCCache is similar, but does not require setting any
+// parameters.
+type TwoQueueCache[K comparable, V any] struct {
+ size int
+ recentSize int
+ recentRatio float64
+ ghostRatio float64
+
+ recent simplelru.LRUCache[K, V]
+ frequent simplelru.LRUCache[K, V]
+ recentEvict simplelru.LRUCache[K, struct{}]
+ lock sync.RWMutex
+}
+
+// New2Q creates a new TwoQueueCache using the default
+// values for the parameters.
+func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) {
+ return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries)
+}
+
+// New2QParams creates a new TwoQueueCache using the provided
+// parameter values.
+func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) {
+ if size <= 0 {
+ return nil, errors.New("invalid size")
+ }
+ if recentRatio < 0.0 || recentRatio > 1.0 {
+ return nil, errors.New("invalid recent ratio")
+ }
+ if ghostRatio < 0.0 || ghostRatio > 1.0 {
+ return nil, errors.New("invalid ghost ratio")
+ }
+
+ // Determine the sub-sizes
+ recentSize := int(float64(size) * recentRatio)
+ evictSize := int(float64(size) * ghostRatio)
+
+ // Allocate the LRUs
+ recent, err := simplelru.NewLRU[K, V](size, nil)
+ if err != nil {
+ return nil, err
+ }
+ frequent, err := simplelru.NewLRU[K, V](size, nil)
+ if err != nil {
+ return nil, err
+ }
+ recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the cache
+ c := &TwoQueueCache[K, V]{
+ size: size,
+ recentSize: recentSize,
+ recentRatio: recentRatio,
+ ghostRatio: ghostRatio,
+ recent: recent,
+ frequent: frequent,
+ recentEvict: recentEvict,
+ }
+ return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if this is a frequent value
+ if val, ok := c.frequent.Get(key); ok {
+ return val, ok
+ }
+
+ // If the value is contained in recent, then we
+ // promote it to frequent
+ if val, ok := c.recent.Peek(key); ok {
+ c.recent.Remove(key)
+ c.frequent.Add(key, val)
+ return val, ok
+ }
+
+ // No hit
+ return
+}
+
+// Add adds a value to the cache.
+func (c *TwoQueueCache[K, V]) Add(key K, value V) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is frequently used already,
+ // and just update the value
+ if c.frequent.Contains(key) {
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Check if the value is recently used, and promote
+ // the value into the frequent list
+ if c.recent.Contains(key) {
+ c.recent.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // If the value was recently evicted, add it to the
+ // frequently used list
+ if c.recentEvict.Contains(key) {
+ c.ensureSpace(true)
+ c.recentEvict.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Add to the recently seen list
+ c.ensureSpace(false)
+ c.recent.Add(key, value)
+}
+
+// ensureSpace is used to ensure we have space in the cache
+func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) {
+ // If we have space, nothing to do
+ recentLen := c.recent.Len()
+ freqLen := c.frequent.Len()
+ if recentLen+freqLen < c.size {
+ return
+ }
+
+ // If the recent buffer is larger than
+ // the target, evict from there
+ if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
+ k, _, _ := c.recent.RemoveOldest()
+ c.recentEvict.Add(k, struct{}{})
+ return
+ }
+
+ // Remove from the frequent list otherwise
+ c.frequent.RemoveOldest()
+}
+
+// Len returns the number of items in the cache.
+func (c *TwoQueueCache[K, V]) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.recent.Len() + c.frequent.Len()
+}
+
+// Resize changes the cache size.
+func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Recalculate the sub-sizes
+ recentSize := int(float64(size) * c.recentRatio)
+ evictSize := int(float64(size) * c.ghostRatio)
+ c.size = size
+ c.recentSize = recentSize
+
+ // ensureSpace
+ diff := c.recent.Len() + c.frequent.Len() - size
+ if diff < 0 {
+ diff = 0
+ }
+ for i := 0; i < diff; i++ {
+ c.ensureSpace(true)
+ }
+
+ // Reallocate the LRUs
+ c.recent.Resize(size)
+ c.frequent.Resize(size)
+ c.recentEvict.Resize(evictSize)
+
+ return diff
+}
+
+// Keys returns a slice of the keys in the cache.
+// The frequently used keys are first in the returned slice.
+func (c *TwoQueueCache[K, V]) Keys() []K {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.frequent.Keys()
+ k2 := c.recent.Keys()
+ return append(k1, k2...)
+}
+
+// Values returns a slice of the values in the cache.
+// The frequently used values are first in the returned slice.
+func (c *TwoQueueCache[K, V]) Values() []V {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ v1 := c.frequent.Values()
+ v2 := c.recent.Values()
+ return append(v1, v2...)
+}
+
+// Remove removes the provided key from the cache.
+func (c *TwoQueueCache[K, V]) Remove(key K) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.frequent.Remove(key) {
+ return
+ }
+ if c.recent.Remove(key) {
+ return
+ }
+ if c.recentEvict.Remove(key) {
+ return
+ }
+}
+
+// Purge is used to completely clear the cache.
+func (c *TwoQueueCache[K, V]) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.recent.Purge()
+ c.frequent.Purge()
+ c.recentEvict.Purge()
+}
+
+// Contains is used to check if the cache contains a key
+// without updating recency or frequency.
+func (c *TwoQueueCache[K, V]) Contains(key K) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.frequent.Contains(key) || c.recent.Contains(key)
+}
+
+// Peek is used to inspect the cache value of a key
+// without updating recency or frequency.
+func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.frequent.Peek(key); ok {
+ return val, ok
+ }
+ return c.recent.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE
new file mode 100644
index 0000000000..0e5d580e0e
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE
@@ -0,0 +1,364 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/README.md b/vendor/github.com/hashicorp/golang-lru/v2/README.md
new file mode 100644
index 0000000000..a942eb5397
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/README.md
@@ -0,0 +1,79 @@
+golang-lru
+==========
+
+This provides the `lru` package which implements a fixed-size
+thread safe LRU cache. It is based on the cache in Groupcache.
+
+Documentation
+=============
+
+Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2)
+
+LRU cache example
+=================
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/hashicorp/golang-lru/v2"
+)
+
+func main() {
+ l, _ := lru.New[int, any](128)
+ for i := 0; i < 256; i++ {
+ l.Add(i, nil)
+ }
+ if l.Len() != 128 {
+ panic(fmt.Sprintf("bad len: %v", l.Len()))
+ }
+}
+```
+
+Expirable LRU cache example
+===========================
+
+```go
+package main
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/golang-lru/v2/expirable"
+)
+
+func main() {
+ // make cache with 10ms TTL and 5 max keys
+ cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10)
+
+
+ // set value under key1.
+ cache.Add("key1", "val1")
+
+ // get value under key1
+ r, ok := cache.Get("key1")
+
+ // check for OK value
+ if ok {
+ fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r)
+ }
+
+ // wait for cache to expire
+ time.Sleep(time.Millisecond * 12)
+
+ // get value under key1 after key expiration
+ r, ok = cache.Get("key1")
+ fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r)
+
+ // set value under key2, would evict old entry because it is already expired.
+ cache.Add("key2", "val2")
+
+ fmt.Printf("Cache len: %d\n", cache.Len())
+ // Output:
+ // value before expiration is found: true, value: "val1"
+ // value after expiration is found: false, value: ""
+ // Cache len: 1
+}
+```
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/doc.go b/vendor/github.com/hashicorp/golang-lru/v2/doc.go
new file mode 100644
index 0000000000..24107ee0ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package lru provides three different LRU caches of varying sophistication.
+//
+// Cache is a simple LRU cache. It is based on the LRU implementation in
+// groupcache: https://github.com/golang/groupcache/tree/master/lru
+//
+// TwoQueueCache tracks frequently used and recently used entries separately.
+// This avoids a burst of accesses from taking out frequently used entries, at
+// the cost of about 2x computational overhead and some extra bookkeeping.
+//
+// ARCCache is an adaptive replacement cache. It tracks recent evictions as well
+// as recent usage in both the frequent and recent caches. Its computational
+// overhead is comparable to TwoQueueCache, but the memory overhead is linear
+// with the size of the cache.
+//
+// ARC has been patented by IBM, so do not use it if that is problematic for
+// your program. For this reason, it is in a separate go module contained within
+// this repository.
+//
+// All caches in this package take locks while operating, and are therefore
+// thread-safe for consumers.
+package lru
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go
new file mode 100644
index 0000000000..5cd74a0343
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go
@@ -0,0 +1,142 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE_list file.
+
+package internal
+
+import "time"
+
+// Entry is an LRU Entry
+type Entry[K comparable, V any] struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.Back()) and the previous element of the first list
+ // element (l.Front()).
+ next, prev *Entry[K, V]
+
+ // The list to which this element belongs.
+ list *LruList[K, V]
+
+ // The LRU Key of this element.
+ Key K
+
+ // The Value stored with this element.
+ Value V
+
+ // The time this element would be cleaned up, optional
+ ExpiresAt time.Time
+
+ // The expiry bucket item was put in, optional
+ ExpireBucket uint8
+}
+
+// PrevEntry returns the previous list element or nil.
+func (e *Entry[K, V]) PrevEntry() *Entry[K, V] {
+ if p := e.prev; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// LruList represents a doubly linked list.
+// The zero Value for LruList is an empty list ready to use.
+type LruList[K comparable, V any] struct {
+ root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
+ len int // current list Length excluding (this) sentinel element
+}
+
+// Init initializes or clears list l.
+func (l *LruList[K, V]) Init() *LruList[K, V] {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ l.len = 0
+ return l
+}
+
+// NewList returns an initialized list.
+func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() }
+
+// Length returns the number of elements of list l.
+// The complexity is O(1).
+func (l *LruList[K, V]) Length() int { return l.len }
+
+// Back returns the last element of list l or nil if the list is empty.
+func (l *LruList[K, V]) Back() *Entry[K, V] {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.prev
+}
+
+// lazyInit lazily initializes a zero List Value.
+func (l *LruList[K, V]) lazyInit() {
+ if l.root.next == nil {
+ l.Init()
+ }
+}
+
+// insert inserts e after at, increments l.len, and returns e.
+func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] {
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+ e.list = l
+ l.len++
+ return e
+}
+
+// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at).
+func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] {
+ return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at)
+}
+
+// Remove removes e from its list, decrements l.len
+func (l *LruList[K, V]) Remove(e *Entry[K, V]) V {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+ e.list = nil
+ l.len--
+
+ return e.Value
+}
+
+// move moves e to next to at.
+func (l *LruList[K, V]) move(e, at *Entry[K, V]) {
+ if e == at {
+ return
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+}
+
+// PushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] {
+ l.lazyInit()
+ return l.insertValue(k, v, time.Time{}, &l.root)
+}
+
+// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e.
+func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] {
+ l.lazyInit()
+ return l.insertValue(k, v, expiresAt, &l.root)
+}
+
+// MoveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) {
+ if e.list != l || l.root.next == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, &l.root)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/lru.go
new file mode 100644
index 0000000000..a2655f1f31
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/lru.go
@@ -0,0 +1,250 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/v2/simplelru"
+)
+
+const (
+ // DefaultEvictedBufferSize defines the default buffer size to store evicted key/val
+ DefaultEvictedBufferSize = 16
+)
+
+// Cache is a thread-safe fixed size LRU cache.
+type Cache[K comparable, V any] struct {
+ lru *simplelru.LRU[K, V]
+ evictedKeys []K
+ evictedVals []V
+ onEvictedCB func(k K, v V)
+ lock sync.RWMutex
+}
+
+// New creates an LRU of the given size.
+func New[K comparable, V any](size int) (*Cache[K, V], error) {
+ return NewWithEvict[K, V](size, nil)
+}
+
+// NewWithEvict constructs a fixed size cache with the given eviction
+// callback.
+func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) {
+ // create a cache with default settings
+ c = &Cache[K, V]{
+ onEvictedCB: onEvicted,
+ }
+ if onEvicted != nil {
+ c.initEvictBuffers()
+ onEvicted = c.onEvicted
+ }
+ c.lru, err = simplelru.NewLRU(size, onEvicted)
+ return
+}
+
+func (c *Cache[K, V]) initEvictBuffers() {
+ c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize)
+ c.evictedVals = make([]V, 0, DefaultEvictedBufferSize)
+}
+
+// onEvicted save evicted key/val and sent in externally registered callback
+// outside of critical section
+func (c *Cache[K, V]) onEvicted(k K, v V) {
+ c.evictedKeys = append(c.evictedKeys, k)
+ c.evictedVals = append(c.evictedVals, v)
+}
+
+// Purge is used to completely clear the cache.
+func (c *Cache[K, V]) Purge() {
+ var ks []K
+ var vs []V
+ c.lock.Lock()
+ c.lru.Purge()
+ if c.onEvictedCB != nil && len(c.evictedKeys) > 0 {
+ ks, vs = c.evictedKeys, c.evictedVals
+ c.initEvictBuffers()
+ }
+ c.lock.Unlock()
+ // invoke callback outside of critical section
+ if c.onEvictedCB != nil {
+ for i := 0; i < len(ks); i++ {
+ c.onEvictedCB(ks[i], vs[i])
+ }
+ }
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
+ var k K
+ var v V
+ c.lock.Lock()
+ evicted = c.lru.Add(key, value)
+ if c.onEvictedCB != nil && evicted {
+ k, v = c.evictedKeys[0], c.evictedVals[0]
+ c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
+ }
+ c.lock.Unlock()
+ if c.onEvictedCB != nil && evicted {
+ c.onEvictedCB(k, v)
+ }
+ return
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
+ c.lock.Lock()
+ value, ok = c.lru.Get(key)
+ c.lock.Unlock()
+ return value, ok
+}
+
+// Contains checks if a key is in the cache, without updating the
+// recent-ness or deleting it for being stale.
+func (c *Cache[K, V]) Contains(key K) bool {
+ c.lock.RLock()
+ containKey := c.lru.Contains(key)
+ c.lock.RUnlock()
+ return containKey
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
+ c.lock.RLock()
+ value, ok = c.lru.Peek(key)
+ c.lock.RUnlock()
+ return value, ok
+}
+
+// ContainsOrAdd checks if a key is in the cache without updating the
+// recent-ness or deleting it for being stale, and if not, adds the value.
+// Returns whether found and whether an eviction occurred.
+func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) {
+ var k K
+ var v V
+ c.lock.Lock()
+ if c.lru.Contains(key) {
+ c.lock.Unlock()
+ return true, false
+ }
+ evicted = c.lru.Add(key, value)
+ if c.onEvictedCB != nil && evicted {
+ k, v = c.evictedKeys[0], c.evictedVals[0]
+ c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
+ }
+ c.lock.Unlock()
+ if c.onEvictedCB != nil && evicted {
+ c.onEvictedCB(k, v)
+ }
+ return false, evicted
+}
+
+// PeekOrAdd checks if a key is in the cache without updating the
+// recent-ness or deleting it for being stale, and if not, adds the value.
+// Returns whether found and whether an eviction occurred.
+func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) {
+ var k K
+ var v V
+ c.lock.Lock()
+ previous, ok = c.lru.Peek(key)
+ if ok {
+ c.lock.Unlock()
+ return previous, true, false
+ }
+ evicted = c.lru.Add(key, value)
+ if c.onEvictedCB != nil && evicted {
+ k, v = c.evictedKeys[0], c.evictedVals[0]
+ c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
+ }
+ c.lock.Unlock()
+ if c.onEvictedCB != nil && evicted {
+ c.onEvictedCB(k, v)
+ }
+ return
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache[K, V]) Remove(key K) (present bool) {
+ var k K
+ var v V
+ c.lock.Lock()
+ present = c.lru.Remove(key)
+ if c.onEvictedCB != nil && present {
+ k, v = c.evictedKeys[0], c.evictedVals[0]
+ c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
+ }
+ c.lock.Unlock()
+ if c.onEvictedCB != nil && present {
+ c.onEvictedCB(k, v)
+ }
+ return
+}
+
+// Resize changes the cache size.
+func (c *Cache[K, V]) Resize(size int) (evicted int) {
+ var ks []K
+ var vs []V
+ c.lock.Lock()
+ evicted = c.lru.Resize(size)
+ if c.onEvictedCB != nil && evicted > 0 {
+ ks, vs = c.evictedKeys, c.evictedVals
+ c.initEvictBuffers()
+ }
+ c.lock.Unlock()
+ if c.onEvictedCB != nil && evicted > 0 {
+ for i := 0; i < len(ks); i++ {
+ c.onEvictedCB(ks[i], vs[i])
+ }
+ }
+ return evicted
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) {
+ var k K
+ var v V
+ c.lock.Lock()
+ key, value, ok = c.lru.RemoveOldest()
+ if c.onEvictedCB != nil && ok {
+ k, v = c.evictedKeys[0], c.evictedVals[0]
+ c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
+ }
+ c.lock.Unlock()
+ if c.onEvictedCB != nil && ok {
+ c.onEvictedCB(k, v)
+ }
+ return
+}
+
+// GetOldest returns the oldest entry
+func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) {
+ c.lock.RLock()
+ key, value, ok = c.lru.GetOldest()
+ c.lock.RUnlock()
+ return
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *Cache[K, V]) Keys() []K {
+ c.lock.RLock()
+ keys := c.lru.Keys()
+ c.lock.RUnlock()
+ return keys
+}
+
+// Values returns a slice of the values in the cache, from oldest to newest.
+func (c *Cache[K, V]) Values() []V {
+ c.lock.RLock()
+ values := c.lru.Values()
+ c.lock.RUnlock()
+ return values
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache[K, V]) Len() int {
+ c.lock.RLock()
+ length := c.lru.Len()
+ c.lock.RUnlock()
+ return length
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list
new file mode 100644
index 0000000000..c4764e6b2f
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list
@@ -0,0 +1,29 @@
+This license applies to simplelru/list.go
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go
new file mode 100644
index 0000000000..f69792388c
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go
@@ -0,0 +1,177 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package simplelru
+
+import (
+ "errors"
+
+ "github.com/hashicorp/golang-lru/v2/internal"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback[K comparable, V any] func(key K, value V)
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU[K comparable, V any] struct {
+ size int
+ evictList *internal.LruList[K, V]
+ items map[K]*internal.Entry[K, V]
+ onEvict EvictCallback[K, V]
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) {
+ if size <= 0 {
+ return nil, errors.New("must provide a positive size")
+ }
+
+ c := &LRU[K, V]{
+ size: size,
+ evictList: internal.NewList[K, V](),
+ items: make(map[K]*internal.Entry[K, V]),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *LRU[K, V]) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value = value
+ return false
+ }
+
+ // Add new item
+ ent := c.evictList.PushFront(key, value)
+ c.items[key] = ent
+
+ evict := c.evictList.Length() > c.size
+ // Verify size not exceeded
+ if evict {
+ c.removeOldest()
+ }
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ return ent.Value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU[K, V]) Contains(key K) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU[K, V]) Peek(key K) (value V, ok bool) {
+ var ent *internal.Entry[K, V]
+ if ent, ok = c.items[key]; ok {
+ return ent.Value, true
+ }
+ return
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU[K, V]) Remove(key K) (present bool) {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
+ if ent := c.evictList.Back(); ent != nil {
+ c.removeElement(ent)
+ return ent.Key, ent.Value, true
+ }
+ return
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) {
+ if ent := c.evictList.Back(); ent != nil {
+ return ent.Key, ent.Value, true
+ }
+ return
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU[K, V]) Keys() []K {
+ keys := make([]K, c.evictList.Length())
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
+ keys[i] = ent.Key
+ i++
+ }
+ return keys
+}
+
+// Values returns a slice of the values in the cache, from oldest to newest.
+func (c *LRU[K, V]) Values() []V {
+ values := make([]V, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
+ values[i] = ent.Value
+ i++
+ }
+ return values
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU[K, V]) Len() int {
+ return c.evictList.Length()
+}
+
+// Resize changes the cache size.
+func (c *LRU[K, V]) Resize(size int) (evicted int) {
+ diff := c.Len() - size
+ if diff < 0 {
+ diff = 0
+ }
+ for i := 0; i < diff; i++ {
+ c.removeOldest()
+ }
+ c.size = size
+ return diff
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU[K, V]) removeOldest() {
+ if ent := c.evictList.Back(); ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) {
+ c.evictList.Remove(e)
+ delete(c.items, e.Key)
+ if c.onEvict != nil {
+ c.onEvict(e.Key, e.Value)
+ }
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
new file mode 100644
index 0000000000..043b8bcc3f
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package simplelru provides simple LRU implementation based on build-in container/list.
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache[K comparable, V any] interface {
+ // Adds a value to the cache, returns true if an eviction occurred and
+ // updates the "recently used"-ness of the key.
+ Add(key K, value V) bool
+
+ // Returns key's value from the cache and
+ // updates the "recently used"-ness of the key. #value, isFound
+ Get(key K) (value V, ok bool)
+
+ // Checks if a key exists in cache without updating the recent-ness.
+ Contains(key K) (ok bool)
+
+ // Returns key's value without updating the "recently used"-ness of the key.
+ Peek(key K) (value V, ok bool)
+
+ // Removes a key from the cache.
+ Remove(key K) bool
+
+ // Removes the oldest entry from cache.
+ RemoveOldest() (K, V, bool)
+
+ // Returns the oldest entry from the cache. #key, value, isFound
+ GetOldest() (K, V, bool)
+
+ // Returns a slice of the keys in the cache, from oldest to newest.
+ Keys() []K
+
+ // Values returns a slice of the values in the cache, from oldest to newest.
+ Values() []V
+
+ // Returns the number of items in the cache.
+ Len() int
+
+ // Clears all cache entries.
+ Purge()
+
+ // Resizes cache, returning number evicted
+ Resize(int) int
+}
diff --git a/vendor/github.com/minio/crc64nvme/crc64.go b/vendor/github.com/minio/crc64nvme/crc64.go
index ca34a48e09..10d12278fb 100644
--- a/vendor/github.com/minio/crc64nvme/crc64.go
+++ b/vendor/github.com/minio/crc64nvme/crc64.go
@@ -128,7 +128,7 @@ func update(crc uint64, p []byte) uint64 {
if hasAsm512 && runs >= 8 {
// Use 512-bit wide instructions for >= 1KB.
crc = updateAsm512(crc, p[:128*runs])
- } else {
+ } else if runs > 0 {
crc = updateAsm(crc, p[:128*runs])
}
return update(crc, p[128*runs:])
diff --git a/vendor/github.com/minio/crc64nvme/crc64_amd64.s b/vendor/github.com/minio/crc64nvme/crc64_amd64.s
index acfea6a151..590814d5bd 100644
--- a/vendor/github.com/minio/crc64nvme/crc64_amd64.s
+++ b/vendor/github.com/minio/crc64nvme/crc64_amd64.s
@@ -15,18 +15,18 @@ TEXT ·updateAsm(SB), $0-40
CMPQ CX, $1
JLT skip128
- VMOVDQA 0x00(SI), X0
- VMOVDQA 0x10(SI), X1
- VMOVDQA 0x20(SI), X2
- VMOVDQA 0x30(SI), X3
- VMOVDQA 0x40(SI), X4
- VMOVDQA 0x50(SI), X5
- VMOVDQA 0x60(SI), X6
- VMOVDQA 0x70(SI), X7
- MOVQ AX, X8
- PXOR X8, X0
- CMPQ CX, $1
- JE tail128
+ MOVOA 0x00(SI), X0
+ MOVOA 0x10(SI), X1
+ MOVOA 0x20(SI), X2
+ MOVOA 0x30(SI), X3
+ MOVOA 0x40(SI), X4
+ MOVOA 0x50(SI), X5
+ MOVOA 0x60(SI), X6
+ MOVOA 0x70(SI), X7
+ MOVQ AX, X8
+ PXOR X8, X0
+ CMPQ CX, $1
+ JE tail128
MOVQ $0xa1ca681e733f9c40, AX
MOVQ AX, X8
@@ -36,42 +36,42 @@ TEXT ·updateAsm(SB), $0-40
loop128:
ADDQ $128, SI
SUBQ $1, CX
- VMOVDQA X0, X10
+ MOVOA X0, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X0
PXOR X10, X0
PXOR 0(SI), X0
- VMOVDQA X1, X10
+ MOVOA X1, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X1
PXOR X10, X1
PXOR 0x10(SI), X1
- VMOVDQA X2, X10
+ MOVOA X2, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X2
PXOR X10, X2
PXOR 0x20(SI), X2
- VMOVDQA X3, X10
+ MOVOA X3, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X3
PXOR X10, X3
PXOR 0x30(SI), X3
- VMOVDQA X4, X10
+ MOVOA X4, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X4
PXOR X10, X4
PXOR 0x40(SI), X4
- VMOVDQA X5, X10
+ MOVOA X5, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X5
PXOR X10, X5
PXOR 0x50(SI), X5
- VMOVDQA X6, X10
+ MOVOA X6, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X6
PXOR X10, X6
PXOR 0x60(SI), X6
- VMOVDQA X7, X10
+ MOVOA X7, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X7
PXOR X10, X7
@@ -202,15 +202,17 @@ TEXT ·updateAsm512(SB), $0-40
PCALIGN $16
loop128:
- VMOVDQU64 0x80(SI), Z1
- VMOVDQU64 0xc0(SI), Z5
- ADDQ $128, SI
+ PREFETCHT0 512(SI)
+ VMOVDQU64 0x80(SI), Z1
+ VMOVDQU64 0xc0(SI), Z5
+ ADDQ $128, SI
SUBQ $1, CX
VPCLMULQDQ $0x00, Z8, Z0, Z10
VPCLMULQDQ $0x11, Z9, Z0, Z0
VPTERNLOGD $0x96, Z1, Z10, Z0 // Combine results with xor into Z0
+ PREFETCHT0 512-64(SI)
VPCLMULQDQ $0x00, Z8, Z4, Z10
VPCLMULQDQ $0x11, Z9, Z4, Z4
VPTERNLOGD $0x96, Z5, Z10, Z4 // Combine results with xor into Z4
diff --git a/vendor/github.com/minio/minio-go/v7/CLAUDE.md b/vendor/github.com/minio/minio-go/v7/CLAUDE.md
index 26ff953237..8074a333c0 100644
--- a/vendor/github.com/minio/minio-go/v7/CLAUDE.md
+++ b/vendor/github.com/minio/minio-go/v7/CLAUDE.md
@@ -29,17 +29,16 @@ SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABL
### Linting and Code Quality
```bash
-# Run all checks (lint, vet, test, examples, functional tests)
+# Run all checks (lint, test, examples, functional tests)
make checks
-# Run linter only
+# Run linter only (includes govet, staticcheck, and other linters)
make lint
-# Run vet and staticcheck
-make vet
-
-# Alternative: run golangci-lint directly
+# Run golangci-lint directly
golangci-lint run --timeout=5m --config ./.golangci.yml
+
+# Note: 'make vet' is now an alias for 'make lint' for backwards compatibility
```
### Building Examples
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
index 9e4ddc4c88..85f9f4dc61 100644
--- a/vendor/github.com/minio/minio-go/v7/Makefile
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -5,7 +5,7 @@ all: checks
.PHONY: examples docs
-checks: lint vet test examples functional-test
+checks: lint test examples functional-test
lint:
@mkdir -p ${GOPATH}/bin
@@ -14,10 +14,7 @@ lint:
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
-vet:
- @GO111MODULE=on go vet ./...
- @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
- ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
+vet: lint
test:
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
diff --git a/vendor/github.com/minio/minio-go/v7/api-append-object.go b/vendor/github.com/minio/minio-go/v7/api-append-object.go
index b1bddf986e..f1b20b6bbb 100644
--- a/vendor/github.com/minio/minio-go/v7/api-append-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-append-object.go
@@ -45,6 +45,7 @@ type AppendObjectOptions struct {
customHeaders http.Header
checksumType ChecksumType
+ offset int64
}
// Header returns the custom header for AppendObject API
@@ -61,6 +62,7 @@ func (opts *AppendObjectOptions) setWriteOffset(offset int64) {
opts.customHeaders = make(http.Header)
}
opts.customHeaders["x-amz-write-offset-bytes"] = []string{strconv.FormatInt(offset, 10)}
+ opts.offset = offset
}
func (opts *AppendObjectOptions) setChecksumParams(info ObjectInfo) {
@@ -149,10 +151,16 @@ func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName stri
// When AppendObject() is used, S3 Express will return final object size as x-amz-object-size
if amzSize := h.Get("x-amz-object-size"); amzSize != "" {
+ oSize := size
size, err = strconv.ParseInt(amzSize, 10, 64)
if err != nil {
return UploadInfo{}, err
}
+ if size != opts.offset+oSize {
+ return UploadInfo{}, errors.New("server returned incorrect object size")
+ }
+ } else {
+ return UploadInfo{}, errors.New("server does not support appends. Object has been overwritten")
}
return UploadInfo{
@@ -172,6 +180,7 @@ func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName stri
}
// AppendObject - S3 Express Zone https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html
+// Note that appending on a server without append support may overwrite the object.
func (c *Client) AppendObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
opts AppendObjectOptions,
) (info UploadInfo, err error) {
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go b/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go
index d1493a5b9a..7f0c39e0bc 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go
@@ -25,7 +25,7 @@ import (
"strings"
"github.com/minio/minio-go/v7/pkg/s3utils"
- "gopkg.in/yaml.v3"
+ "go.yaml.in/yaml/v3"
)
// QOSConfigVersionCurrent is the current version of the QoS configuration.
diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
index 232bd2c01d..5395648044 100644
--- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
@@ -82,6 +82,9 @@ type CopyDestOptions struct {
Size int64 // Needs to be specified if progress bar is specified.
// Progress of the entire copy operation will be sent here.
Progress io.Reader
+ // PartSize specifies the part size for multipart copy operations.
+ // If not specified, defaults to maxPartSize (5 GiB).
+ PartSize uint64
}
// Process custom-metadata to remove a `x-amz-meta-` prefix if
@@ -460,15 +463,15 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
// Is data to copy too large?
totalSize += srcCopySize
- if totalSize > maxMultipartPutObjectSize {
- return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
+ if totalSize > maxObjectSize {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5GiB * 10000)", totalSize))
}
// record source size
srcObjectSizes[i] = srcCopySize
// calculate parts needed for current source
- totalParts += partsRequired(srcCopySize)
+ totalParts += partsRequired(srcCopySize, int64(dst.PartSize))
// Do we need more parts than we are allowed?
if totalParts > maxPartsCount {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
@@ -534,7 +537,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
// calculate start/end indices of parts after
// splitting.
- startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
+ startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src, int64(dst.PartSize))
for j, start := range startIdx {
end := endIdx[j]
@@ -568,12 +571,14 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
return uploadInfo, nil
}
-// partsRequired is maximum parts possible with
-// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1))
-func partsRequired(size int64) int64 {
- maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
- r := size / int64(maxPartSize)
- if size%int64(maxPartSize) > 0 {
+// partsRequired calculates the number of parts needed for a given size
+// using the specified part size. If partSize is 0, defaults to maxPartSize (5 GiB).
+func partsRequired(size int64, partSize int64) int64 {
+ if partSize == 0 {
+ partSize = maxPartSize
+ }
+ r := size / partSize
+ if size%partSize > 0 {
r++
}
return r
@@ -582,13 +587,13 @@ func partsRequired(size int64) int64 {
// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
-// it is not the last part.
-func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
+// it is not the last part. If partSize is 0, defaults to maxPartSize (5 GiB).
+func calculateEvenSplits(size int64, src CopySrcOptions, partSize int64) (startIndex, endIndex []int64) {
if size == 0 {
return startIndex, endIndex
}
- reqParts := partsRequired(size)
+ reqParts := partsRequired(size, partSize)
startIndex = make([]int64, reqParts)
endIndex = make([]int64, reqParts)
// Compute number of required parts `k`, as:
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
index 56af168708..9e8756fd59 100644
--- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -226,7 +226,7 @@ type ObjectInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC64NVME string
- ChecksumMode string
+ ChecksumMode string `xml:"ChecksumType"`
Internal *struct {
K int // Data blocks
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
index e5f88d98e1..03c7e94357 100644
--- a/vendor/github.com/minio/minio-go/v7/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -129,7 +129,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
Server: resp.Header.Get("Server"),
}
- _, success := successStatus[resp.StatusCode]
+ success := successStatus.Contains(resp.StatusCode)
errBody, err := xmlDecodeAndBody(resp.Body, &errResp)
// Xml decoding failed with no body, fall back to HTTP headers.
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
index 52f69563ca..a33e6ba55f 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
@@ -67,9 +67,11 @@ func isReadAt(reader io.Reader) (ok bool) {
//
// maxPartsCount - 10000
// minPartSize - 16MiB
-// maxMultipartPutObjectSize - 5TiB
+// maxObjectSize - ~48.83TiB (maxPartSize * maxPartsCount)
func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) {
- // object size is '-1' set it to 5TiB.
+ // When object size is unknown (-1), default to 5TiB to limit memory usage.
+ // This results in ~537MiB part sizes. For larger objects (up to ~48.83TiB),
+ // callers should set configuredPartSize explicitly to control memory usage.
var unknownSize bool
if objectSize == -1 {
unknownSize = true
@@ -77,8 +79,8 @@ func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCou
}
// object size is larger than supported maximum.
- if objectSize > maxMultipartPutObjectSize {
- err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+ if objectSize > maxObjectSize {
+ err = errEntityTooLarge(objectSize, maxObjectSize, "", "")
return totalPartsCount, partSize, lastPartSize, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index 80f3d61f34..f241b7d980 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -311,7 +311,9 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
-// be uploaded through this operation will be 5TiB.
+// be uploaded through this operation will be 5TiB by default.
+// For larger objects (up to ~48.83TiB), set PutObjectOptions.PartSize
+// to control memory usage and enable uploads beyond 5TiB.
//
// WARNING: Passing down '-1' will use memory and these cannot
// be reused for best outcomes for PutObject(), pass the size always.
@@ -330,8 +332,8 @@ func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, r
}
// Check for largest object size allowed.
- if size > int64(maxMultipartPutObjectSize) {
- return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
+ if size > int64(maxObjectSize) {
+ return UploadInfo{}, errEntityTooLarge(size, maxObjectSize, bucketName, objectName)
}
if opts.Checksum.IsSet() {
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
index 9794ffb2bd..69c5fb491a 100644
--- a/vendor/github.com/minio/minio-go/v7/api-remove.go
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -30,6 +30,14 @@ import (
"github.com/minio/minio-go/v7/pkg/s3utils"
)
+// useMultiDeleteForBulkDelete returns true if the client should use
+// multi-object delete API for bulk delete operations. Returns false
+// for endpoints that do not support multi-object delete (e.g., GCS).
+func (c *Client) useMultiDeleteForBulkDelete() bool {
+ // NOTE: GCS does not support multi-object delete API.
+ return !s3utils.IsGoogleEndpoint(*c.endpointURL)
+}
+
//revive:disable
// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions.
@@ -411,6 +419,12 @@ func hasInvalidXMLChar(str string) bool {
// Generate and call MultiDelete S3 requests based on entries received from the iterator.
func (c *Client) removeObjectsIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], yield func(RemoveObjectResult) bool, opts RemoveObjectsOptions) {
+ // NOTE: GCS does not support multi-object delete, use single DELETE requests.
+ if !c.useMultiDeleteForBulkDelete() {
+ c.removeObjectsSingleIter(ctx, bucketName, objectsIter, yield, opts)
+ return
+ }
+
maxEntries := 1000
urlValues := make(url.Values)
urlValues.Set("delete", "")
@@ -549,14 +563,20 @@ func (c *Client) removeObjectsIter(ctx context.Context, bucketName string, objec
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
+ // Close result channel when delete finishes.
+ defer close(resultCh)
+
+ // NOTE: GCS does not support multi-object delete, use single DELETE requests.
+ if !c.useMultiDeleteForBulkDelete() {
+ c.removeObjectsSingle(ctx, bucketName, objectsCh, resultCh, opts)
+ return
+ }
+
maxEntries := 1000
finish := false
urlValues := make(url.Values)
urlValues.Set("delete", "")
- // Close result channel when Multi delete finishes.
- defer close(resultCh)
-
// Loop over entries by 1000 and call MultiDelete requests
for !finish {
count := 0
@@ -640,6 +660,63 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
}
}
+// removeObjectsSingle deletes objects one by one using single DELETE requests.
+// This is used for endpoints that do not support multi-object delete (e.g., GCS).
+func (c *Client) removeObjectsSingle(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case object, ok := <-objectsCh:
+ if !ok {
+ return
+ }
+ removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
+ VersionID: object.VersionID,
+ GovernanceBypass: opts.GovernanceBypass,
+ })
+ if err := removeResult.Err; err != nil {
+ // Version/object does not exist is not an error, ignore and continue.
+ switch ToErrorResponse(err).Code {
+ case NoSuchVersion, NoSuchKey:
+ continue
+ }
+ }
+ select {
+ case <-ctx.Done():
+ return
+ case resultCh <- removeResult:
+ }
+ }
+ }
+}
+
+// removeObjectsSingleIter deletes objects one by one using single DELETE requests.
+// This is used for endpoints that do not support multi-object delete (e.g., GCS).
+func (c *Client) removeObjectsSingleIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], yield func(RemoveObjectResult) bool, opts RemoveObjectsOptions) {
+ for object := range objectsIter {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
+ VersionID: object.VersionID,
+ GovernanceBypass: opts.GovernanceBypass,
+ })
+ if err := removeResult.Err; err != nil {
+ // Version/object does not exist is not an error, ignore and continue.
+ switch ToErrorResponse(err).Code {
+ case NoSuchVersion, NoSuchKey:
+ continue
+ }
+ }
+ if !yield(removeResult) {
+ return
+ }
+ }
+}
+
// RemoveIncompleteUpload aborts an partially uploaded object.
func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
// Input validation.
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 5352d793b8..1b44b90972 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -43,6 +43,7 @@ import (
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/kvcache"
"github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio-go/v7/pkg/singleflight"
"golang.org/x/net/publicsuffix"
@@ -160,7 +161,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.96"
+ libraryVersion = "v7.0.98"
)
// User Agent should always following the below style.
@@ -636,11 +637,11 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
}
// List of success status.
-var successStatus = map[int]struct{}{
- http.StatusOK: {},
- http.StatusNoContent: {},
- http.StatusPartialContent: {},
-}
+var successStatus = set.CreateIntSet(
+ http.StatusOK,
+ http.StatusNoContent,
+ http.StatusPartialContent,
+)
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
@@ -722,7 +723,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
return nil, err
}
- _, success := successStatus[res.StatusCode]
+ success := successStatus.Contains(res.StatusCode)
if success && !metadata.expect200OKWithError {
// We do not expect 2xx to return an error return.
return res, nil
diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go
index 4099a37f9a..d49efdee5d 100644
--- a/vendor/github.com/minio/minio-go/v7/constants.go
+++ b/vendor/github.com/minio/minio-go/v7/constants.go
@@ -42,6 +42,10 @@ const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
+// maxObjectSize - maximum size of an object calculated from
+// maxPartSize * maxPartsCount = 5GiB * 10000 = ~48.83TiB
+const maxObjectSize = maxPartSize * maxPartsCount
+
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index 4f8f9dd8cc..8984d0358b 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -1,5 +1,4 @@
//go:build mint
-// +build mint
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
@@ -1965,6 +1964,101 @@ func testObjectTaggingWithVersioning() {
logSuccess(testName, function, args, startTime)
}
+func testPutObjectWithAutoChecksums() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ c, err := NewClient(ClientConfig{TrailingHeaders: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+ const testfile = "datafile-1.03-MB"
+ bufSize := dataFileMap[testfile]
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+ c.TraceOn(os.Stdout)
+
+ cmpChecksum := func(got, want string) {
+ if want != got {
+ logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+ return
+ }
+ }
+
+ meta := map[string]string{}
+ reader := getDataReader(testfile)
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ h := minio.ChecksumCRC64NVME.Hasher()
+ h.Reset()
+ h.Write(b)
+ // Upload the data without explicit checksum.
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+ DisableMultipart: true,
+ DisableContentSha256: false,
+ UserMetadata: meta,
+ AutoChecksum: minio.ChecksumNone,
+ Checksum: minio.ChecksumNone,
+ })
+ _ = resp
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the metadata back
+ gopts := minio.GetObjectOptions{Checksum: true}
+ st, err := c.StatObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ if st.ChecksumCRC64NVME != "" {
+ meta[minio.ChecksumCRC64NVME.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+ cmpChecksum(st.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"])
+ if st.ChecksumMode != minio.ChecksumFullObjectMode.String() {
+ logError(testName, function, args, startTime, "", "Checksum mode is not full object", fmt.Errorf("got %s, want %s", st.ChecksumMode, minio.ChecksumFullObjectMode.String()))
+ }
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
// Test PutObject with custom checksums.
func testPutObjectWithChecksums() {
// initialize logging params
@@ -14686,6 +14780,7 @@ func main() {
testPutObjectMetadataNonUSASCIIV2()
testPutObjectNoLengthV2()
testPutObjectsUnknownV2()
+ testPutObjectWithAutoChecksums()
testGetObjectContextV2()
testFPutObjectContextV2()
testFGetObjectContextV2()
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
index 6db26c036f..bb7b5ab236 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
@@ -1,5 +1,4 @@
//go:build !fips
-// +build !fips
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
index 640258242f..532dd00add 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
@@ -1,5 +1,4 @@
//go:build fips
-// +build fips
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
index 7ed98b0d13..01d4e6aa79 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -267,6 +267,10 @@ func (f Filter) MarshalJSON() ([]byte, error) {
// MarshalXML - produces the xml representation of the Filter struct
// only one of Prefix, And and Tag should be present in the output.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if f.IsNull() {
+ return nil
+ }
+
if err := e.EncodeToken(start); err != nil {
return err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/intset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/intset.go
new file mode 100644
index 0000000000..db38a0c641
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/intset.go
@@ -0,0 +1,127 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2026 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// IntSet - uses map as set of ints.
+// This is now implemented using the generic Set[int] type.
+type IntSet Set[int]
+
+// ToSlice - returns IntSet as int slice.
+func (set IntSet) ToSlice() []int {
+ return ToSliceOrdered(Set[int](set))
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set IntSet) IsEmpty() bool {
+ return Set[int](set).IsEmpty()
+}
+
+// Add - adds int to the set.
+func (set IntSet) Add(i int) {
+ Set[int](set).Add(i)
+}
+
+// Remove - removes int in the set. It does nothing if int does not exist in the set.
+func (set IntSet) Remove(i int) {
+ Set[int](set).Remove(i)
+}
+
+// Contains - checks if int is in the set.
+func (set IntSet) Contains(i int) bool {
+ return Set[int](set).Contains(i)
+}
+
+// FuncMatch - returns new set containing each value who passes match function.
+// A 'matchFn' should accept element in a set as first argument and
+// 'matchInt' as second argument. The function can do any logic to
+// compare both the arguments and should return true to accept element in
+// a set to include in output set else the element is ignored.
+func (set IntSet) FuncMatch(matchFn func(int, int) bool, matchInt int) IntSet {
+ return IntSet(Set[int](set).FuncMatch(matchFn, matchInt))
+}
+
+// ApplyFunc - returns new set containing each value processed by 'applyFn'.
+// A 'applyFn' should accept element in a set as a argument and return
+// a processed int. The function can do any logic to return a processed
+// int.
+func (set IntSet) ApplyFunc(applyFn func(int) int) IntSet {
+ return IntSet(Set[int](set).ApplyFunc(applyFn))
+}
+
+// Equals - checks whether given set is equal to current set or not.
+func (set IntSet) Equals(iset IntSet) bool {
+ return Set[int](set).Equals(Set[int](iset))
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set IntSet) Intersection(iset IntSet) IntSet {
+ return IntSet(Set[int](set).Intersection(Set[int](iset)))
+}
+
+// Difference - returns the difference with given set as new set.
+func (set IntSet) Difference(iset IntSet) IntSet {
+ return IntSet(Set[int](set).Difference(Set[int](iset)))
+}
+
+// Union - returns the union with given set as new set.
+func (set IntSet) Union(iset IntSet) IntSet {
+ return IntSet(Set[int](set).Union(Set[int](iset)))
+}
+
+// MarshalJSON - converts to JSON data.
+func (set IntSet) MarshalJSON() ([]byte, error) {
+ return json.Marshal(set.ToSlice())
+}
+
+// UnmarshalJSON - parses JSON data and creates new set with it.
+func (set *IntSet) UnmarshalJSON(data []byte) error {
+ sl := []int{}
+ var err error
+ if err = json.Unmarshal(data, &sl); err == nil {
+ *set = make(IntSet)
+ for _, i := range sl {
+ set.Add(i)
+ }
+ }
+ return err
+}
+
+// String - returns printable string of the set.
+func (set IntSet) String() string {
+ return fmt.Sprintf("%v", set.ToSlice())
+}
+
+// NewIntSet - creates new int set.
+func NewIntSet() IntSet {
+ return IntSet(New[int]())
+}
+
+// CreateIntSet - creates new int set with given int values.
+func CreateIntSet(il ...int) IntSet {
+ return IntSet(Create(il...))
+}
+
+// CopyIntSet - returns copy of given set.
+func CopyIntSet(set IntSet) IntSet {
+ return IntSet(Copy(Set[int](set)))
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/msgp.go b/vendor/github.com/minio/minio-go/v7/pkg/set/msgp.go
index 7d3c3620bb..183c5bef27 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/set/msgp.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/msgp.go
@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2025 MinIO, Inc.
+ * Copyright 2015-2026 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,119 +17,47 @@
package set
-import "github.com/tinylib/msgp/msgp"
+import (
+ "github.com/tinylib/msgp/msgp"
+ "github.com/tinylib/msgp/msgp/setof"
+)
// EncodeMsg encodes the message to the writer.
// Values are stored as a slice of strings or nil.
func (s StringSet) EncodeMsg(writer *msgp.Writer) error {
- if s == nil {
- return writer.WriteNil()
- }
- err := writer.WriteArrayHeader(uint32(len(s)))
- if err != nil {
- return err
- }
- sorted := s.ToByteSlices()
- for _, k := range sorted {
- err = writer.WriteStringFromBytes(k)
- if err != nil {
- return err
- }
- }
- return nil
+ return setof.StringSorted(s).EncodeMsg(writer)
}
// MarshalMsg encodes the message to the bytes.
// Values are stored as a slice of strings or nil.
func (s StringSet) MarshalMsg(bytes []byte) ([]byte, error) {
- if s == nil {
- return msgp.AppendNil(bytes), nil
- }
- if len(s) == 0 {
- return msgp.AppendArrayHeader(bytes, 0), nil
- }
- bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
- sorted := s.ToByteSlices()
- for _, k := range sorted {
- bytes = msgp.AppendStringFromBytes(bytes, k)
- }
- return bytes, nil
+ return setof.StringSorted(s).MarshalMsg(bytes)
}
// DecodeMsg decodes the message from the reader.
func (s *StringSet) DecodeMsg(reader *msgp.Reader) error {
- if reader.IsNil() {
- *s = nil
- return reader.Skip()
- }
- sz, err := reader.ReadArrayHeader()
- if err != nil {
+ var ss setof.String
+ if err := ss.DecodeMsg(reader); err != nil {
return err
}
- dst := *s
- if dst == nil {
- dst = make(StringSet, sz)
- } else {
- for k := range dst {
- delete(dst, k)
- }
- }
- for i := uint32(0); i < sz; i++ {
- var k string
- k, err = reader.ReadString()
- if err != nil {
- return err
- }
- dst[k] = struct{}{}
- }
- *s = dst
+ *s = StringSet(ss)
return nil
}
// UnmarshalMsg decodes the message from the bytes.
func (s *StringSet) UnmarshalMsg(bytes []byte) ([]byte, error) {
- if msgp.IsNil(bytes) {
- *s = nil
- return bytes[msgp.NilSize:], nil
- }
- // Read the array header
- sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ var ss setof.String
+ bytes, err := ss.UnmarshalMsg(bytes)
if err != nil {
return nil, err
}
- dst := *s
- if dst == nil {
- dst = make(StringSet, sz)
- } else {
- for k := range dst {
- delete(dst, k)
- }
- }
- for i := uint32(0); i < sz; i++ {
- var k string
- k, bytes, err = msgp.ReadStringBytes(bytes)
- if err != nil {
- return nil, err
- }
- dst[k] = struct{}{}
- }
- *s = dst
+ *s = StringSet(ss)
return bytes, nil
}
// Msgsize returns the maximum size of the message.
func (s StringSet) Msgsize() int {
- if s == nil {
- return msgp.NilSize
- }
- if len(s) == 0 {
- return msgp.ArrayHeaderSize
- }
- size := msgp.ArrayHeaderSize
- for key := range s {
- size += msgp.StringPrefixSize + len(key)
- }
- return size
+ return setof.String(s).Msgsize()
}
// MarshalBinary encodes the receiver into a binary form and returns the result.
@@ -147,3 +75,57 @@ func (s *StringSet) UnmarshalBinary(b []byte) error {
_, err := s.UnmarshalMsg(b)
return err
}
+
+// EncodeMsg encodes the message to the writer.
+// Values are stored as a slice of ints or nil.
+func (s IntSet) EncodeMsg(writer *msgp.Writer) error {
+ return setof.IntSorted(s).EncodeMsg(writer)
+}
+
+// MarshalMsg encodes the message to the bytes.
+// Values are stored as a slice of ints or nil.
+func (s IntSet) MarshalMsg(bytes []byte) ([]byte, error) {
+ return setof.IntSorted(s).MarshalMsg(bytes)
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *IntSet) DecodeMsg(reader *msgp.Reader) error {
+ var is setof.Int
+ if err := is.DecodeMsg(reader); err != nil {
+ return err
+ }
+ *s = IntSet(is)
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *IntSet) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ var is setof.Int
+ bytes, err := is.UnmarshalMsg(bytes)
+ if err != nil {
+ return nil, err
+ }
+ *s = IntSet(is)
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s IntSet) Msgsize() int {
+ return setof.Int(s).Msgsize()
+}
+
+// MarshalBinary encodes the receiver into a binary form and returns the result.
+func (s IntSet) MarshalBinary() ([]byte, error) {
+ return s.MarshalMsg(nil)
+}
+
+// AppendBinary appends the binary representation of itself to the end of b
+func (s IntSet) AppendBinary(b []byte) ([]byte, error) {
+ return s.MarshalMsg(b)
+}
+
+// UnmarshalBinary decodes the binary representation of itself from b
+func (s *IntSet) UnmarshalBinary(b []byte) error {
+ _, err := s.UnmarshalMsg(b)
+ return err
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/set.go b/vendor/github.com/minio/minio-go/v7/pkg/set/set.go
new file mode 100644
index 0000000000..c3e8aeb3e5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/set.go
@@ -0,0 +1,190 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2026 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "cmp"
+ "slices"
+)
+
+// Set - uses map as a set of comparable elements.
+//
+// Important Caveats:
+// - Sets are unordered by nature. Map iteration order is non-deterministic in Go.
+// - When converting to slices, use ToSlice() with a comparison function or
+// ToSliceOrdered() for ordered types to get deterministic, sorted results.
+// - Comparison functions must provide total ordering: if your comparison returns 0
+// for different elements, their relative order in the result is undefined.
+// - For deterministic ordering when elements may compare equal, use secondary
+// sort criteria (e.g., sort by length first, then alphabetically for ties).
+type Set[T comparable] map[T]struct{}
+
+// ToSlice - returns Set as a slice sorted using the provided comparison function.
+// If cmpFn is nil, the slice order is undefined (non-deterministic).
+//
+// Important: The comparison function should provide total ordering. If it returns 0
+// for elements that are not identical, their relative order in the result is undefined.
+// For deterministic results, use secondary sort criteria for tie-breaking.
+func (set Set[T]) ToSlice(cmpFn func(a, b T) int) []T {
+ keys := make([]T, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ if cmpFn != nil {
+ slices.SortFunc(keys, cmpFn)
+ }
+ return keys
+}
+
+// ToSliceOrdered - returns Set as a sorted slice for ordered types.
+// This is a convenience method for types that implement cmp.Ordered.
+// The result is deterministic and always sorted in ascending order.
+func ToSliceOrdered[T cmp.Ordered](set Set[T]) []T {
+ keys := make([]T, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ slices.Sort(keys)
+ return keys
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set Set[T]) IsEmpty() bool {
+ return len(set) == 0
+}
+
+// Add - adds element to the set.
+func (set Set[T]) Add(s T) {
+ set[s] = struct{}{}
+}
+
+// Remove - removes element from the set. It does nothing if element does not exist in the set.
+func (set Set[T]) Remove(s T) {
+ delete(set, s)
+}
+
+// Contains - checks if element is in the set.
+func (set Set[T]) Contains(s T) bool {
+ _, ok := set[s]
+ return ok
+}
+
+// FuncMatch - returns new set containing each value that passes match function.
+// A 'matchFn' should accept element in a set as first argument and
+// 'matchValue' as second argument. The function can do any logic to
+// compare both the arguments and should return true to accept element in
+// a set to include in output set else the element is ignored.
+func (set Set[T]) FuncMatch(matchFn func(T, T) bool, matchValue T) Set[T] {
+ nset := New[T]()
+ for k := range set {
+ if matchFn(k, matchValue) {
+ nset.Add(k)
+ }
+ }
+ return nset
+}
+
+// ApplyFunc - returns new set containing each value processed by 'applyFn'.
+// A 'applyFn' should accept element in a set as an argument and return
+// a processed value. The function can do any logic to return a processed value.
+func (set Set[T]) ApplyFunc(applyFn func(T) T) Set[T] {
+ nset := New[T]()
+ for k := range set {
+ nset.Add(applyFn(k))
+ }
+ return nset
+}
+
+// Equals - checks whether given set is equal to current set or not.
+func (set Set[T]) Equals(sset Set[T]) bool {
+ // If length of set is not equal to length of given set, the
+ // set is not equal to given set.
+ if len(set) != len(sset) {
+ return false
+ }
+
+ // As both sets are equal in length, check each elements are equal.
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set Set[T]) Intersection(sset Set[T]) Set[T] {
+ nset := New[T]()
+ for k := range set {
+ if _, ok := sset[k]; ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Difference - returns the difference with given set as new set.
+func (set Set[T]) Difference(sset Set[T]) Set[T] {
+ nset := New[T]()
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Union - returns the union with given set as new set.
+func (set Set[T]) Union(sset Set[T]) Set[T] {
+ nset := New[T]()
+ for k := range set {
+ nset.Add(k)
+ }
+
+ for k := range sset {
+ nset.Add(k)
+ }
+
+ return nset
+}
+
+// New - creates new set.
+func New[T comparable]() Set[T] {
+ return make(Set[T])
+}
+
+// Create - creates new set with given values.
+func Create[T comparable](sl ...T) Set[T] {
+ set := make(Set[T], len(sl))
+ for _, k := range sl {
+ set.Add(k)
+ }
+ return set
+}
+
+// Copy - returns copy of given set.
+func Copy[T comparable](set Set[T]) Set[T] {
+ nset := make(Set[T], len(set))
+ for k, v := range set {
+ nset[k] = v
+ }
+ return nset
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
index c12651b544..5a0426027e 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
+ * Copyright 2015-2025 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,16 +24,12 @@ import (
)
// StringSet - uses map as set of strings.
-type StringSet map[string]struct{}
+// This is now implemented using the generic Set[string] type.
+type StringSet Set[string]
// ToSlice - returns StringSet as string slice.
func (set StringSet) ToSlice() []string {
- keys := make([]string, 0, len(set))
- for k := range set {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- return keys
+ return ToSliceOrdered(Set[string](set))
}
// ToByteSlices - returns StringSet as a sorted
@@ -62,23 +58,22 @@ func (set StringSet) ToByteSlices() [][]byte {
// IsEmpty - returns whether the set is empty or not.
func (set StringSet) IsEmpty() bool {
- return len(set) == 0
+ return Set[string](set).IsEmpty()
}
// Add - adds string to the set.
func (set StringSet) Add(s string) {
- set[s] = struct{}{}
+ Set[string](set).Add(s)
}
// Remove - removes string in the set. It does nothing if string does not exist in the set.
func (set StringSet) Remove(s string) {
- delete(set, s)
+ Set[string](set).Remove(s)
}
// Contains - checks if string is in the set.
func (set StringSet) Contains(s string) bool {
- _, ok := set[s]
- return ok
+ return Set[string](set).Contains(s)
}
// FuncMatch - returns new set containing each value who passes match function.
@@ -87,13 +82,7 @@ func (set StringSet) Contains(s string) bool {
// compare both the arguments and should return true to accept element in
// a set to include in output set else the element is ignored.
func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
- nset := NewStringSet()
- for k := range set {
- if matchFn(k, matchString) {
- nset.Add(k)
- }
- }
- return nset
+ return StringSet(Set[string](set).FuncMatch(matchFn, matchString))
}
// ApplyFunc - returns new set containing each value processed by 'applyFn'.
@@ -101,67 +90,27 @@ func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString st
// a processed string. The function can do any logic to return a processed
// string.
func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
- nset := NewStringSet()
- for k := range set {
- nset.Add(applyFn(k))
- }
- return nset
+ return StringSet(Set[string](set).ApplyFunc(applyFn))
}
// Equals - checks whether given set is equal to current set or not.
func (set StringSet) Equals(sset StringSet) bool {
- // If length of set is not equal to length of given set, the
- // set is not equal to given set.
- if len(set) != len(sset) {
- return false
- }
-
- // As both sets are equal in length, check each elements are equal.
- for k := range set {
- if _, ok := sset[k]; !ok {
- return false
- }
- }
-
- return true
+ return Set[string](set).Equals(Set[string](sset))
}
// Intersection - returns the intersection with given set as new set.
func (set StringSet) Intersection(sset StringSet) StringSet {
- nset := NewStringSet()
- for k := range set {
- if _, ok := sset[k]; ok {
- nset.Add(k)
- }
- }
-
- return nset
+ return StringSet(Set[string](set).Intersection(Set[string](sset)))
}
// Difference - returns the difference with given set as new set.
func (set StringSet) Difference(sset StringSet) StringSet {
- nset := NewStringSet()
- for k := range set {
- if _, ok := sset[k]; !ok {
- nset.Add(k)
- }
- }
-
- return nset
+ return StringSet(Set[string](set).Difference(Set[string](sset)))
}
// Union - returns the union with given set as new set.
func (set StringSet) Union(sset StringSet) StringSet {
- nset := NewStringSet()
- for k := range set {
- nset.Add(k)
- }
-
- for k := range sset {
- nset.Add(k)
- }
-
- return nset
+ return StringSet(Set[string](set).Union(Set[string](sset)))
}
// MarshalJSON - converts to JSON data.
@@ -196,23 +145,15 @@ func (set StringSet) String() string {
// NewStringSet - creates new string set.
func NewStringSet() StringSet {
- return make(StringSet)
+ return StringSet(New[string]())
}
// CreateStringSet - creates new string set with given string values.
func CreateStringSet(sl ...string) StringSet {
- set := make(StringSet, len(sl))
- for _, k := range sl {
- set.Add(k)
- }
- return set
+ return StringSet(Create(sl...))
}
// CopyStringSet - returns copy of given set.
func CopyStringSet(set StringSet) StringSet {
- nset := make(StringSet, len(set))
- for k, v := range set {
- nset[k] = v
- }
- return nset
+ return StringSet(Copy(Set[string](set)))
}
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
index 59c7a163d4..acf9da6364 100644
--- a/vendor/github.com/minio/minio-go/v7/retry.go
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -26,6 +26,8 @@ import (
"net/http"
"net/url"
"time"
+
+ "github.com/minio/minio-go/v7/pkg/set"
)
// MaxRetry is the maximum number of retries before stopping.
@@ -93,45 +95,43 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
}
// List of AWS S3 error codes which are retryable.
-var retryableS3Codes = map[string]struct{}{
- "RequestError": {},
- "RequestTimeout": {},
- "Throttling": {},
- "ThrottlingException": {},
- "RequestLimitExceeded": {},
- "RequestThrottled": {},
- "InternalError": {},
- "ExpiredToken": {},
- "ExpiredTokenException": {},
- "SlowDown": {},
- "SlowDownWrite": {},
- "SlowDownRead": {},
+var retryableS3Codes = set.CreateStringSet(
+ "RequestError",
+ "RequestTimeout",
+ "Throttling",
+ "ThrottlingException",
+ "RequestLimitExceeded",
+ "RequestThrottled",
+ "InternalError",
+ "ExpiredToken",
+ "ExpiredTokenException",
+ "SlowDown",
+ "SlowDownWrite",
+ "SlowDownRead",
// Add more AWS S3 codes here.
-}
+)
// isS3CodeRetryable - is s3 error code retryable.
-func isS3CodeRetryable(s3Code string) (ok bool) {
- _, ok = retryableS3Codes[s3Code]
- return ok
+func isS3CodeRetryable(s3Code string) bool {
+ return retryableS3Codes.Contains(s3Code)
}
// List of HTTP status codes which are retryable.
-var retryableHTTPStatusCodes = map[int]struct{}{
- http.StatusRequestTimeout: {},
- 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
- 499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
- http.StatusInternalServerError: {},
- http.StatusBadGateway: {},
- http.StatusServiceUnavailable: {},
- http.StatusGatewayTimeout: {},
- 520: {}, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected.
+var retryableHTTPStatusCodes = set.CreateIntSet(
+ http.StatusRequestTimeout,
+ 429, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+ 499, // client closed request, retry. A non-standard status code introduced by nginx.
+ http.StatusInternalServerError,
+ http.StatusBadGateway,
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout,
+ 520, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected.
// Add more HTTP status codes here.
-}
+)
// isHTTPStatusRetryable - is HTTP error code retryable.
-func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
- _, ok = retryableHTTPStatusCodes[httpStatusCode]
- return ok
+func isHTTPStatusRetryable(httpStatusCode int) bool {
+ return retryableHTTPStatusCodes.Contains(httpStatusCode)
}
// For now, all http Do() requests are retriable except some well defined errors
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
index 1bff664628..2ef648b2ba 100644
--- a/vendor/github.com/minio/minio-go/v7/transport.go
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -1,5 +1,4 @@
//go:build go1.7 || go1.8
-// +build go1.7 go1.8
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go
index ef69a56e6c..8436e0fc19 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go
@@ -30,6 +30,7 @@ import (
user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/google/uuid"
+ lru "github.com/hashicorp/golang-lru/v2"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
@@ -74,6 +75,7 @@ type Lookup struct {
IDCache IDCache
IDHistoryCache IDCache
+ spaceRootCache *lru.Cache[string, string]
metadataBackend metadata.Backend
userMapper usermapper.Mapper
tm node.TimeManager
@@ -85,11 +87,14 @@ func New(b metadata.Backend, um usermapper.Mapper, o *options.Options, tm node.T
idHistoryConf.Database = o.Options.IDCache.Table + "_history"
idHistoryConf.TTL = 1 * time.Minute
+ spaceRootCache, _ := lru.New[string, string](1000)
+
lu := &Lookup{
Options: o,
metadataBackend: b,
IDCache: NewStoreIDCache(o.Options.IDCache),
IDHistoryCache: NewStoreIDCache(idHistoryConf),
+ spaceRootCache: spaceRootCache,
userMapper: um,
tm: tm,
}
@@ -99,11 +104,17 @@ func New(b metadata.Backend, um usermapper.Mapper, o *options.Options, tm node.T
// CacheID caches the path for the given space and node id
func (lu *Lookup) CacheID(ctx context.Context, spaceID, nodeID, val string) error {
+ if spaceID == nodeID {
+ lu.spaceRootCache.Add(spaceID, val)
+ }
return lu.IDCache.Set(ctx, spaceID, nodeID, val)
}
// GetCachedID returns the cached path for the given space and node id
func (lu *Lookup) GetCachedID(ctx context.Context, spaceID, nodeID string) (string, bool) {
+ if spaceID == nodeID {
+ return lu.getSpaceRootPathWithStatus(ctx, spaceID)
+ }
return lu.IDCache.Get(ctx, spaceID, nodeID)
}
@@ -186,7 +197,7 @@ func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *n
// The Resource references the root of a space
return lu.NodeFromSpaceID(ctx, id.SpaceId)
}
- return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, false, nil, false)
+ return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, "", false, nil, false)
}
// Pathify segments the beginning of a string into depth segments of width length
@@ -207,7 +218,7 @@ func Pathify(id string, depth, width int) string {
// NodeFromSpaceID converts a resource id into a Node
func (lu *Lookup) NodeFromSpaceID(ctx context.Context, spaceID string) (n *node.Node, err error) {
- node, err := node.ReadNode(ctx, lu, spaceID, spaceID, false, nil, false)
+ node, err := node.ReadNode(ctx, lu, spaceID, spaceID, "", false, nil, false)
if err != nil {
return nil, err
}
@@ -283,35 +294,55 @@ func (lu *Lookup) InternalRoot() string {
return lu.Options.Root
}
+func (lu *Lookup) getSpaceRootPathWithStatus(ctx context.Context, spaceID string) (string, bool) {
+ if val, ok := lu.spaceRootCache.Get(spaceID); ok {
+ return val, true
+ }
+ val, ok := lu.IDCache.Get(ctx, spaceID, spaceID)
+ if ok {
+ lu.spaceRootCache.Add(spaceID, val)
+ }
+ return val, ok
+}
+
+func (lu *Lookup) getSpaceRootPath(ctx context.Context, spaceID string) string {
+ val, _ := lu.getSpaceRootPathWithStatus(ctx, spaceID)
+ return val
+}
+
// InternalSpaceRoot returns the internal path for a space
func (lu *Lookup) InternalSpaceRoot(spaceID string) string {
- return lu.InternalPath(spaceID, spaceID)
+ return lu.getSpaceRootPath(context.Background(), spaceID)
}
// InternalPath returns the internal path for a given ID
func (lu *Lookup) InternalPath(spaceID, nodeID string) string {
if strings.Contains(nodeID, node.RevisionIDDelimiter) || strings.HasSuffix(nodeID, node.CurrentIDDelimiter) {
- spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
+ spaceRoot := lu.getSpaceRootPath(context.Background(), spaceID)
if len(spaceRoot) == 0 {
return ""
}
return filepath.Join(spaceRoot, MetadataDir, Pathify(nodeID, 4, 2))
}
+ if spaceID == nodeID {
+ return lu.getSpaceRootPath(context.Background(), spaceID)
+ }
+
path, _ := lu.IDCache.Get(context.Background(), spaceID, nodeID)
return path
}
// LockfilePaths returns the paths(s) to the lockfile of the node
-func (lu *Lookup) LockfilePaths(spaceID, nodeID string) []string {
- spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
+func (lu *Lookup) LockfilePaths(n *node.Node) []string {
+ spaceRoot := lu.getSpaceRootPath(context.Background(), n.SpaceID)
if len(spaceRoot) == 0 {
return nil
}
- paths := []string{filepath.Join(spaceRoot, MetadataDir, Pathify(nodeID, 4, 2)+".lock")}
+ paths := []string{filepath.Join(spaceRoot, MetadataDir, Pathify(n.ID, 4, 2)+".lock")}
- nodepath := lu.InternalPath(spaceID, nodeID)
+ nodepath := n.InternalPath()
if len(nodepath) > 0 {
paths = append(paths, nodepath+".lock")
}
@@ -321,7 +352,7 @@ func (lu *Lookup) LockfilePaths(spaceID, nodeID string) []string {
// VersionPath returns the path to the version of the node
func (lu *Lookup) VersionPath(spaceID, nodeID, version string) string {
- spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
+ spaceRoot := lu.getSpaceRootPath(context.Background(), spaceID)
if len(spaceRoot) == 0 {
return ""
}
@@ -331,7 +362,7 @@ func (lu *Lookup) VersionPath(spaceID, nodeID, version string) string {
// VersionPath returns the "current" path of the node
func (lu *Lookup) CurrentPath(spaceID, nodeID string) string {
- spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
+ spaceRoot := lu.getSpaceRootPath(context.Background(), spaceID)
if len(spaceRoot) == 0 {
return ""
}
@@ -446,6 +477,9 @@ func (lu *Lookup) PurgeNode(n *node.Node) error {
if cerr := lu.IDCache.Delete(context.Background(), n.SpaceID, n.ID); cerr != nil {
return cerr
}
+ if n.ID == n.SpaceID {
+ lu.spaceRootCache.Remove(n.SpaceID)
+ }
return rerr
}
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go
index dbd71237b6..0a9971a134 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go
@@ -360,7 +360,7 @@ func (t *Tree) getNodeForPath(path string) (*node.Node, error) {
return nil, err
}
- return node.ReadNode(context.Background(), t.lookup, spaceID, nodeID, false, nil, false)
+ return node.ReadNode(context.Background(), t.lookup, spaceID, nodeID, path, false, nil, false)
}
func (t *Tree) findSpaceId(path string) (string, error) {
@@ -909,17 +909,24 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
}
if id != "" {
- // Check if the item on the previous still exists. In this case it might have been a copy with extended attributes -> set new ID
+ // Check if the item on the previous path still exists. In this case it might have been a copy with extended attributes -> set new ID
+ isCopy := false
previousPath, ok := t.lookup.GetCachedID(context.Background(), spaceID, id)
if ok && previousPath != path {
- // this id clashes with an existing id -> re-assimilate
_, err := os.Stat(previousPath)
if err == nil {
- _ = t.assimilate(scanItem{Path: path})
+ // previous path (using the same id) still exists -> this is a copy
+ isCopy = true
}
}
- if err := t.lookup.CacheID(context.Background(), spaceID, id, path); err != nil {
- t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", path).Msg("could not cache id")
+ if isCopy {
+ // copy detected -> re-assimilate
+ _ = t.assimilate(scanItem{Path: path})
+ } else {
+ // update cached id with new path
+ if err := t.lookup.CacheID(context.Background(), spaceID, id, path); err != nil {
+ t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", path).Msg("could not cache id")
+ }
}
}
} else if assimilate {
@@ -943,7 +950,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
t.log.Error().Err(err).Str("path", dir).Msg("could not get ids for path")
continue
}
- n, err := node.ReadNode(context.Background(), t.lookup, spaceID, id, true, nil, false)
+ n, err := node.ReadNode(context.Background(), t.lookup, spaceID, id, dir, true, nil, false)
if err != nil {
t.log.Error().Err(err).Str("path", dir).Msg("could not read directory node")
continue
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go
index 5d9281884e..69f456dcd8 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go
@@ -216,7 +216,7 @@ func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, r
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
- n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false)
+ n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], "", false, nil, false)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go
index fd81b52580..c416f79f51 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go
@@ -529,7 +529,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
}
}
- child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n.SpaceRoot, true)
+ child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, path, false, n.SpaceRoot, true)
if err != nil {
t.log.Error().Err(err).Str("path", path).Msg("failed to read node")
continue
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/grants.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/grants.go
index 2adb45f4cc..834ce25d5d 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/grants.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/grants.go
@@ -28,7 +28,6 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
- "github.com/opencloud-eu/reva/v2/pkg/sharedconf"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
@@ -119,7 +118,7 @@ func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g
}
}
- if sharedconf.MultiTenantEnabled() {
+ if fs.o.MultiTenantEnabled {
spaceTenant, err := grantNode.SpaceRoot.XattrString(ctx, prefixes.SpaceTenantIDAttr)
if err != nil {
log.Error().Err(err).Msg("failed to read tenant id of space")
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go
index 37a4af3e41..bf7b73700f 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go
@@ -151,7 +151,7 @@ func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *n
// The Resource references the root of a space
return lu.NodeFromSpaceID(ctx, id.SpaceId)
}
- return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, false, nil, false)
+ return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, "", false, nil, false)
}
// Pathify segments the beginning of a string into depth segments of width length
@@ -172,7 +172,7 @@ func Pathify(id string, depth, width int) string {
// NodeFromSpaceID converts a resource id into a Node
func (lu *Lookup) NodeFromSpaceID(ctx context.Context, spaceID string) (n *node.Node, err error) {
- node, err := node.ReadNode(ctx, lu, spaceID, spaceID, false, nil, false)
+ node, err := node.ReadNode(ctx, lu, spaceID, spaceID, "", false, nil, false)
if err != nil {
return nil, err
}
@@ -274,8 +274,8 @@ func (lu *Lookup) InternalPath(spaceID, nodeID string) string {
}
// LockfilePaths returns the paths(s) to the lockfile of the node
-func (lu *Lookup) LockfilePaths(spaceID, nodeID string) []string {
- return []string{lu.InternalPath(spaceID, nodeID) + ".lock"}
+func (lu *Lookup) LockfilePaths(n *node.Node) []string {
+ return []string{n.InternalPath() + ".lock"}
}
// VersionPath returns the internal path for a version of a node
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go
index e468bb9dfe..6572cddc3b 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go
@@ -29,6 +29,7 @@ import (
"hash/adler32"
"io"
"os"
+ "path/filepath"
"strconv"
"strings"
"time"
@@ -158,7 +159,7 @@ type PathLookup interface {
InternalRoot() string
InternalSpaceRoot(spaceID string) string
InternalPath(spaceID, nodeID string) string
- LockfilePaths(spaceID, nodeID string) []string
+ LockfilePaths(n *Node) []string
VersionPath(spaceID, nodeID, version string) string
Path(ctx context.Context, n *Node, hasPermission PermissionFunc) (path string, err error)
MetadataBackend() metadata.Backend
@@ -350,7 +351,7 @@ func (n *Node) SpaceOwnerOrManager(ctx context.Context) *userpb.UserId {
}
// ReadNode creates a new instance from an id and checks if it exists
-func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canListDisabledSpace bool, spaceRoot *Node, skipParentCheck bool) (*Node, error) {
+func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID, internalPath string, canListDisabledSpace bool, spaceRoot *Node, skipParentCheck bool) (*Node, error) {
ctx, span := tracer.Start(ctx, "ReadNode")
defer span.End()
var err error
@@ -417,6 +418,9 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
},
SpaceRoot: spaceRoot,
}
+ if internalPath != "" {
+ n.internalPath = internalPath
+ }
// append back revision to nodeid, even when returning a not existing node
defer func() {
@@ -506,7 +510,7 @@ func (n *Node) Child(ctx context.Context, name string) (*Node, error) {
return nil, err
}
- readNode, err := ReadNode(ctx, n.lu, spaceID, nodeID, false, n.SpaceRoot, true)
+ readNode, err := ReadNode(ctx, n.lu, spaceID, nodeID, filepath.Join(n.internalPath, name), false, n.SpaceRoot, true)
if err != nil {
return nil, errors.Wrap(err, "could not read child node")
}
@@ -653,7 +657,7 @@ func (n *Node) ParentPath() string {
// path to use for new locks.
// In the future only one path should remain at which point the function can return a single string.
func (n *Node) LockFilePaths() []string {
- return n.lu.LockfilePaths(n.SpaceID, n.ID)
+ return n.lu.LockfilePaths(n)
}
// CalculateEtag returns a hash of fileid + tmtime (or mtime)
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options/options.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options/options.go
index 0dd689cd2d..5414c03d9d 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options/options.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options/options.go
@@ -94,6 +94,8 @@ type Options struct {
DisableVersioning bool `mapstructure:"disable_versioning"`
MountID string `mapstructure:"mount_id"`
+
+ MultiTenantEnabled bool `mapstructure:"multi_tenant_enabled"`
}
// AsyncPropagatorOptions holds the configuration for the async propagator
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go
index 0aeaf7f2b0..95bf5ee460 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go
@@ -69,7 +69,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
- n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], false, nil, false)
+ n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], "", false, nil, false)
if err != nil {
return err
}
@@ -185,7 +185,7 @@ func (fs *Decomposedfs) getRevisionNode(ctx context.Context, ref *provider.Refer
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
- n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], false, nil, false)
+ n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], "", false, nil, false)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go
index 2ae7d17d24..9ecf6f4a3a 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go
@@ -107,7 +107,7 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr
alias = templates.WithSpacePropertiesAndUser(u, req.Type, req.Name, spaceID, fs.o.PersonalSpaceAliasTemplate)
}
- root, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, false) // will fall into `Exists` case below
+ root, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, "", true, nil, false) // will fall into `Exists` case below
switch {
case err != nil:
return nil, err
@@ -312,7 +312,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
if spaceID != spaceIDAny && entry != spaceIDAny {
// try directly reading the node
- n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, true, nil, false) // permission to read disabled space is checked later
+ n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, "", true, nil, false) // permission to read disabled space is checked later
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("id", entry).Msg("could not read node")
return nil, err
@@ -449,7 +449,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
continue
}
- n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, true)
+ n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, "", true, nil, true)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("id", spaceID).Msg("could not read node, skipping")
continue
@@ -519,7 +519,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
// if there are no matches (or they happened to be spaces for the owner) and the node is a child return a space
if int64(len(matches)) <= numShares.Load() && entry != spaceID {
// try node id
- n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, true, nil, false) // permission to read disabled space is checked in storageSpaceFromNode
+ n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, "", true, nil, false) // permission to read disabled space is checked in storageSpaceFromNode
if err != nil {
return nil, err
}
@@ -631,7 +631,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up
}
// check which permissions are needed
- spaceNode, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, false)
+ spaceNode, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, "", true, nil, false)
if err != nil {
return nil, err
}
@@ -733,7 +733,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
return err
}
- n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, false) // permission to read disabled space is checked later
+ n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, "", true, nil, false) // permission to read disabled space is checked later
if err != nil {
return err
}
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/async.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/async.go
index 82a17edab6..19f97c42cc 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/async.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/async.go
@@ -295,7 +295,7 @@ func (p AsyncPropagator) propagate(ctx context.Context, pn PropagationNode, reca
defer func() { _ = unlock() }()
_, subspan = tracer.Start(ctx, "node.ReadNode")
- n, err := node.ReadNode(ctx, p.lookup, pn.GetSpaceID(), pn.GetID(), false, nil, false)
+ n, err := node.ReadNode(ctx, p.lookup, pn.GetSpaceID(), pn.GetID(), "", false, nil, false)
if err != nil {
log.Error().Err(err).
Msg("Propagation failed. Could not read node.")
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/revisions.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/revisions.go
index bbf429a66a..7dd73a0647 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/revisions.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/revisions.go
@@ -214,7 +214,7 @@ func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, r
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
- n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false)
+ n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], "", false, nil, false)
if err != nil {
return nil, nil, err
}
@@ -307,7 +307,7 @@ func (tp *Tree) getRevisionNode(ctx context.Context, ref *provider.Reference, re
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
- n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false)
+ n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], "", false, nil, false)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go
index a52993a9fd..d4787aa5ea 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go
@@ -383,7 +383,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
}
}
- child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n.SpaceRoot, true)
+ child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, "", false, n.SpaceRoot, true)
if err != nil {
return err
}
@@ -889,7 +889,7 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
nodeID = strings.ReplaceAll(nodeID, "/", "")
recycleNode = node.New(spaceID, nodeID, "", "", 0, "", provider.ResourceType_RESOURCE_TYPE_INVALID, nil, t.lookup)
- recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID, false, nil, false)
+ recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID, "", false, nil, false)
if err != nil {
return
}
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go
index 2f5c5d9a2c..994a373b6b 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go
@@ -170,7 +170,7 @@ func (session *DecomposedFsSession) HeaderIfUnmodifiedSince() string {
// Node returns the node for the session
func (session *DecomposedFsSession) Node(ctx context.Context) (*node.Node, error) {
- return node.ReadNode(ctx, session.store.lu, session.SpaceID(), session.info.Storage["NodeId"], false, nil, true)
+ return node.ReadNode(ctx, session.store.lu, session.SpaceID(), session.info.Storage["NodeId"], "", false, nil, true)
}
// ID returns the upload session id
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go
index 712e9f598d..67a510e7da 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go
@@ -213,7 +213,7 @@ func (store DecomposedFsStore) CreateNodeForUpload(ctx context.Context, session
store.lu,
)
var err error
- n.SpaceRoot, err = node.ReadNode(ctx, store.lu, session.SpaceID(), session.SpaceID(), false, nil, false)
+ n.SpaceRoot, err = node.ReadNode(ctx, store.lu, session.SpaceID(), session.SpaceID(), "", false, nil, false)
if err != nil {
return nil, err
}
@@ -316,7 +316,7 @@ func (store DecomposedFsStore) updateExistingNode(ctx context.Context, session *
return f.Close()
}
- old, _ := node.ReadNode(ctx, store.lu, spaceID, n.ID, false, nil, false)
+ old, _ := node.ReadNode(ctx, store.lu, spaceID, n.ID, "", false, nil, false)
if _, err := node.CheckQuota(ctx, n.SpaceRoot, true, uint64(old.Blobsize), fsize); err != nil {
return unlock, err
}
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go
index f1be9b869c..699e343206 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go
@@ -326,7 +326,7 @@ func (session *DecomposedFsSession) Finalize(ctx context.Context) (err error) {
if !isProcessing || procssingID != session.ID() {
versionID := revisionNode.ID + node.RevisionIDDelimiter + session.MTime().UTC().Format(time.RFC3339Nano)
// There should be a revision node (created by the other upload that finished before us), read it and upload our blob there.
- existingRevisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, false, spaceRoot, false)
+ existingRevisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, "", false, spaceRoot, false)
if err != nil || !existingRevisionNode.Exists {
// The revision node has not been created. Likely because the file on disk was modified externally and re-assilimated (watchfs == true)
// Let's create the revision node now and upload the blob to it.
@@ -379,7 +379,7 @@ func (session *DecomposedFsSession) createRevisionNodeForUpload(ctx context.Cont
prefixes.ChecksumPrefix + "md5": md5h.Sum(nil),
prefixes.ChecksumPrefix + "adler32": adler32h.Sum(nil),
}
- revisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, false, baseNode.SpaceRoot, false)
+ revisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, "", false, baseNode.SpaceRoot, false)
if err == nil {
mtime := session.MTime()
attrs.SetString(prefixes.BlobIDAttr, session.ID())
@@ -432,7 +432,7 @@ func (session *DecomposedFsSession) Cleanup(revertNodeMetadata, cleanBin, cleanI
if session.NodeExists() && session.info.MetaData["versionID"] != "" {
versionID := session.info.MetaData["versionID"]
sublog.Debug().Str("nodepath", n.InternalPath()).Str("versionID", versionID).Msg("restoring revision")
- revisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, false, n.SpaceRoot, false)
+ revisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, "", false, n.SpaceRoot, false)
if err != nil {
sublog.Error().Err(err).Str("versionID", versionID).Msg("reading revision node failed")
}
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/walker/walker.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/walker/walker.go
index 31712aace8..57b43962b3 100644
--- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/walker/walker.go
+++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/walker/walker.go
@@ -78,13 +78,15 @@ func (r *revaWalker) walkRecursively(ctx context.Context, wd string, info *provi
return fn(wd, info, nil)
}
- list, err := r.readDir(ctx, info.Id)
- errFn := fn(wd, info, err)
-
- if err != nil || errFn != nil {
- return errFn
+ err := fn(wd, info, nil)
+ if err != nil {
+ return err
}
+ list, err := r.readDir(ctx, info.Id)
+ if err != nil {
+ return err
+ }
for _, file := range list {
err = r.walkRecursively(ctx, filepath.Join(wd, info.Path), file, fn)
if err != nil && (file.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER || err != filepath.SkipDir) {
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go b/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go
index a4d12048a0..5a9d7ccf31 100644
--- a/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go
+++ b/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go
@@ -10,8 +10,11 @@ func init() {
// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeGroups
type Request struct {
- Groups []string `kafka:"min=v0,max=v4"`
- IncludeAuthorizedOperations bool `kafka:"min=v3,max=v4"`
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v5,max=v5,tag"`
+ Groups []string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ IncludeAuthorizedOperations bool `kafka:"min=v3,max=v5"`
}
func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeGroups }
@@ -42,27 +45,36 @@ func (r *Request) Split(cluster protocol.Cluster) (
}
type Response struct {
- ThrottleTimeMs int32 `kafka:"min=v1,max=v4"`
- Groups []ResponseGroup `kafka:"min=v0,max=v4"`
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v5,max=v5,tag"`
+ ThrottleTimeMs int32 `kafka:"min=v1,max=v5"`
+ Groups []ResponseGroup `kafka:"min=v0,max=v5"`
}
type ResponseGroup struct {
- ErrorCode int16 `kafka:"min=v0,max=v4"`
- GroupID string `kafka:"min=v0,max=v4"`
- GroupState string `kafka:"min=v0,max=v4"`
- ProtocolType string `kafka:"min=v0,max=v4"`
- ProtocolData string `kafka:"min=v0,max=v4"`
- Members []ResponseGroupMember `kafka:"min=v0,max=v4"`
- AuthorizedOperations int32 `kafka:"min=v3,max=v4"`
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v5,max=v5,tag"`
+ ErrorCode int16 `kafka:"min=v0,max=v5"`
+ GroupID string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ GroupState string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ ProtocolType string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ ProtocolData string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ Members []ResponseGroupMember `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ AuthorizedOperations int32 `kafka:"min=v3,max=v5"`
}
type ResponseGroupMember struct {
- MemberID string `kafka:"min=v0,max=v4"`
- GroupInstanceID string `kafka:"min=v4,max=v4,nullable"`
- ClientID string `kafka:"min=v0,max=v4"`
- ClientHost string `kafka:"min=v0,max=v4"`
- MemberMetadata []byte `kafka:"min=v0,max=v4"`
- MemberAssignment []byte `kafka:"min=v0,max=v4"`
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v5,max=v5,tag"`
+ MemberID string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ GroupInstanceID string `kafka:"min=v4,max=v4,nullable|min=v5,max=v5,compact,nullable"`
+ ClientID string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ ClientHost string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ MemberMetadata []byte `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
+ MemberAssignment []byte `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
}
func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeGroups }
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
index d2a66857be..b7c30d3e91 100644
--- a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
@@ -1,5 +1,4 @@
//go:build linux && !appengine && !tinygo
-// +build linux,!appengine,!tinygo
package msgp
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
index 07f524af7f..0017cfb91f 100644
--- a/vendor/github.com/tinylib/msgp/msgp/advise_other.go
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
@@ -1,5 +1,4 @@
//go:build (!linux && !tinygo && !windows) || appengine
-// +build !linux,!tinygo,!windows appengine
package msgp
diff --git a/vendor/github.com/tinylib/msgp/msgp/autoshim.go b/vendor/github.com/tinylib/msgp/msgp/autoshim.go
new file mode 100644
index 0000000000..a73406886b
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/autoshim.go
@@ -0,0 +1,159 @@
+package msgp
+
+import "strconv"
+
+// AutoShim provides helper functions for converting between string and
+// numeric types.
+type AutoShim struct{}
+
+// ParseUint converts a string to a uint.
+func (a AutoShim) ParseUint(s string) (uint, error) {
+ v, err := strconv.ParseUint(s, 10, strconv.IntSize)
+ return uint(v), err
+}
+
+// ParseUint8 converts a string to a uint8.
+func (a AutoShim) ParseUint8(s string) (uint8, error) {
+ v, err := strconv.ParseUint(s, 10, 8)
+ return uint8(v), err
+}
+
+// ParseUint16 converts a string to a uint16.
+func (a AutoShim) ParseUint16(s string) (uint16, error) {
+ v, err := strconv.ParseUint(s, 10, 16)
+ return uint16(v), err
+}
+
+// ParseUint32 converts a string to a uint32.
+func (a AutoShim) ParseUint32(s string) (uint32, error) {
+ v, err := strconv.ParseUint(s, 10, 32)
+ return uint32(v), err
+}
+
+// ParseUint64 converts a string to a uint64.
+func (a AutoShim) ParseUint64(s string) (uint64, error) {
+ v, err := strconv.ParseUint(s, 10, 64)
+ return v, err
+}
+
+// ParseInt converts a string to an int.
+func (a AutoShim) ParseInt(s string) (int, error) {
+ v, err := strconv.ParseInt(s, 10, strconv.IntSize)
+ return int(v), err
+}
+
+// ParseInt8 converts a string to an int8.
+func (a AutoShim) ParseInt8(s string) (int8, error) {
+ v, err := strconv.ParseInt(s, 10, 8)
+ return int8(v), err
+}
+
+// ParseInt16 converts a string to an int16.
+func (a AutoShim) ParseInt16(s string) (int16, error) {
+ v, err := strconv.ParseInt(s, 10, 16)
+ return int16(v), err
+}
+
+// ParseInt32 converts a string to an int32.
+func (a AutoShim) ParseInt32(s string) (int32, error) {
+ v, err := strconv.ParseInt(s, 10, 32)
+ return int32(v), err
+}
+
+// ParseInt64 converts a string to an int64.
+func (a AutoShim) ParseInt64(s string) (int64, error) {
+ v, err := strconv.ParseInt(s, 10, 64)
+ return v, err
+}
+
+// ParseBool converts a string to a bool.
+func (a AutoShim) ParseBool(s string) (bool, error) {
+ return strconv.ParseBool(s)
+}
+
+// ParseFloat64 converts a string to a float64.
+func (a AutoShim) ParseFloat64(s string) (float64, error) {
+ return strconv.ParseFloat(s, 64)
+}
+
+// ParseFloat32 converts a string to a float32.
+func (a AutoShim) ParseFloat32(s string) (float32, error) {
+ v, err := strconv.ParseFloat(s, 32)
+ return float32(v), err
+}
+
+// ParseByte converts a string to a byte.
+func (a AutoShim) ParseByte(s string) (byte, error) {
+ v, err := strconv.ParseUint(s, 10, 8)
+ return byte(v), err
+}
+
+// Uint8String returns the string representation of a uint8.
+func (a AutoShim) Uint8String(v uint8) string {
+ return strconv.FormatUint(uint64(v), 10)
+}
+
+// UintString returns the string representation of a uint.
+func (a AutoShim) UintString(v uint) string {
+ return strconv.FormatUint(uint64(v), 10)
+}
+
+// Uint16String returns the string representation of a uint16.
+func (a AutoShim) Uint16String(v uint16) string {
+ return strconv.FormatUint(uint64(v), 10)
+}
+
+// Uint32String returns the string representation of a uint32.
+func (a AutoShim) Uint32String(v uint32) string {
+ return strconv.FormatUint(uint64(v), 10)
+}
+
+// Uint64String returns the string representation of a uint64.
+func (a AutoShim) Uint64String(v uint64) string {
+ return strconv.FormatUint(v, 10)
+}
+
+// IntString returns the string representation of an int.
+func (a AutoShim) IntString(v int) string {
+ return strconv.FormatInt(int64(v), 10)
+}
+
+// Int8String returns the string representation of an int8.
+func (a AutoShim) Int8String(v int8) string {
+ return strconv.FormatInt(int64(v), 10)
+}
+
+// Int16String returns the string representation of an int16.
+func (a AutoShim) Int16String(v int16) string {
+ return strconv.FormatInt(int64(v), 10)
+}
+
+// Int32String returns the string representation of an int32.
+func (a AutoShim) Int32String(v int32) string {
+ return strconv.FormatInt(int64(v), 10)
+}
+
+// Int64String returns the string representation of an int64.
+func (a AutoShim) Int64String(v int64) string {
+ return strconv.FormatInt(v, 10)
+}
+
+// BoolString returns the string representation of a bool.
+func (a AutoShim) BoolString(v bool) string {
+ return strconv.FormatBool(v)
+}
+
+// Float64String returns the string representation of a float64.
+func (a AutoShim) Float64String(v float64) string {
+ return strconv.FormatFloat(v, 'g', -1, 64)
+}
+
+// Float32String returns the string representation of a float32.
+func (a AutoShim) Float32String(v float32) string {
+ return strconv.FormatFloat(float64(v), 'g', -1, 32)
+}
+
+// ByteString returns the string representation of a byte.
+func (a AutoShim) ByteString(v byte) string {
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go
index 47a8c18345..f622bf1e16 100644
--- a/vendor/github.com/tinylib/msgp/msgp/defs.go
+++ b/vendor/github.com/tinylib/msgp/msgp/defs.go
@@ -26,6 +26,27 @@
// the wiki at http://github.com/tinylib/msgp
package msgp
+// RT is the runtime interface for all types that can be encoded and decoded.
+type RT interface {
+ Decodable
+ Encodable
+ Sizer
+ Unmarshaler
+ Marshaler
+}
+
+// PtrTo is the runtime interface for all types that can be encoded and decoded.
+type PtrTo[T any] interface {
+ ~*T
+}
+
+// RTFor is the runtime interface for all types that can be encoded and decoded.
+// Use for generic types.
+type RTFor[T any] interface {
+ PtrTo[T]
+ RT
+}
+
const (
last4 = 0x0f
first4 = 0xf0
diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go
index b473a6f668..e5d86f9369 100644
--- a/vendor/github.com/tinylib/msgp/msgp/edit.go
+++ b/vendor/github.com/tinylib/msgp/msgp/edit.go
@@ -58,7 +58,7 @@ func HasKey(key string, raw []byte) bool {
return false
}
var field []byte
- for i := uint32(0); i < sz; i++ {
+ for range sz {
field, bts, err = ReadStringZC(bts)
if err != nil {
return false
diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize_default.go b/vendor/github.com/tinylib/msgp/msgp/elsize_default.go
index e7e8b547a9..d68c6bb265 100644
--- a/vendor/github.com/tinylib/msgp/msgp/elsize_default.go
+++ b/vendor/github.com/tinylib/msgp/msgp/elsize_default.go
@@ -1,5 +1,4 @@
//go:build !tinygo
-// +build !tinygo
package msgp
@@ -10,7 +9,7 @@ package msgp
var sizes [256]bytespec
func init() {
- for i := 0; i < 256; i++ {
+ for i := range 256 {
sizes[i] = calcBytespec(byte(i))
}
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go b/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go
index 041f4ad694..b1c11f96a0 100644
--- a/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go
+++ b/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go
@@ -1,5 +1,4 @@
//go:build tinygo
-// +build tinygo
package msgp
diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go
index e6b42b6893..ce42207364 100644
--- a/vendor/github.com/tinylib/msgp/msgp/errors.go
+++ b/vendor/github.com/tinylib/msgp/msgp/errors.go
@@ -17,6 +17,10 @@ var (
// This should only realistically be seen on adversarial data trying to exhaust the stack.
ErrRecursion error = errRecursion{}
+ // ErrLimitExceeded is returned when a set limit is exceeded.
+ // Limits can be set on the Reader to prevent excessive memory usage by adversarial data.
+ ErrLimitExceeded error = errLimitExceeded{}
+
// this error is only returned
// if we reach code that should
// be unreachable
@@ -73,7 +77,7 @@ func Resumable(e error) bool {
//
// ErrShortBytes is not wrapped with any context due to backward compatibility
// issues with the public API.
-func WrapError(err error, ctx ...interface{}) error {
+func WrapError(err error, ctx ...any) error {
switch e := err.(type) {
case errShort:
return e
@@ -143,6 +147,11 @@ type errRecursion struct{}
func (e errRecursion) Error() string { return "msgp: recursion limit reached" }
func (e errRecursion) Resumable() bool { return false }
+type errLimitExceeded struct{}
+
+func (e errLimitExceeded) Error() string { return "msgp: configured reader limit exceeded" }
+func (e errLimitExceeded) Resumable() bool { return false }
+
// ArrayError is an error returned
// when decoding a fix-sized array
// of the wrong size
@@ -382,8 +391,8 @@ l: // loop through string bytes (not UTF-8 characters)
}
// anything else is \x
sb = append(sb, `\x`...)
- sb = append(sb, lowerhex[byte(b)>>4])
- sb = append(sb, lowerhex[byte(b)&0xF])
+ sb = append(sb, lowerhex[b>>4])
+ sb = append(sb, lowerhex[b&0xF])
continue l
}
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/errors_default.go b/vendor/github.com/tinylib/msgp/msgp/errors_default.go
index e45c00a8b8..b9dc0d994d 100644
--- a/vendor/github.com/tinylib/msgp/msgp/errors_default.go
+++ b/vendor/github.com/tinylib/msgp/msgp/errors_default.go
@@ -1,5 +1,4 @@
//go:build !tinygo
-// +build !tinygo
package msgp
@@ -9,7 +8,7 @@ import (
)
// ctxString converts the incoming interface{} slice into a single string.
-func ctxString(ctx []interface{}) string {
+func ctxString(ctx []any) string {
out := ""
for idx, cv := range ctx {
if idx > 0 {
diff --git a/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go b/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go
index 8691cd387e..18f44c5f6b 100644
--- a/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go
+++ b/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go
@@ -1,5 +1,4 @@
//go:build tinygo
-// +build tinygo
package msgp
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go
index cda71c9840..2ec255240a 100644
--- a/vendor/github.com/tinylib/msgp/msgp/extension.go
+++ b/vendor/github.com/tinylib/msgp/msgp/extension.go
@@ -181,7 +181,7 @@ func (mw *Writer) writeExtensionHeader(length int, extType int8) error {
return err
}
mw.buf[o] = mext8
- mw.buf[o+1] = byte(uint8(length))
+ mw.buf[o+1] = byte(length)
mw.buf[o+2] = byte(extType)
case length < math.MaxUint16:
o, err := mw.require(4)
@@ -342,7 +342,7 @@ func (m *Reader) peekExtensionHeader() (offset int, length int, extType int8, er
}
offset = 3
extType = int8(p[2])
- length = int(uint8(p[1]))
+ length = int(p[1])
case mext16:
p, err = m.R.Peek(4)
@@ -383,6 +383,9 @@ func (m *Reader) ReadExtension(e Extension) error {
if expectedType := e.ExtensionType(); extType != expectedType {
return errExt(extType, expectedType)
}
+ if uint32(length) > m.GetMaxElements() {
+ return ErrLimitExceeded
+ }
p, err := m.R.Peek(offset + length)
if err != nil {
@@ -404,6 +407,9 @@ func (m *Reader) ReadExtensionRaw() (int8, []byte, error) {
if err != nil {
return 0, nil, err
}
+ if uint32(length) > m.GetMaxElements() {
+ return 0, nil, ErrLimitExceeded
+ }
payload, err := m.R.Next(offset + length)
if err != nil {
@@ -455,7 +461,7 @@ func AppendExtension(b []byte, e Extension) ([]byte, error) {
case l < math.MaxUint8:
o, n = ensure(b, l+3)
o[n] = mext8
- o[n+1] = byte(uint8(l))
+ o[n+1] = byte(l)
o[n+2] = byte(e.ExtensionType())
n += 3
case l < math.MaxUint16:
@@ -528,7 +534,7 @@ func readExt(b []byte) (typ int8, remain []byte, data []byte, err error) {
sz = 16
off = 2
case mext8:
- sz = int(uint8(b[1]))
+ sz = int(b[1])
typ = int8(b[2])
off = 3
if sz == 0 {
diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go
index a6d91ede14..f12bea3d9f 100644
--- a/vendor/github.com/tinylib/msgp/msgp/file.go
+++ b/vendor/github.com/tinylib/msgp/msgp/file.go
@@ -1,7 +1,4 @@
//go:build (linux || darwin || dragonfly || freebsd || illumos || netbsd || openbsd) && !appengine && !tinygo
-// +build linux darwin dragonfly freebsd illumos netbsd openbsd
-// +build !appengine
-// +build !tinygo
package msgp
diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go
index dac0dba3fa..b7de634cbf 100644
--- a/vendor/github.com/tinylib/msgp/msgp/file_port.go
+++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go
@@ -1,5 +1,4 @@
//go:build windows || appengine || tinygo
-// +build windows appengine tinygo
package msgp
diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go
index d07a5fba7f..fabe6c2377 100644
--- a/vendor/github.com/tinylib/msgp/msgp/integers.go
+++ b/vendor/github.com/tinylib/msgp/msgp/integers.go
@@ -133,11 +133,11 @@ func putMuint8(b []byte, u uint8) {
_ = b[1] // bounds check elimination
b[0] = muint8
- b[1] = byte(u)
+ b[1] = u
}
func getMuint8(b []byte) uint8 {
- return uint8(b[1])
+ return b[1]
}
func getUnix(b []byte) (sec int64, nsec int32) {
@@ -161,7 +161,7 @@ func prefixu8(b []byte, pre byte, sz uint8) {
_ = b[1] // bounds check elimination
b[0] = pre
- b[1] = byte(sz)
+ b[1] = sz
}
// write prefix and big-endian uint16
diff --git a/vendor/github.com/tinylib/msgp/msgp/iter.go b/vendor/github.com/tinylib/msgp/msgp/iter.go
new file mode 100644
index 0000000000..a7c3b030e7
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/iter.go
@@ -0,0 +1,386 @@
+package msgp
+
+import (
+ "cmp"
+ "fmt"
+ "iter"
+ "maps"
+ "math"
+ "slices"
+)
+
+// ReadArray returns an iterator that can be used to iterate over the elements
+// of an array in the MessagePack data while being read by the provided Reader.
+// The type parameter V specifies the type of the elements in the array.
+// The returned iterator implements the iter.Seq[V] interface,
+// allowing for sequential access to the array elements.
+// The iterator will always stop after one error has been encountered.
+func ReadArray[T any](m *Reader, readFn func() (T, error)) iter.Seq2[T, error] {
+ return func(yield func(T, error) bool) {
+ // Check if nil
+ if m.IsNil() {
+ m.ReadNil()
+ return
+ }
+ // Regular array.
+ var empty T
+ length, err := m.ReadArrayHeader()
+ if err != nil {
+ yield(empty, fmt.Errorf("cannot read array header: %w", err))
+ return
+ }
+ for range length {
+ var v T
+ v, err = readFn()
+ if !yield(v, err) || err != nil {
+ return
+ }
+ }
+ }
+}
+
+// WriteArray writes an array to the provided Writer.
+// The writeFn parameter specifies the function to use to write each element of the array.
+func WriteArray[T any](w *Writer, a []T, writeFn func(T) error) error {
+ // Check if nil
+ if a == nil {
+ return w.WriteNil()
+ }
+ if uint64(len(a)) > math.MaxUint32 {
+ return fmt.Errorf("array too large to encode: %d elements", len(a))
+ }
+ // Write array header
+ err := w.WriteArrayHeader(uint32(len(a)))
+ if err != nil {
+ return err
+ }
+ // Write elements
+ for _, v := range a {
+ err = writeFn(v)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ReadMap returns an iterator that can be used to iterate over the elements
+// of a map in the MessagePack data while being read by the provided Reader.
+// The type parameters K and V specify the types of the keys and values in the map.
+// The returned iterator implements the iter.Seq2[K, V] interface,
+// allowing for sequential access to the map elements.
+// The returned function can be used to read any error that
+// occurred during iteration when iteration is done.
+func ReadMap[K, V any](m *Reader, readKey func() (K, error), readVal func() (V, error)) (iter.Seq2[K, V], func() error) {
+ var err error
+ return func(yield func(K, V) bool) {
+ var sz uint32
+ if m.IsNil() {
+ err = m.ReadNil()
+ return
+ }
+ sz, err = m.ReadMapHeader()
+ if err != nil {
+ err = fmt.Errorf("cannot read map header: %w", err)
+ return
+ }
+
+ for range sz {
+ var k K
+ k, err = readKey()
+ if err != nil {
+ err = fmt.Errorf("cannot read key: %w", err)
+ return
+ }
+ var v V
+ v, err = readVal()
+ if err != nil {
+ err = fmt.Errorf("cannot read value: %w", err)
+ return
+ }
+ if !yield(k, v) {
+ return
+ }
+ }
+ }, func() error { return err }
+}
+
+// WriteMap writes a map to the provided Writer.
+// The writeKey and writeVal parameters specify the functions
+// to use to write each key and value of the map.
+func WriteMap[K comparable, V any](w *Writer, m map[K]V, writeKey func(K) error, writeVal func(V) error) error {
+ if m == nil {
+ return w.WriteNil()
+ }
+ if uint64(len(m)) > math.MaxUint32 {
+ return fmt.Errorf("map too large to encode: %d elements", len(m))
+ }
+
+ // Write map header
+ err := w.WriteMapHeader(uint32(len(m)))
+ if err != nil {
+ return err
+ }
+ // Write elements
+ for k, v := range m {
+ err = writeKey(k)
+ if err != nil {
+ return err
+ }
+ err = writeVal(v)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WriteMapSorted writes a map to the provided Writer.
+// The keys of the map are sorted before writing.
+// This provides deterministic output, but will allocate to sort the keys.
+// The writeKey and writeVal parameters specify the functions
+// to use to write each key and value of the map.
+func WriteMapSorted[K cmp.Ordered, V any](w *Writer, m map[K]V, writeKey func(K) error, writeVal func(V) error) error {
+ if m == nil {
+ return w.WriteNil()
+ }
+ if uint64(len(m)) > math.MaxUint32 {
+ return fmt.Errorf("map too large to encode: %d elements", len(m))
+ }
+
+ // Write map header
+ err := w.WriteMapHeader(uint32(len(m)))
+ if err != nil {
+ return err
+ }
+ // Write elements
+ for _, k := range slices.Sorted(maps.Keys(m)) {
+ err = writeKey(k)
+ if err != nil {
+ return err
+ }
+ err = writeVal(m[k])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ReadArrayBytes returns an iterator that can be used to iterate over the elements
+// of an array in the MessagePack data while being read by the provided Reader.
+// The type parameter V specifies the type of the elements in the array.
+// After the iterator is exhausted, the remaining bytes in the buffer
+// and any error can be read by calling the returned function.
+func ReadArrayBytes[T any](b []byte, readFn func([]byte) (T, []byte, error)) (iter.Seq[T], func() (remain []byte, err error)) {
+ if IsNil(b) {
+ b, err := ReadNilBytes(b)
+ return func(yield func(T) bool) {}, func() ([]byte, error) { return b, err }
+ }
+ sz, b, err := ReadArrayHeaderBytes(b)
+ if err != nil || sz == 0 {
+ return func(yield func(T) bool) {}, func() ([]byte, error) { return b, err }
+ }
+ return func(yield func(T) bool) {
+ for range sz {
+ var v T
+ v, b, err = readFn(b)
+ if err != nil || !yield(v) {
+ return
+ }
+ }
+ }, func() ([]byte, error) {
+ return b, err
+ }
+}
+
+// AppendArray writes an array to the provided buffer.
+// The writeFn parameter specifies the function to use to write each element of the array.
+// The returned buffer contains the encoded array.
+// The function panics if the array is larger than math.MaxUint32 elements.
+func AppendArray[T any](b []byte, a []T, writeFn func(b []byte, v T) []byte) []byte {
+ if a == nil {
+ return AppendNil(b)
+ }
+ if uint64(len(a)) > math.MaxUint32 {
+ panic(fmt.Sprintf("array too large to encode: %d elements", len(a)))
+ }
+ b = AppendArrayHeader(b, uint32(len(a)))
+ for _, v := range a {
+ b = writeFn(b, v)
+ }
+ return b
+}
+
+// ReadMapBytes returns an iterator over key/value
+// pairs from a MessagePack map encoded in b.
+// The iterator yields K,V pairs, and this function also returns
+// a closure to get the remaining bytes and any error.
+func ReadMapBytes[K any, V any](b []byte,
+ readK func([]byte) (K, []byte, error),
+ readV func([]byte) (V, []byte, error)) (iter.Seq2[K, V], func() (remain []byte, err error)) {
+ var err error
+ var sz uint32
+ if IsNil(b) {
+ b, err = ReadNilBytes(b)
+ return func(yield func(K, V) bool) {}, func() ([]byte, error) { return b, err }
+ }
+ sz, b, err = ReadMapHeaderBytes(b)
+ if err != nil || sz == 0 {
+ return func(yield func(K, V) bool) {}, func() ([]byte, error) { return b, err }
+ }
+
+ return func(yield func(K, V) bool) {
+ for range sz {
+ var k K
+ k, b, err = readK(b)
+ if err != nil {
+ err = fmt.Errorf("cannot read map key: %w", err)
+ return
+ }
+ var v V
+ v, b, err = readV(b)
+ if err != nil {
+ err = fmt.Errorf("cannot read map value: %w", err)
+ return
+ }
+ if !yield(k, v) {
+ return
+ }
+ }
+ }, func() ([]byte, error) { return b, err }
+}
+
+// AppendMap writes a map to the provided buffer.
+// The writeK and writeV parameters specify the functions to use to write each key and value of the map.
+// The returned buffer contains the encoded map.
+// The function panics if the map is larger than math.MaxUint32 elements.
+func AppendMap[K comparable, V any](b []byte, m map[K]V,
+ writeK func(b []byte, k K) []byte,
+ writeV func(b []byte, v V) []byte) []byte {
+ if m == nil {
+ return AppendNil(b)
+ }
+ if uint64(len(m)) > math.MaxUint32 {
+ panic(fmt.Sprintf("map too large to encode: %d elements", len(m)))
+ }
+ b = AppendMapHeader(b, uint32(len(m)))
+ for k, v := range m {
+ b = writeK(b, k)
+ b = writeV(b, v)
+ }
+ return b
+}
+
+// AppendMapSorted writes a map to the provided buffer.
+// Keys are sorted before writing.
+// This provides deterministic output, but will allocate to sort the keys.
+// The writeK and writeV parameters specify the functions to use to write each key and value of the map.
+// The returned buffer contains the encoded map.
+// The function panics if the map is larger than math.MaxUint32 elements.
+func AppendMapSorted[K cmp.Ordered, V any](b []byte, m map[K]V,
+ writeK func(b []byte, k K) []byte,
+ writeV func(b []byte, v V) []byte) []byte {
+ if m == nil {
+ return AppendNil(b)
+ }
+ if uint64(len(m)) > math.MaxUint32 {
+ panic(fmt.Sprintf("map too large to encode: %d elements", len(m)))
+ }
+ b = AppendMapHeader(b, uint32(len(m)))
+ for _, k := range slices.Sorted(maps.Keys(m)) {
+ b = writeK(b, k)
+ b = writeV(b, m[k])
+ }
+ return b
+}
+
+// DecodePtr is a convenience type for decoding into a pointer.
+type DecodePtr[T any] interface {
+ *T
+ Decodable
+}
+
+// DecoderFrom allows augmenting any type with a DecodeMsg method into a method
+// that reads from Reader and returns a T.
+// Provide an instance of T. This value isn't used.
+// See ReadArray/ReadMap "struct" examples for usage.
+func DecoderFrom[T any, PT DecodePtr[T]](r *Reader, _ T) func() (T, error) {
+ return func() (T, error) {
+ var t T
+ tPtr := PT(&t)
+ err := tPtr.DecodeMsg(r)
+ return t, err
+ }
+}
+
+// FlexibleEncoder is a constraint for types where either T or *T implements Encodable
+type FlexibleEncoder[T any] interface {
+ Encodable
+ *T
+}
+
+// EncoderTo allows augmenting any type with an EncodeMsg
+// method into a method that writes to Writer on each call.
+// Provide an instance of T. This value isn't used.
+// See ReadArray or ReadMap "struct" examples for usage.
+func EncoderTo[T any, _ FlexibleEncoder[T]](w *Writer, _ T) func(T) error {
+ return func(t T) error {
+ // Check if T implements Marshaler
+ if marshaler, ok := any(t).(Encodable); ok {
+ return marshaler.EncodeMsg(w)
+ }
+ // Check if *T implements Marshaler
+ if ptrMarshaler, ok := any(&t).(Encodable); ok {
+ return ptrMarshaler.EncodeMsg(w)
+ }
+ // The compiler should have asserted this.
+ panic("type does not implement Marshaler")
+ }
+}
+
+// UnmarshalPtr is a convenience type for unmarshaling into a pointer.
+type UnmarshalPtr[T any] interface {
+ *T
+ Unmarshaler
+}
+
+// DecoderFromBytes allows augmenting any type with an UnmarshalMsg
+// method into a method that reads from []byte and returns a T.
+// Provide an instance of T. This value isn't used.
+// See ReadArrayBytes or ReadMapBytes "struct" examples for usage.
+func DecoderFromBytes[T any, PT UnmarshalPtr[T]](_ T) func([]byte) (T, []byte, error) {
+ return func(b []byte) (T, []byte, error) {
+ var t T
+ tPtr := PT(&t)
+ b, err := tPtr.UnmarshalMsg(b)
+ return t, b, err
+ }
+}
+
+// FlexibleMarshaler is a constraint for types where either T or *T implements Marshaler
+type FlexibleMarshaler[T any] interface {
+ Marshaler
+ *T // Include *T in the interface
+}
+
+// EncoderToBytes allows augmenting any type with a MarshalMsg method into a method
+// that reads from T and returns a []byte.
+// Provide an instance of T. This value isn't used.
+// See ReadArrayBytes or ReadMapBytes "struct" examples for usage.
+func EncoderToBytes[T any, _ FlexibleMarshaler[T]](_ T) func([]byte, T) []byte {
+ return func(b []byte, t T) []byte {
+ // Check if T implements Marshaler
+ if marshaler, ok := any(t).(Marshaler); ok {
+ b, _ = marshaler.MarshalMsg(b)
+ return b
+ }
+ // Check if *T implements Marshaler
+ if ptrMarshaler, ok := any(&t).(Marshaler); ok {
+ b, _ = ptrMarshaler.MarshalMsg(b)
+ return b
+ }
+ // The compiler should have asserted this.
+ panic("type does not implement Marshaler")
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go
index 18593f64d5..896d690ba0 100644
--- a/vendor/github.com/tinylib/msgp/msgp/json.go
+++ b/vendor/github.com/tinylib/msgp/msgp/json.go
@@ -60,7 +60,7 @@ func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) {
// WriteToJSON translates MessagePack from 'r' and writes it as
// JSON to 'w' until the underlying reader returns io.EOF. It returns
// the number of bytes written, and an error if it stopped before EOF.
-func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
+func (m *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
var j jsWriter
var bf *bufio.Writer
if jsw, ok := w.(jsWriter); ok {
@@ -71,7 +71,7 @@ func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
}
var nn int
for err == nil {
- nn, err = rwNext(j, r)
+ nn, err = rwNext(j, m)
n += int64(nn)
}
if err != io.EOF {
@@ -364,7 +364,7 @@ func rwString(dst jsWriter, src *Reader) (n int, err error) {
if err != nil {
return
}
- read = int(uint8(p[1]))
+ read = int(p[1])
case mstr16:
p, err = src.R.Next(3)
if err != nil {
@@ -382,6 +382,10 @@ func rwString(dst jsWriter, src *Reader) (n int, err error) {
return
}
write:
+ if uint64(read) > src.GetMaxStringLength() {
+ err = ErrLimitExceeded
+ return
+ }
p, err = src.R.Next(read)
if err != nil {
return
diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
index d4fbda6315..7efd162f19 100644
--- a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
@@ -91,7 +91,7 @@ func rwArrayBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []
if err != nil {
return msg, scratch, err
}
- for i := uint32(0); i < sz; i++ {
+ for i := range sz {
if i != 0 {
err = w.WriteByte(',')
if err != nil {
@@ -119,7 +119,7 @@ func rwMapBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []by
if err != nil {
return msg, scratch, err
}
- for i := uint32(0); i < sz; i++ {
+ for i := range sz {
if i != 0 {
err = w.WriteByte(',')
if err != nil {
diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go
index edfe328b44..0bd3f31964 100644
--- a/vendor/github.com/tinylib/msgp/msgp/number.go
+++ b/vendor/github.com/tinylib/msgp/msgp/number.go
@@ -2,6 +2,7 @@ package msgp
import (
"math"
+ "math/bits"
"strconv"
)
@@ -77,7 +78,7 @@ func (n *Number) Uint() (uint64, bool) {
}
// Float casts the number to a float64, and
-// returns whether or not that was the underlying
+// returns whether that was the underlying
// type (either a float64 or a float32).
func (n *Number) Float() (float64, bool) {
switch n.typ {
@@ -182,7 +183,7 @@ func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
case IntType:
return AppendInt64(b, int64(n.bits)), nil
case UintType:
- return AppendUint64(b, uint64(n.bits)), nil
+ return AppendUint64(b, n.bits), nil
case Float64Type:
return AppendFloat64(b, math.Float64frombits(n.bits)), nil
case Float32Type:
@@ -208,6 +209,129 @@ func (n *Number) EncodeMsg(w *Writer) error {
}
}
+// CoerceInt attempts to coerce the value of
+// the number into a signed integer and returns
+// whether it was successful.
+// "Success" implies that no precision in the value of
+// the number was lost, which means that the number was an integer or
+// a floating point that mapped exactly to an integer without rounding.
+func (n *Number) CoerceInt() (int64, bool) {
+ switch n.typ {
+ case InvalidType, IntType:
+ // InvalidType just means un-initialized.
+ return int64(n.bits), true
+ case UintType:
+ return int64(n.bits), n.bits <= math.MaxInt64
+ case Float32Type:
+ f := math.Float32frombits(uint32(n.bits))
+ if n.isExactInt() && f <= math.MaxInt64 && f >= math.MinInt64 {
+ return int64(f), true
+ }
+ if n.bits == 0 || n.bits == 1<<31 {
+ return 0, true
+ }
+ case Float64Type:
+ f := math.Float64frombits(n.bits)
+ if n.isExactInt() && f <= math.MaxInt64 && f >= math.MinInt64 {
+ return int64(f), true
+ }
+ return 0, n.bits == 0 || n.bits == 1<<63
+ }
+ return 0, false
+}
+
+// CoerceUInt attempts to coerce the value of
+// the number into an unsigned integer and returns
+// whether it was successful.
+// "Success" implies that no precision in the value of
+// the number was lost, which means that the number was an integer or
+// a floating point that mapped exactly to an integer without rounding.
+func (n *Number) CoerceUInt() (uint64, bool) {
+ switch n.typ {
+ case InvalidType, IntType:
+ // InvalidType just means un-initialized.
+ if int64(n.bits) >= 0 {
+ return n.bits, true
+ }
+ case UintType:
+ return n.bits, true
+ case Float32Type:
+ f := math.Float32frombits(uint32(n.bits))
+ if f >= 0 && f <= math.MaxUint64 && n.isExactInt() {
+ return uint64(f), true
+ }
+ if n.bits == 0 || n.bits == 1<<31 {
+ return 0, true
+ }
+ case Float64Type:
+ f := math.Float64frombits(n.bits)
+ if f >= 0 && f <= math.MaxUint64 && n.isExactInt() {
+ return uint64(f), true
+ }
+ return 0, n.bits == 0 || n.bits == 1<<63
+ }
+ return 0, false
+}
+
+// isExactInt will return true if the number represents an integer value.
+// NaN, Inf returns false.
+func (n *Number) isExactInt() bool {
+ var eBits int // Exponent bits
+ var mBits int // Mantissa bits
+
+ switch n.typ {
+ case InvalidType, IntType, UintType:
+ return true
+ case Float32Type:
+ eBits = 8
+ mBits = 23
+ case Float64Type:
+ eBits = 11
+ mBits = 52
+ default:
+ return false
+ }
+ // Calculate float parts
+ exp := int(n.bits>>mBits) & ((1 << eBits) - 1)
+ mant := n.bits & ((1 << mBits) - 1)
+ if exp == 0 && mant == 0 {
+ // Handle zero value.
+ return true
+ }
+
+ exp -= (1 << (eBits - 1)) - 1
+ if exp < 0 || exp == 1<<(eBits-1) {
+ // Negative exponent is never integer (except zero handled above)
+ // Handles NaN (exp all 1s)
+ return false
+ }
+
+ if exp >= mBits {
+ // If we have more exponent than mantissa bits it is always an integer.
+ return true
+ }
+ // Check if all bits below the exponent are zero.
+ return bits.TrailingZeros64(mant) >= mBits-exp
+}
+
+// CoerceFloat returns the number as a float64.
+// If the number is an integer, it will be
+// converted to a float64 with the closest representation.
+func (n *Number) CoerceFloat() float64 {
+ switch n.typ {
+ case IntType:
+ return float64(int64(n.bits))
+ case UintType:
+ return float64(n.bits)
+ case Float32Type:
+ return float64(math.Float32frombits(uint32(n.bits)))
+ case Float64Type:
+ return math.Float64frombits(n.bits)
+ default:
+ return 0.0
+ }
+}
+
// Msgsize implements msgp.Sizer
func (n *Number) Msgsize() int {
switch n.typ {
diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go
index fe8723412b..03525aca5d 100644
--- a/vendor/github.com/tinylib/msgp/msgp/purego.go
+++ b/vendor/github.com/tinylib/msgp/msgp/purego.go
@@ -1,5 +1,4 @@
//go:build (purego && !unsafe) || appengine
-// +build purego,!unsafe appengine
package msgp
diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go
index 20d3463bbd..264933ad0e 100644
--- a/vendor/github.com/tinylib/msgp/msgp/read.go
+++ b/vendor/github.com/tinylib/msgp/msgp/read.go
@@ -1,8 +1,10 @@
package msgp
import (
+ "encoding"
"encoding/binary"
"encoding/json"
+ "fmt"
"io"
"math"
"strconv"
@@ -13,7 +15,7 @@ import (
)
// where we keep old *Readers
-var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }}
+var readerPool = sync.Pool{New: func() any { return &Reader{} }}
// Type is a MessagePack wire type,
// including this package's built-in
@@ -152,6 +154,10 @@ type Reader struct {
R *fwd.Reader
scratch []byte
recursionDepth int
+
+ maxRecursionDepth int // maximum recursion depth
+ maxElements uint32 // maximum number of elements in arrays and maps
+ maxStrLen uint64 // maximum number of bytes in any string
}
// Read implements `io.Reader`
@@ -171,7 +177,7 @@ func (m *Reader) CopyNext(w io.Writer) (int64, error) {
// Opportunistic optimization: if we can fit the whole thing in the m.R
// buffer, then just get a pointer to that, and pass it to w.Write,
// avoiding an allocation.
- if int(sz) <= m.R.BufferSize() {
+ if int(sz) >= 0 && int(sz) <= m.R.BufferSize() {
var nn int
var buf []byte
buf, err = m.R.Next(int(sz))
@@ -203,7 +209,7 @@ func (m *Reader) CopyNext(w io.Writer) (int64, error) {
defer done()
}
// for maps and slices, read elements
- for x := uintptr(0); x < o; x++ {
+ for range o {
var n2 int64
n2, err = m.CopyNext(w)
if err != nil {
@@ -214,10 +220,53 @@ func (m *Reader) CopyNext(w io.Writer) (int64, error) {
return n, nil
}
+// SetMaxRecursionDepth sets the maximum recursion depth.
+func (m *Reader) SetMaxRecursionDepth(d int) {
+ m.maxRecursionDepth = d
+}
+
+// GetMaxRecursionDepth returns the maximum recursion depth.
+// Set to 0 to use the default value of 100000.
+func (m *Reader) GetMaxRecursionDepth() int {
+ if m.maxRecursionDepth <= 0 {
+ return recursionLimit
+ }
+ return m.maxRecursionDepth
+}
+
+// SetMaxElements sets the maximum number of elements to allow in map, bin, array or extension payload.
+// Setting this to 0 will allow any number of elements - math.MaxUint32.
+// This does currently apply to generated code.
+func (m *Reader) SetMaxElements(d uint32) {
+ m.maxElements = d
+}
+
+// GetMaxElements will return the maximum number of elements in a map, bin, array or extension payload.
+func (m *Reader) GetMaxElements() uint32 {
+ if m.maxElements <= 0 {
+ return math.MaxUint32
+ }
+ return m.maxElements
+}
+
+// SetMaxStringLength sets the maximum number of bytes to allow in strings.
+// Setting this == 0 will allow any number of elements - math.MaxUint64.
+func (m *Reader) SetMaxStringLength(d uint64) {
+ m.maxStrLen = d
+}
+
+// GetMaxStringLength will return the current string length limit.
+func (m *Reader) GetMaxStringLength() uint64 {
+ if m.maxStrLen <= 0 {
+ return math.MaxUint64
+ }
+ return min(m.maxStrLen, math.MaxUint64)
+}
+
// recursiveCall will increment the recursion depth and return an error if it is exceeded.
// If a nil error is returned, done must be called to decrement the counter.
func (m *Reader) recursiveCall() (done func(), err error) {
- if m.recursionDepth >= recursionLimit {
+ if m.recursionDepth >= m.GetMaxRecursionDepth() {
return func() {}, ErrRecursion
}
m.recursionDepth++
@@ -415,7 +464,11 @@ func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) {
out, err := m.ReadStringAsBytes(scratch)
if err != nil {
if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
- return m.ReadBytes(scratch)
+ key, err := m.ReadBytes(scratch)
+ if uint64(len(key)) > m.GetMaxStringLength() {
+ return nil, ErrLimitExceeded
+ }
+ return key, err
}
return nil, err
}
@@ -468,6 +521,9 @@ fill:
if read == 0 {
return nil, ErrShortBytes
}
+ if uint64(read) > m.GetMaxStringLength() {
+ return nil, ErrLimitExceeded
+ }
return m.R.Next(read)
}
@@ -528,7 +584,7 @@ func (m *Reader) ReadFloat64() (f float64, err error) {
var p []byte
p, err = m.R.Peek(9)
if err != nil {
- // we'll allow a coversion from float32 to float64,
+ // we'll allow a conversion from float32 to float64,
// since we don't lose any precision
if err == io.EOF && len(p) > 0 && p[0] == mfloat32 {
ef, err := m.ReadFloat32()
@@ -816,7 +872,7 @@ func (m *Reader) ReadUint64() (u uint64, err error) {
if err != nil {
return
}
- v := int64(getMint64(p))
+ v := getMint64(p)
if v < 0 {
err = UintBelowZero{Value: v}
return
@@ -941,6 +997,10 @@ func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) {
return
}
if int64(cap(scratch)) < read {
+ if read > int64(m.GetMaxElements()) {
+ err = ErrLimitExceeded
+ return
+ }
b = make([]byte, read)
} else {
b = scratch[0:read]
@@ -980,10 +1040,10 @@ func (m *Reader) ReadBytesHeader() (sz uint32, err error) {
if err != nil {
return
}
- sz = uint32(big.Uint32(p[1:]))
+ sz = big.Uint32(p[1:])
return
default:
- err = badPrefix(BinType, p[0])
+ err = badPrefix(BinType, lead)
return
}
}
@@ -1052,7 +1112,7 @@ func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
if err != nil {
return
}
- read = int64(uint8(p[1]))
+ read = int64(p[1])
case mstr16:
p, err = m.R.Next(3)
if err != nil {
@@ -1070,6 +1130,10 @@ func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
return
}
fill:
+ if uint64(read) > m.GetMaxStringLength() {
+ err = ErrLimitExceeded
+ return
+ }
if int64(cap(scratch)) < read {
b = make([]byte, read)
} else {
@@ -1143,7 +1207,7 @@ func (m *Reader) ReadString() (s string, err error) {
if err != nil {
return
}
- read = int64(uint8(p[1]))
+ read = int64(p[1])
case mstr16:
p, err = m.R.Next(3)
if err != nil {
@@ -1165,6 +1229,11 @@ fill:
s, err = "", nil
return
}
+ if uint64(read) > m.GetMaxStringLength() {
+ err = ErrLimitExceeded
+ return
+ }
+
// reading into the memory
// that will become the string
// itself has vastly superior
@@ -1235,7 +1304,7 @@ func (m *Reader) ReadComplex128() (f complex128, err error) {
// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}.
// (You must pass a non-nil map into the function.)
-func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) {
+func (m *Reader) ReadMapStrIntf(mp map[string]any) (err error) {
var sz uint32
sz, err = m.ReadMapHeader()
if err != nil {
@@ -1244,9 +1313,13 @@ func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) {
for key := range mp {
delete(mp, key)
}
+ if sz > m.GetMaxElements() {
+ err = ErrLimitExceeded
+ return
+ }
for i := uint32(0); i < sz; i++ {
var key string
- var val interface{}
+ var val any
key, err = m.ReadString()
if err != nil {
return
@@ -1376,7 +1449,7 @@ func (m *Reader) ReadJSONNumber() (n json.Number, err error) {
// Arrays are decoded as []interface{}, and maps are decoded
// as map[string]interface{}. Integers are decoded as int64
// and unsigned integers are decoded as uint64.
-func (m *Reader) ReadIntf() (i interface{}, err error) {
+func (m *Reader) ReadIntf() (i any, err error) {
var t Type
t, err = m.NextType()
if err != nil {
@@ -1446,7 +1519,7 @@ func (m *Reader) ReadIntf() (i interface{}, err error) {
defer done()
}
- mp := make(map[string]interface{})
+ mp := make(map[string]any)
err = m.ReadMapStrIntf(mp)
i = mp
return
@@ -1477,8 +1550,12 @@ func (m *Reader) ReadIntf() (i interface{}, err error) {
} else {
defer done()
}
+ if sz > m.GetMaxElements() {
+ err = ErrLimitExceeded
+ return
+ }
- out := make([]interface{}, int(sz))
+ out := make([]any, int(sz))
for j := range out {
out[j], err = m.ReadIntf()
if err != nil {
@@ -1492,3 +1569,51 @@ func (m *Reader) ReadIntf() (i interface{}, err error) {
return nil, fatal // unreachable
}
}
+
+// ReadBinaryUnmarshal reads a binary-encoded object from the reader and unmarshals it into dst.
+func (m *Reader) ReadBinaryUnmarshal(dst encoding.BinaryUnmarshaler) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("msgp: panic during UnmarshalBinary: %v", r)
+ }
+ }()
+ tmp := bytesPool.Get().([]byte)
+ defer bytesPool.Put(tmp) //nolint:staticcheck
+ tmp, err = m.ReadBytes(tmp[:0])
+ if err != nil {
+ return
+ }
+ return dst.UnmarshalBinary(tmp)
+}
+
+// ReadTextUnmarshal reads a text-encoded bin array from the reader and unmarshals it into dst.
+func (m *Reader) ReadTextUnmarshal(dst encoding.TextUnmarshaler) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("msgp: panic during UnmarshalText: %v", r)
+ }
+ }()
+ tmp := bytesPool.Get().([]byte)
+ defer bytesPool.Put(tmp) //nolint:staticcheck
+ tmp, err = m.ReadBytes(tmp[:0])
+ if err != nil {
+ return
+ }
+ return dst.UnmarshalText(tmp)
+}
+
+// ReadTextUnmarshalString reads a text-encoded string from the reader and unmarshals it into dst.
+func (m *Reader) ReadTextUnmarshalString(dst encoding.TextUnmarshaler) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("msgp: panic during UnmarshalText: %v", r)
+ }
+ }()
+ tmp := bytesPool.Get().([]byte)
+ defer bytesPool.Put(tmp) //nolint:staticcheck
+ tmp, err = m.ReadStringAsBytes(tmp[:0])
+ if err != nil {
+ return
+ }
+ return dst.UnmarshalText(tmp)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
index 8ed15a9688..3d439a2159 100644
--- a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
@@ -1130,7 +1130,7 @@ func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) {
return
}
default:
- err = errExt(int8(b[2]), TimeExtension)
+ err = errExt(typ, TimeExtension)
return
}
}
@@ -1138,11 +1138,11 @@ func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) {
// ReadMapStrIntfBytes reads a map[string]interface{}
// out of 'b' and returns the map and remaining bytes.
// If 'old' is non-nil, the values will be read into that map.
-func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) {
+func ReadMapStrIntfBytes(b []byte, old map[string]any) (v map[string]any, o []byte, err error) {
return readMapStrIntfBytesDepth(b, old, 0)
}
-func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (v map[string]interface{}, o []byte, err error) {
+func readMapStrIntfBytesDepth(b []byte, old map[string]any, depth int) (v map[string]any, o []byte, err error) {
if depth >= recursionLimit {
err = ErrRecursion
return
@@ -1155,14 +1155,18 @@ func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (
if err != nil {
return
}
-
+ // Map key, min size is 2 bytes. Value min 1 byte.
+ if int64(len(b)) < int64(sz)*3 {
+ err = ErrShortBytes
+ return
+ }
if old != nil {
for key := range old {
delete(old, key)
}
v = old
} else {
- v = make(map[string]interface{}, int(sz))
+ v = make(map[string]any, int(sz))
}
for z := uint32(0); z < sz; z++ {
@@ -1175,7 +1179,7 @@ func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (
if err != nil {
return
}
- var val interface{}
+ var val any
val, o, err = readIntfBytesDepth(o, depth)
if err != nil {
return
@@ -1188,11 +1192,11 @@ func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (
// ReadIntfBytes attempts to read
// the next object out of 'b' as a raw interface{} and
// return the remaining bytes.
-func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) {
+func ReadIntfBytes(b []byte) (i any, o []byte, err error) {
return readIntfBytesDepth(b, 0)
}
-func readIntfBytesDepth(b []byte, depth int) (i interface{}, o []byte, err error) {
+func readIntfBytesDepth(b []byte, depth int) (i any, o []byte, err error) {
if depth >= recursionLimit {
err = ErrRecursion
return
@@ -1215,7 +1219,12 @@ func readIntfBytesDepth(b []byte, depth int) (i interface{}, o []byte, err error
if err != nil {
return
}
- j := make([]interface{}, int(sz))
+ // Each element will at least be 1 byte.
+ if uint32(len(o)) < sz {
+ err = ErrShortBytes
+ return
+ }
+ j := make([]any, int(sz))
i = j
for d := range j {
j[d], o, err = readIntfBytesDepth(o, depth+1)
@@ -1274,7 +1283,7 @@ func readIntfBytesDepth(b []byte, depth int) (i interface{}, o []byte, err error
}
// last resort is a raw extension
e := RawExtension{}
- e.Type = int8(t)
+ e.Type = t
o, err = ReadExtensionBytes(b, &e)
i = &e
return
diff --git a/vendor/github.com/tinylib/msgp/msgp/setof/generated.go b/vendor/github.com/tinylib/msgp/msgp/setof/generated.go
new file mode 100644
index 0000000000..9cf64d0a06
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/setof/generated.go
@@ -0,0 +1,3989 @@
+// Code generated by ./gen/main.go. DO NOT EDIT.
+
+package setof
+
+import (
+ "slices"
+ "sort"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+// String is a set of strings that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type String map[string]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s String) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteString(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s String) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendString(bytes, string(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s String) AsSlice() []string {
+ if s == nil {
+ return nil
+ }
+ dst := make([]string, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *String) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(String, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k string
+ k, err = reader.ReadString()
+ if err != nil {
+ return err
+ }
+ dst[string(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *String) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(String, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k string
+ k, bytes, err = msgp.ReadStringBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[string(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s String) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ for key := range s {
+ size += msgp.StringPrefixSize + len(key)
+ }
+ return size
+}
+
+// StringFromSlice creates a String from a slice.
+func StringFromSlice(s []string) String {
+ if s == nil {
+ return nil
+ }
+ dst := make(String, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// StringSorted is a set of strings that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type StringSorted map[string]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s StringSorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]string, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ err = writer.WriteString(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s StringSorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]string, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ bytes = msgp.AppendString(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s StringSorted) AsSlice() []string {
+ if s == nil {
+ return nil
+ }
+ keys := make([]string, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *StringSorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(StringSorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k string
+ k, err = reader.ReadString()
+ if err != nil {
+ return err
+ }
+ dst[string(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *StringSorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(StringSorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k string
+ k, bytes, err = msgp.ReadStringBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[string(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s StringSorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ for key := range s {
+ size += msgp.StringPrefixSize + len(key)
+ }
+ return size
+}
+
+// StringSortedFromSlice creates a StringSorted from a slice.
+func StringSortedFromSlice(s []string) StringSorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(StringSorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int is a set of ints that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Int map[int]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteInt(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendInt(bytes, int(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Int) AsSlice() []int {
+ if s == nil {
+ return nil
+ }
+ dst := make([]int, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int
+ k, err = reader.ReadInt()
+ if err != nil {
+ return err
+ }
+ dst[int(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int
+ k, bytes, err = msgp.ReadIntBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.IntSize
+ return size
+}
+
+// IntFromSlice creates a Int from a slice.
+func IntFromSlice(s []int) Int {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// IntSorted is a set of ints that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type IntSorted map[int]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s IntSorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]int, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteInt(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s IntSorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]int, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendInt(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s IntSorted) AsSlice() []int {
+ if s == nil {
+ return nil
+ }
+ keys := make([]int, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *IntSorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(IntSorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int
+ k, err = reader.ReadInt()
+ if err != nil {
+ return err
+ }
+ dst[int(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *IntSorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(IntSorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int
+ k, bytes, err = msgp.ReadIntBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s IntSorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.IntSize
+ return size
+}
+
+// IntSortedFromSlice creates a IntSorted from a slice.
+func IntSortedFromSlice(s []int) IntSorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(IntSorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint is a set of uints that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Uint map[uint]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteUint(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendUint(bytes, uint(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Uint) AsSlice() []uint {
+ if s == nil {
+ return nil
+ }
+ dst := make([]uint, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint
+ k, err = reader.ReadUint()
+ if err != nil {
+ return err
+ }
+ dst[uint(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint
+ k, bytes, err = msgp.ReadUintBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.UintSize
+ return size
+}
+
+// UintFromSlice creates a Uint from a slice.
+func UintFromSlice(s []uint) Uint {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// UintSorted is a set of uints that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type UintSorted map[uint]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s UintSorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]uint, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteUint(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s UintSorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]uint, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendUint(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s UintSorted) AsSlice() []uint {
+ if s == nil {
+ return nil
+ }
+ keys := make([]uint, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *UintSorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(UintSorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint
+ k, err = reader.ReadUint()
+ if err != nil {
+ return err
+ }
+ dst[uint(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *UintSorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(UintSorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint
+ k, bytes, err = msgp.ReadUintBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s UintSorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.UintSize
+ return size
+}
+
+// UintSortedFromSlice creates a UintSorted from a slice.
+func UintSortedFromSlice(s []uint) UintSorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(UintSorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Byte is a set of bytes that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Byte map[byte]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Byte) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteByte(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Byte) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendByte(bytes, byte(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Byte) AsSlice() []byte {
+ if s == nil {
+ return nil
+ }
+ dst := make([]byte, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Byte) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Byte, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k byte
+ k, err = reader.ReadByte()
+ if err != nil {
+ return err
+ }
+ dst[byte(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Byte) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Byte, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k byte
+ k, bytes, err = msgp.ReadByteBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[byte(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Byte) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.ByteSize
+ return size
+}
+
+// ByteFromSlice creates a Byte from a slice.
+func ByteFromSlice(s []byte) Byte {
+ if s == nil {
+ return nil
+ }
+ dst := make(Byte, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// ByteSorted is a set of bytes that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type ByteSorted map[byte]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s ByteSorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]byte, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b byte) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteByte(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s ByteSorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]byte, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b byte) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendByte(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s ByteSorted) AsSlice() []byte {
+ if s == nil {
+ return nil
+ }
+ keys := make([]byte, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b byte) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *ByteSorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(ByteSorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k byte
+ k, err = reader.ReadByte()
+ if err != nil {
+ return err
+ }
+ dst[byte(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *ByteSorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(ByteSorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k byte
+ k, bytes, err = msgp.ReadByteBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[byte(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s ByteSorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.ByteSize
+ return size
+}
+
+// ByteSortedFromSlice creates a ByteSorted from a slice.
+func ByteSortedFromSlice(s []byte) ByteSorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(ByteSorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int8 is a set of int8s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Int8 map[int8]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int8) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteInt8(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int8) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendInt8(bytes, int8(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Int8) AsSlice() []int8 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]int8, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int8) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int8, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int8
+ k, err = reader.ReadInt8()
+ if err != nil {
+ return err
+ }
+ dst[int8(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int8) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int8, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int8
+ k, bytes, err = msgp.ReadInt8Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int8(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int8) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Int8Size
+ return size
+}
+
+// Int8FromSlice creates a Int8 from a slice.
+func Int8FromSlice(s []int8) Int8 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int8, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int8Sorted is a set of int8s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Int8Sorted map[int8]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int8Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]int8, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int8) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteInt8(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int8Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]int8, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int8) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendInt8(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Int8Sorted) AsSlice() []int8 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]int8, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int8) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int8Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int8Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int8
+ k, err = reader.ReadInt8()
+ if err != nil {
+ return err
+ }
+ dst[int8(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int8Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int8Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int8
+ k, bytes, err = msgp.ReadInt8Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int8(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int8Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Int8Size
+ return size
+}
+
+// Int8SortedFromSlice creates a Int8Sorted from a slice.
+func Int8SortedFromSlice(s []int8) Int8Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int8Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint8 is a set of uint8s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Uint8 map[uint8]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint8) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteUint8(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint8) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendUint8(bytes, uint8(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Uint8) AsSlice() []uint8 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]uint8, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint8) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint8, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint8
+ k, err = reader.ReadUint8()
+ if err != nil {
+ return err
+ }
+ dst[uint8(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint8) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint8, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint8
+ k, bytes, err = msgp.ReadUint8Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint8(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint8) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Uint8Size
+ return size
+}
+
+// Uint8FromSlice creates a Uint8 from a slice.
+func Uint8FromSlice(s []uint8) Uint8 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint8, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint8Sorted is a set of uint8s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Uint8Sorted map[uint8]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint8Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]uint8, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint8) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteUint8(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint8Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]uint8, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint8) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendUint8(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Uint8Sorted) AsSlice() []uint8 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]uint8, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint8) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint8Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint8Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint8
+ k, err = reader.ReadUint8()
+ if err != nil {
+ return err
+ }
+ dst[uint8(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint8Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint8Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint8
+ k, bytes, err = msgp.ReadUint8Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint8(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint8Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Uint8Size
+ return size
+}
+
+// Uint8SortedFromSlice creates a Uint8Sorted from a slice.
+func Uint8SortedFromSlice(s []uint8) Uint8Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint8Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int16 is a set of int16s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Int16 map[int16]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int16) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteInt16(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int16) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendInt16(bytes, int16(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Int16) AsSlice() []int16 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]int16, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int16) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int16, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int16
+ k, err = reader.ReadInt16()
+ if err != nil {
+ return err
+ }
+ dst[int16(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int16) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int16, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int16
+ k, bytes, err = msgp.ReadInt16Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int16(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int16) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Int16Size
+ return size
+}
+
+// Int16FromSlice creates a Int16 from a slice.
+func Int16FromSlice(s []int16) Int16 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int16, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int16Sorted is a set of int16s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Int16Sorted map[int16]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int16Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]int16, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int16) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteInt16(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int16Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]int16, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int16) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendInt16(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Int16Sorted) AsSlice() []int16 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]int16, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int16) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int16Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int16Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int16
+ k, err = reader.ReadInt16()
+ if err != nil {
+ return err
+ }
+ dst[int16(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int16Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int16Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int16
+ k, bytes, err = msgp.ReadInt16Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int16(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int16Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Int16Size
+ return size
+}
+
+// Int16SortedFromSlice creates a Int16Sorted from a slice.
+func Int16SortedFromSlice(s []int16) Int16Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int16Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint16 is a set of uint16s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Uint16 map[uint16]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint16) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteUint16(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint16) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendUint16(bytes, uint16(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Uint16) AsSlice() []uint16 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]uint16, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint16) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint16, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint16
+ k, err = reader.ReadUint16()
+ if err != nil {
+ return err
+ }
+ dst[uint16(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint16) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint16, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint16
+ k, bytes, err = msgp.ReadUint16Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint16(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint16) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Uint16Size
+ return size
+}
+
+// Uint16FromSlice creates a Uint16 from a slice.
+func Uint16FromSlice(s []uint16) Uint16 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint16, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint16Sorted is a set of uint16s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Uint16Sorted map[uint16]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint16Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]uint16, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint16) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteUint16(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint16Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]uint16, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint16) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendUint16(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Uint16Sorted) AsSlice() []uint16 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]uint16, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint16) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint16Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint16Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint16
+ k, err = reader.ReadUint16()
+ if err != nil {
+ return err
+ }
+ dst[uint16(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint16Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint16Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint16
+ k, bytes, err = msgp.ReadUint16Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint16(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint16Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Uint16Size
+ return size
+}
+
+// Uint16SortedFromSlice creates a Uint16Sorted from a slice.
+func Uint16SortedFromSlice(s []uint16) Uint16Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint16Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int32 is a set of int32s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Int32 map[int32]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int32) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteInt32(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int32) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendInt32(bytes, int32(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Int32) AsSlice() []int32 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]int32, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int32) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int32, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int32
+ k, err = reader.ReadInt32()
+ if err != nil {
+ return err
+ }
+ dst[int32(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int32) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int32, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int32
+ k, bytes, err = msgp.ReadInt32Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int32(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int32) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Int32Size
+ return size
+}
+
+// Int32FromSlice creates a Int32 from a slice.
+func Int32FromSlice(s []int32) Int32 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int32, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int32Sorted is a set of int32s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Int32Sorted map[int32]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int32Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]int32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteInt32(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int32Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]int32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendInt32(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Int32Sorted) AsSlice() []int32 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]int32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int32Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int32Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int32
+ k, err = reader.ReadInt32()
+ if err != nil {
+ return err
+ }
+ dst[int32(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int32Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int32Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int32
+ k, bytes, err = msgp.ReadInt32Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int32(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int32Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Int32Size
+ return size
+}
+
+// Int32SortedFromSlice creates a Int32Sorted from a slice.
+func Int32SortedFromSlice(s []int32) Int32Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int32Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint32 is a set of uint32s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Uint32 map[uint32]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint32) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteUint32(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint32) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendUint32(bytes, uint32(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Uint32) AsSlice() []uint32 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]uint32, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint32) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint32, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint32
+ k, err = reader.ReadUint32()
+ if err != nil {
+ return err
+ }
+ dst[uint32(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint32) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint32, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint32
+ k, bytes, err = msgp.ReadUint32Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint32(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint32) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Uint32Size
+ return size
+}
+
+// Uint32FromSlice creates a Uint32 from a slice.
+func Uint32FromSlice(s []uint32) Uint32 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint32, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint32Sorted is a set of uint32s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Uint32Sorted map[uint32]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint32Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]uint32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteUint32(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint32Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]uint32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendUint32(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Uint32Sorted) AsSlice() []uint32 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]uint32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint32Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint32Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint32
+ k, err = reader.ReadUint32()
+ if err != nil {
+ return err
+ }
+ dst[uint32(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint32Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint32Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint32
+ k, bytes, err = msgp.ReadUint32Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint32(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint32Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Uint32Size
+ return size
+}
+
+// Uint32SortedFromSlice creates a Uint32Sorted from a slice.
+func Uint32SortedFromSlice(s []uint32) Uint32Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint32Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int64 is a set of int64s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Int64 map[int64]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int64) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteInt64(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int64) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendInt64(bytes, int64(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Int64) AsSlice() []int64 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]int64, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int64) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int64, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int64
+ k, err = reader.ReadInt64()
+ if err != nil {
+ return err
+ }
+ dst[int64(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int64) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int64, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int64
+ k, bytes, err = msgp.ReadInt64Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int64(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int64) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Int64Size
+ return size
+}
+
+// Int64FromSlice creates a Int64 from a slice.
+func Int64FromSlice(s []int64) Int64 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int64, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Int64Sorted is a set of int64s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Int64Sorted map[int64]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Int64Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]int64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteInt64(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Int64Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]int64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendInt64(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Int64Sorted) AsSlice() []int64 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]int64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b int64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Int64Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int64Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int64
+ k, err = reader.ReadInt64()
+ if err != nil {
+ return err
+ }
+ dst[int64(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Int64Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Int64Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k int64
+ k, bytes, err = msgp.ReadInt64Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[int64(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Int64Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Int64Size
+ return size
+}
+
+// Int64SortedFromSlice creates a Int64Sorted from a slice.
+func Int64SortedFromSlice(s []int64) Int64Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Int64Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint64 is a set of uint64s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Uint64 map[uint64]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint64) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteUint64(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint64) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendUint64(bytes, uint64(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Uint64) AsSlice() []uint64 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]uint64, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint64) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint64, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint64
+ k, err = reader.ReadUint64()
+ if err != nil {
+ return err
+ }
+ dst[uint64(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint64) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint64, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint64
+ k, bytes, err = msgp.ReadUint64Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint64(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint64) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Uint64Size
+ return size
+}
+
+// Uint64FromSlice creates a Uint64 from a slice.
+func Uint64FromSlice(s []uint64) Uint64 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint64, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Uint64Sorted is a set of uint64s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Uint64Sorted map[uint64]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Uint64Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]uint64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteUint64(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Uint64Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]uint64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendUint64(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Uint64Sorted) AsSlice() []uint64 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]uint64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b uint64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Uint64Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint64Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint64
+ k, err = reader.ReadUint64()
+ if err != nil {
+ return err
+ }
+ dst[uint64(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Uint64Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Uint64Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k uint64
+ k, bytes, err = msgp.ReadUint64Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[uint64(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Uint64Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Uint64Size
+ return size
+}
+
+// Uint64SortedFromSlice creates a Uint64Sorted from a slice.
+func Uint64SortedFromSlice(s []uint64) Uint64Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Uint64Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Float64 is a set of float64s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Float64 map[float64]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Float64) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteFloat(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Float64) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendFloat(bytes, float64(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Float64) AsSlice() []float64 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]float64, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Float64) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Float64, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k float64
+ k, err = reader.ReadFloat64()
+ if err != nil {
+ return err
+ }
+ dst[float64(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Float64) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Float64, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k float64
+ k, bytes, err = msgp.ReadFloat64Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[float64(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Float64) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Float64Size
+ return size
+}
+
+// Float64FromSlice creates a Float64 from a slice.
+func Float64FromSlice(s []float64) Float64 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Float64, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Float64Sorted is a set of float64s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Float64Sorted map[float64]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Float64Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]float64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b float64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteFloat(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Float64Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]float64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b float64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendFloat(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Float64Sorted) AsSlice() []float64 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]float64, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b float64) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Float64Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Float64Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k float64
+ k, err = reader.ReadFloat64()
+ if err != nil {
+ return err
+ }
+ dst[float64(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Float64Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Float64Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k float64
+ k, bytes, err = msgp.ReadFloat64Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[float64(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Float64Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Float64Size
+ return size
+}
+
+// Float64SortedFromSlice creates a Float64Sorted from a slice.
+func Float64SortedFromSlice(s []float64) Float64Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Float64Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Float32 is a set of float32s that will be stored as an array.
+// Elements are not sorted and the order of elements is not guaranteed.
+type Float32 map[float32]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Float32) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ for k := range s {
+ err = writer.WriteFloat32(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Float32) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ for k := range s {
+ bytes = msgp.AppendFloat32(bytes, float32(k))
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a slice.
+func (s Float32) AsSlice() []float32 {
+ if s == nil {
+ return nil
+ }
+ dst := make([]float32, 0, len(s))
+ for k := range s {
+ dst = append(dst, k)
+ }
+ return dst
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Float32) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Float32, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k float32
+ k, err = reader.ReadFloat32()
+ if err != nil {
+ return err
+ }
+ dst[float32(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Float32) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Float32, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k float32
+ k, bytes, err = msgp.ReadFloat32Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[float32(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Float32) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Float32Size
+ return size
+}
+
+// Float32FromSlice creates a Float32 from a slice.
+func Float32FromSlice(s []float32) Float32 {
+ if s == nil {
+ return nil
+ }
+ dst := make(Float32, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
+
+// Float32Sorted is a set of float32s that will be stored as an array.
+// Elements are sorted and the order of elements is guaranteed.
+type Float32Sorted map[float32]struct{}
+
+// EncodeMsg encodes the message to the writer.
+func (s Float32Sorted) EncodeMsg(writer *msgp.Writer) error {
+ if s == nil {
+ return writer.WriteNil()
+ }
+ err := writer.WriteArrayHeader(uint32(len(s)))
+ if err != nil {
+ return err
+ }
+ keys := make([]float32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b float32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+
+ for _, k := range keys {
+ err = writer.WriteFloat32(k)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// MarshalMsg encodes the message to the bytes.
+func (s Float32Sorted) MarshalMsg(bytes []byte) ([]byte, error) {
+ if s == nil {
+ return msgp.AppendNil(bytes), nil
+ }
+ if len(s) == 0 {
+ return msgp.AppendArrayHeader(bytes, 0), nil
+ }
+ bytes = ensure(bytes, s.Msgsize())
+ bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
+ keys := make([]float32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b float32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ for _, k := range keys {
+ bytes = msgp.AppendFloat32(bytes, k)
+ }
+ return bytes, nil
+}
+
+// AsSlice returns the set as a sorted slice.
+func (s Float32Sorted) AsSlice() []float32 {
+ if s == nil {
+ return nil
+ }
+ keys := make([]float32, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ slices.SortFunc(keys, func(a, b float32) int {
+ if a < b {
+ return -1
+ }
+ return 1
+ })
+ return keys
+}
+
+// DecodeMsg decodes the message from the reader.
+func (s *Float32Sorted) DecodeMsg(reader *msgp.Reader) error {
+ if reader.IsNil() {
+ *s = nil
+ return reader.Skip()
+ }
+ sz, err := reader.ReadArrayHeader()
+ if err != nil {
+ return err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Float32Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k float32
+ k, err = reader.ReadFloat32()
+ if err != nil {
+ return err
+ }
+ dst[float32(k)] = struct{}{}
+ }
+ *s = dst
+ return nil
+}
+
+// UnmarshalMsg decodes the message from the bytes.
+func (s *Float32Sorted) UnmarshalMsg(bytes []byte) ([]byte, error) {
+ if msgp.IsNil(bytes) {
+ *s = nil
+ return bytes[msgp.NilSize:], nil
+ }
+ // Read the array header
+ sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst := *s
+ if dst == nil {
+ dst = make(Float32Sorted, sz)
+ } else {
+ clear(dst)
+ }
+ for range sz {
+ var k float32
+ k, bytes, err = msgp.ReadFloat32Bytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dst[float32(k)] = struct{}{}
+ }
+ *s = dst
+ return bytes, nil
+}
+
+// Msgsize returns the maximum size of the message.
+func (s Float32Sorted) Msgsize() int {
+ if s == nil {
+ return msgp.NilSize
+ }
+ size := msgp.ArrayHeaderSize
+ size += len(s) * msgp.Float32Size
+ return size
+}
+
+// Float32SortedFromSlice creates a Float32Sorted from a slice.
+func Float32SortedFromSlice(s []float32) Float32Sorted {
+ if s == nil {
+ return nil
+ }
+ dst := make(Float32Sorted, len(s))
+ for _, v := range s {
+ dst[v] = struct{}{}
+ }
+ return dst
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/setof/setof.go b/vendor/github.com/tinylib/msgp/msgp/setof/setof.go
new file mode 100644
index 0000000000..6e82c18c77
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/setof/setof.go
@@ -0,0 +1,19 @@
+// Package setof allows serializing sets map[T]struct{} as arrays.
+//
+// Nil maps are preserved as a nil value on stream.
+//
+// A deterministic, sorted version is available, with slightly lower performance.
+//
+package setof
+
+// ensure 'sz' extra bytes in 'b' can be appended without reallocating
+func ensure(b []byte, sz int) []byte {
+ l := len(b)
+ c := cap(b)
+ if c-l < sz {
+ o := make([]byte, l, l+sz)
+ copy(o, b)
+ return o
+ }
+ return b
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go
index 585a67fdb5..b81b94e69d 100644
--- a/vendor/github.com/tinylib/msgp/msgp/size.go
+++ b/vendor/github.com/tinylib/msgp/msgp/size.go
@@ -37,4 +37,13 @@ const (
BytesPrefixSize = 5
StringPrefixSize = 5
ExtensionPrefixSize = 6
+
+ // We cannot determine the exact size of the marshalled bytes,
+ // so we assume 32 bytes
+ BinaryMarshalerSize = BytesPrefixSize + 32
+ BinaryAppenderSize
+ TextMarshalerBinSize
+ TextAppenderBinSize
+ TextMarshalerStringSize = StringPrefixSize + 32
+ TextAppenderStringSize
)
diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
index 7d36bfb1e3..3a83eacdae 100644
--- a/vendor/github.com/tinylib/msgp/msgp/unsafe.go
+++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
@@ -1,5 +1,4 @@
//go:build (!purego && !appengine) || (!appengine && purego && unsafe)
-// +build !purego,!appengine !appengine,purego,unsafe
package msgp
diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go
index 352350f904..55192222cf 100644
--- a/vendor/github.com/tinylib/msgp/msgp/write.go
+++ b/vendor/github.com/tinylib/msgp/msgp/write.go
@@ -1,9 +1,11 @@
package msgp
import (
+ "encoding"
"encoding/binary"
"encoding/json"
"errors"
+ "fmt"
"io"
"math"
"reflect"
@@ -33,7 +35,7 @@ var (
btsType = reflect.TypeOf(([]byte)(nil))
writerPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &Writer{buf: make([]byte, 2048)}
},
}
@@ -430,7 +432,7 @@ func (mw *Writer) WriteUint64(u uint64) error {
}
// WriteByte is analogous to WriteUint8
-func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
+func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(u) }
// WriteUint8 writes a uint8 to the writer
func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) }
@@ -446,6 +448,9 @@ func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) }
// WriteBytes writes binary as 'bin' to the writer
func (mw *Writer) WriteBytes(b []byte) error {
+ if uint64(len(b)) > math.MaxUint32 {
+ return ErrLimitExceeded
+ }
sz := uint32(len(b))
var err error
switch {
@@ -488,6 +493,10 @@ func (mw *Writer) WriteBool(b bool) error {
// WriteString writes a messagepack string to the writer.
// (This is NOT an implementation of io.StringWriter)
func (mw *Writer) WriteString(s string) error {
+ if uint64(len(s)) > math.MaxUint32 {
+ return ErrLimitExceeded
+ }
+
sz := uint32(len(s))
var err error
switch {
@@ -526,6 +535,9 @@ func (mw *Writer) WriteStringHeader(sz uint32) error {
// WriteStringFromBytes writes a 'str' object
// from a []byte.
func (mw *Writer) WriteStringFromBytes(str []byte) error {
+ if uint64(len(str)) > math.MaxUint32 {
+ return ErrLimitExceeded
+ }
sz := uint32(len(str))
var err error
switch {
@@ -591,7 +603,7 @@ func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) {
}
// WriteMapStrIntf writes a map[string]interface to the writer
-func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) {
+func (mw *Writer) WriteMapStrIntf(mp map[string]any) (err error) {
err = mw.WriteMapHeader(uint32(len(mp)))
if err != nil {
return
@@ -703,7 +715,7 @@ func (mw *Writer) WriteJSONNumber(n json.Number) error {
// - A pointer to a supported type
// - A type that satisfies the msgp.Encodable interface
// - A type that satisfies the msgp.Extension interface
-func (mw *Writer) WriteIntf(v interface{}) error {
+func (mw *Writer) WriteIntf(v any) error {
if v == nil {
return mw.WriteNil()
}
@@ -754,7 +766,7 @@ func (mw *Writer) WriteIntf(v interface{}) error {
return mw.WriteBytes(v)
case map[string]string:
return mw.WriteMapStrStr(v)
- case map[string]interface{}:
+ case map[string]any:
return mw.WriteMapStrIntf(v)
case time.Time:
return mw.WriteTime(v)
@@ -817,7 +829,7 @@ func (mw *Writer) writeSlice(v reflect.Value) (err error) {
if err != nil {
return
}
- for i := uint32(0); i < sz; i++ {
+ for i := range sz {
err = mw.WriteIntf(v.Index(int(i)).Interface())
if err != nil {
return
@@ -840,7 +852,7 @@ func isSupported(k reflect.Kind) bool {
// value of 'i'. If the underlying value is not
// a simple builtin (or []byte), GuessSize defaults
// to 512.
-func GuessSize(i interface{}) int {
+func GuessSize(i any) int {
if i == nil {
return NilSize
}
@@ -868,7 +880,7 @@ func GuessSize(i interface{}) int {
return Complex128Size
case bool:
return BoolSize
- case map[string]interface{}:
+ case map[string]any:
s := MapHeaderSize
for key, val := range i {
s += StringPrefixSize + len(key) + GuessSize(val)
@@ -884,3 +896,57 @@ func GuessSize(i interface{}) int {
return 512
}
}
+
+// Temporary buffer for reading/writing binary data.
+var bytesPool = sync.Pool{New: func() any { return make([]byte, 0, 1024) }}
+
+// WriteBinaryAppender will write the bytes from the given
+// encoding.BinaryAppender as a bin array.
+func (mw *Writer) WriteBinaryAppender(b encoding.BinaryAppender) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("msgp: panic during AppendBinary: %v", r)
+ }
+ }()
+ dst := bytesPool.Get().([]byte)
+ defer bytesPool.Put(dst) //nolint:staticcheck
+ dst, err = b.AppendBinary(dst[:0])
+ if err != nil {
+ return err
+ }
+ return mw.WriteBytes(dst)
+}
+
+// WriteTextAppender will write the bytes from the given
+// encoding.TextAppender as a bin array.
+func (mw *Writer) WriteTextAppender(b encoding.TextAppender) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("msgp: panic during AppendText: %v", r)
+ }
+ }()
+ dst := bytesPool.Get().([]byte)
+ defer bytesPool.Put(dst) //nolint:staticcheck
+ dst, err = b.AppendText(dst[:0])
+ if err != nil {
+ return err
+ }
+ return mw.WriteBytes(dst)
+}
+
+// WriteTextAppenderString will write the bytes from the given
+// encoding.TextAppender as a string.
+func (mw *Writer) WriteTextAppenderString(b encoding.TextAppender) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("msgp: panic during AppendText: %v", r)
+ }
+ }()
+ dst := bytesPool.Get().([]byte)
+ defer bytesPool.Put(dst) //nolint:staticcheck
+ dst, err = b.AppendText(dst[:0])
+ if err != nil {
+ return err
+ }
+ return mw.WriteStringFromBytes(dst)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
index 704501746a..378f14f91b 100644
--- a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
@@ -181,7 +181,7 @@ func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) }
func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
// AppendByte is analogous to AppendUint8
-func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
+func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, u) }
// AppendUint16 appends a uint16 to the slice
func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) }
@@ -371,7 +371,7 @@ func AppendMapStrStr(b []byte, m map[string]string) []byte {
// AppendMapStrIntf appends a map[string]interface{} to the slice
// as a MessagePack map with 'str'-type keys.
-func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
+func AppendMapStrIntf(b []byte, m map[string]any) ([]byte, error) {
sz := uint32(len(m))
b = AppendMapHeader(b, sz)
var err error
@@ -394,7 +394,7 @@ func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
// - A *T, where T is another supported type
// - A type that satisfies the msgp.Marshaler interface
// - A type that satisfies the msgp.Extension interface
-func AppendIntf(b []byte, i interface{}) ([]byte, error) {
+func AppendIntf(b []byte, i any) ([]byte, error) {
if i == nil {
return AppendNil(b), nil
}
@@ -444,13 +444,13 @@ func AppendIntf(b []byte, i interface{}) ([]byte, error) {
return AppendTime(b, i), nil
case time.Duration:
return AppendDuration(b, i), nil
- case map[string]interface{}:
+ case map[string]any:
return AppendMapStrIntf(b, i)
case map[string]string:
return AppendMapStrStr(b, i), nil
case json.Number:
return AppendJSONNumber(b, i)
- case []interface{}:
+ case []any:
b = AppendArrayHeader(b, uint32(len(i)))
var err error
for _, k := range i {
@@ -483,7 +483,7 @@ func AppendIntf(b []byte, i interface{}) ([]byte, error) {
case reflect.Array, reflect.Slice:
l := v.Len()
b = AppendArrayHeader(b, uint32(l))
- for i := 0; i < l; i++ {
+ for i := range l {
b, err = AppendIntf(b, v.Index(i).Interface())
if err != nil {
return b, err
@@ -518,3 +518,49 @@ func AppendJSONNumber(b []byte, n json.Number) ([]byte, error) {
}
return b, err
}
+
+// AppendBytesTwoPrefixed will add the length to a bin section written with
+// 2 bytes of space saved for a bin8 header.
+// If the sz cannot fit inside a bin8, the data will be moved to make space for the header.
+func AppendBytesTwoPrefixed(b []byte, sz int) []byte {
+ off := len(b) - sz - 2
+ switch {
+ case sz <= math.MaxUint8:
+ // Just write header...
+ prefixu8(b[off:], mbin8, uint8(sz))
+ case sz <= math.MaxUint16:
+ // Scoot one
+ b = append(b, 0)
+ copy(b[off+1:], b[off:])
+ prefixu16(b[off:], mbin16, uint16(sz))
+ default:
+ // Scoot three
+ b = append(b, 0, 0, 0)
+ copy(b[off+3:], b[off:])
+ prefixu32(b[off:], mbin32, uint32(sz))
+ }
+ return b
+}
+
+// AppendBytesStringTwoPrefixed will add the length to a string section written with
+// 2 bytes of space saved for a str8 header.
+// If the sz cannot fit inside a str8, the data will be moved to make space for the header.
+func AppendBytesStringTwoPrefixed(b []byte, sz int) []byte {
+ off := len(b) - sz - 2
+ switch {
+ case sz <= math.MaxUint8:
+ // Just write header...
+ prefixu8(b[off:], mstr8, uint8(sz))
+ case sz <= math.MaxUint16:
+ // Scoot one
+ b = append(b, 0)
+ copy(b[off+1:], b[off:])
+ prefixu16(b[off:], mstr16, uint16(sz))
+ default:
+ // Scoot three
+ b = append(b, 0, 0, 0)
+ copy(b[off+3:], b[off:])
+ prefixu32(b[off:], mstr32, uint32(sz))
+ }
+ return b
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8fb02d1745..65a68beafd 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -15,7 +15,7 @@ github.com/Azure/go-ansiterm/winterm
# github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
## explicit
github.com/Azure/go-ntlmssp
-# github.com/BurntSushi/toml v1.5.0
+# github.com/BurntSushi/toml v1.6.0
## explicit; go 1.18
github.com/BurntSushi/toml
github.com/BurntSushi/toml/internal
@@ -821,6 +821,11 @@ github.com/hashicorp/go-plugin/internal/cmdrunner
github.com/hashicorp/go-plugin/internal/grpcmux
github.com/hashicorp/go-plugin/internal/plugin
github.com/hashicorp/go-plugin/runner
+# github.com/hashicorp/golang-lru/v2 v2.0.7
+## explicit; go 1.18
+github.com/hashicorp/golang-lru/v2
+github.com/hashicorp/golang-lru/v2/internal
+github.com/hashicorp/golang-lru/v2/simplelru
# github.com/hashicorp/yamux v0.1.2
## explicit; go 1.20
github.com/hashicorp/yamux
@@ -1064,7 +1069,7 @@ github.com/miekg/dns
# github.com/mileusna/useragent v1.3.5
## explicit; go 1.14
github.com/mileusna/useragent
-# github.com/minio/crc64nvme v1.1.0
+# github.com/minio/crc64nvme v1.1.1
## explicit; go 1.22
github.com/minio/crc64nvme
# github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76
@@ -1073,8 +1078,8 @@ github.com/minio/highwayhash
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.97
-## explicit; go 1.23.0
+# github.com/minio/minio-go/v7 v7.0.98
+## explicit; go 1.24.0
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/cors
github.com/minio/minio-go/v7/pkg/credentials
@@ -1370,7 +1375,7 @@ github.com/opencloud-eu/icap-client
# github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76
## explicit; go 1.18
github.com/opencloud-eu/libre-graph-api-go
-# github.com/opencloud-eu/reva/v2 v2.41.1-0.20260107152322-93760b632993
+# github.com/opencloud-eu/reva/v2 v2.41.1-0.20260120144836-2769c3c07a19
## explicit; go 1.24.1
github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
github.com/opencloud-eu/reva/v2/cmd/revad/runtime
@@ -1835,7 +1840,7 @@ github.com/power-devops/perfstat
## explicit; go 1.16
github.com/pquerna/cachecontrol
github.com/pquerna/cachecontrol/cacheobject
-# github.com/prometheus/alertmanager v0.30.0
+# github.com/prometheus/alertmanager v0.30.1
## explicit; go 1.24.0
github.com/prometheus/alertmanager/asset
github.com/prometheus/alertmanager/featurecontrol
@@ -1938,7 +1943,7 @@ github.com/segmentio/asm/cpu/arm64
github.com/segmentio/asm/cpu/cpuid
github.com/segmentio/asm/cpu/x86
github.com/segmentio/asm/internal/unsafebytes
-# github.com/segmentio/kafka-go v0.4.49
+# github.com/segmentio/kafka-go v0.4.50
## explicit; go 1.23
github.com/segmentio/kafka-go
github.com/segmentio/kafka-go/compress
@@ -2121,9 +2126,10 @@ github.com/tidwall/pretty
# github.com/tidwall/sjson v1.2.5
## explicit; go 1.14
github.com/tidwall/sjson
-# github.com/tinylib/msgp v1.3.0
-## explicit; go 1.20
+# github.com/tinylib/msgp v1.6.1
+## explicit; go 1.24
github.com/tinylib/msgp/msgp
+github.com/tinylib/msgp/msgp/setof
# github.com/tklauser/go-sysconf v0.3.14
## explicit; go 1.18
github.com/tklauser/go-sysconf