From 3a225996a3b73a564bda8db153f60561e7627172 Mon Sep 17 00:00:00 2001 From: aly <16789036+aly-obol@users.noreply.github.com> Date: Tue, 13 Aug 2024 22:18:19 -0400 Subject: [PATCH 01/89] add third relay in p2p-relays (#3227) - dns failover to obol.dev --- cmd/cmd_internal_test.go | 4 ++-- cmd/run.go | 2 +- docs/configuration.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/cmd_internal_test.go b/cmd/cmd_internal_test.go index af7452edc0..2a985838b8 100644 --- a/cmd/cmd_internal_test.go +++ b/cmd/cmd_internal_test.go @@ -62,7 +62,7 @@ func TestCmdFlags(t *testing.T) { LokiService: "charon", }, P2P: p2p.Config{ - Relays: []string{"https://0.relay.obol.tech", "https://1.relay.obol.tech"}, + Relays: []string{"https://0.relay.obol.tech", "https://2.relay.obol.dev", "https://1.relay.obol.tech"}, TCPAddrs: nil, }, Feature: featureset.Config{ @@ -113,7 +113,7 @@ func TestCmdFlags(t *testing.T) { LokiService: "charon", }, P2P: p2p.Config{ - Relays: []string{"https://0.relay.obol.tech", "https://1.relay.obol.tech"}, + Relays: []string{"https://0.relay.obol.tech", "https://2.relay.obol.dev", "https://1.relay.obol.tech"}, TCPAddrs: nil, }, Feature: featureset.Config{ diff --git a/cmd/run.go b/cmd/run.go index 4ebaf210b7..1bc174f052 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -119,7 +119,7 @@ func bindLogFlags(flags *pflag.FlagSet, config *log.Config) { } func bindP2PFlags(cmd *cobra.Command, config *p2p.Config) { - cmd.Flags().StringSliceVar(&config.Relays, "p2p-relays", []string{"https://0.relay.obol.tech", "https://1.relay.obol.tech"}, "Comma-separated list of libp2p relay URLs or multiaddrs.") + cmd.Flags().StringSliceVar(&config.Relays, "p2p-relays", []string{"https://0.relay.obol.tech", "https://2.relay.obol.dev", "https://1.relay.obol.tech"}, "Comma-separated list of libp2p relay URLs or multiaddrs.") cmd.Flags().StringVar(&config.ExternalIP, "p2p-external-ip", "", "The IP address advertised by libp2p. 
This may be used to advertise an external IP.") cmd.Flags().StringVar(&config.ExternalHost, "p2p-external-hostname", "", "The DNS hostname advertised by libp2p. This may be used to advertise an external DNS.") cmd.Flags().StringSliceVar(&config.TCPAddrs, "p2p-tcp-address", nil, "Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. Empty default doesn't bind to local port therefore only supports outgoing connections.") diff --git a/docs/configuration.md b/docs/configuration.md index f41e32cb99..71fb0c6a5a 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -173,7 +173,7 @@ Flags: --p2p-disable-reuseport Disables TCP port reuse for outgoing libp2p connections. --p2p-external-hostname string The DNS hostname advertised by libp2p. This may be used to advertise an external DNS. --p2p-external-ip string The IP address advertised by libp2p. This may be used to advertise an external IP. - --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. (default [https://0.relay.obol.tech,https://1.relay.obol.tech]) + --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. (default [https://0.relay.obol.tech,https://2.relay.obol.dev,https://1.relay.obol.tech]) --p2p-tcp-address strings Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. Empty default doesn't bind to local port therefore only supports outgoing connections. --private-key-file string The path to the charon enr private key file. (default ".charon/charon-enr-private-key") --private-key-file-lock Enables private key locking to prevent multiple instances using the same key. 
From b3f19eb9d7c5ce05f8249f1d69e5093ec1f41253 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Aug 2024 12:31:00 +0000 Subject: [PATCH 02/89] build(deps): Bump github.com/attestantio/go-eth2-client from 0.21.9 to 0.21.10 (#3214) Bumps [github.com/attestantio/go-eth2-client](https://github.com/attestantio/go-eth2-client) from 0.21.9 to 0.21.10.
Changelog

Sourced from github.com/attestantio/go-eth2-client's changelog.

0.21.10:

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/attestantio/go-eth2-client&package-manager=go_modules&previous-version=0.21.9&new-version=0.21.10)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d21da46e74..947c439fe8 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/obolnetwork/charon go 1.22 require ( - github.com/attestantio/go-eth2-client v0.21.9 + github.com/attestantio/go-eth2-client v0.21.10 github.com/bufbuild/buf v1.35.1 github.com/coinbase/kryptology v1.5.6-0.20220316191335-269410e1b06b github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 From ed1c95bd2137fc263531ab91682284b47fdb9e44 Mon Sep 17 00:00:00 2001 From: Luke Hackett Date: Mon, 19 Aug 2024 18:27:48 +0100 Subject: [PATCH 03/89] testutil/promrated: fixing promrated network overview stats (#3234) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rated api seemed to change the url for this endpoint 👍🏼 category: misc ticket: #3233 --- testutil/promrated/rated.go | 2 +- testutil/promrated/rated_internal_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testutil/promrated/rated.go b/testutil/promrated/rated.go index 68fb76182e..bc2e08155f 100644 --- a/testutil/promrated/rated.go +++ b/testutil/promrated/rated.go @@ -35,7 +35,7 @@ func getNetworkStatistics(ctx context.Context, ratedEndpoint string, ratedAuth s return networkEffectivenessData{}, errors.Wrap(err, "parse rated endpoint") } - url.Path = "/v0/eth/network/stats" + url.Path = "/v0/eth/network/overview" body, err := queryRatedAPI(ctx, url, ratedAuth, network) if err != nil { diff --git a/testutil/promrated/rated_internal_test.go b/testutil/promrated/rated_internal_test.go index 04a72d4585..4ac3cfbe04 100644 --- a/testutil/promrated/rated_internal_test.go +++ b/testutil/promrated/rated_internal_test.go @@ -17,7 +17,7 @@ import ( func TestGetNetworkStatistics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, "/v0/eth/network/stats", r.URL.Path) + require.Equal(t, 
"/v0/eth/network/overview", r.URL.Path) require.Equal(t, "Bearer auth", r.Header.Get("Authorization")) require.Equal(t, "prater", r.Header.Get("X-Rated-Network")) From b3cd3eec947fde9077e8b167d9bb60a88e32f106 Mon Sep 17 00:00:00 2001 From: Ilia Groshev Date: Mon, 2 Sep 2024 07:26:27 +0100 Subject: [PATCH 04/89] docs: launchpad link is broken (#3231) goerli launchpad is down, and link goes nowhere category: docs ticket: none --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b868d6ad6a..bb96bfd399 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ This repo contains the source code for the distributed validator client _Charon_ (pronounced 'kharon'); a HTTP middleware client for Ethereum Staking that enables you to safely run a single validator across a group of independent nodes. -Charon is accompanied by a webapp called the [Distributed Validator Launchpad](https://goerli.launchpad.obol.tech/) for distributed validator key creation. +Charon is accompanied by a webapp called the [Distributed Validator Launchpad](https://holesky.launchpad.obol.tech/) for distributed validator key creation. Charon is used by stakers to distribute the responsibility of running Ethereum Validators across a number of different instances and client implementations. From 85d1dacb484dd50f94f2450b2ed0c6e22913266d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 21:56:54 +0000 Subject: [PATCH 05/89] build(deps): Bump golang.org/x/sync from 0.7.0 to 0.8.0 (#3217) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.7.0 to 0.8.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/sync&package-manager=go_modules&previous-version=0.7.0&new-version=0.8.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 947c439fe8..37cb5a484f 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/crypto v0.25.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 - golang.org/x/sync v0.7.0 + golang.org/x/sync v0.8.0 golang.org/x/term v0.22.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.23.0 diff --git a/go.sum b/go.sum index 8d31047ec5..a86851b35a 100644 --- a/go.sum +++ b/go.sum @@ -639,8 +639,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 096e6461116d341913a6c6c0b147206a09c0ec95 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 22:02:46 +0000 Subject: [PATCH 06/89] build(deps): Bump github.com/docker/docker from 27.1.0+incompatible to 27.1.1+incompatible (#3232) Bumps [github.com/docker/docker](https://github.com/docker/docker) from 27.1.0+incompatible to 
27.1.1+incompatible.
Release notes

Sourced from github.com/docker/docker's releases.

v27.1.1

27.1.1

Security

This release contains a fix for CVE-2024-41110 / GHSA-v23v-6jw2-98fq that impacted setups using authorization plugins (AuthZ) for access control. No other changes are included in this release, and this release is otherwise identical for users not using AuthZ plugins.

Packaging updates

Full Changelog: https://github.com/moby/moby/compare/v27.1.0...v27.1.1

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/docker/docker&package-manager=go_modules&previous-version=27.1.0+incompatible&new-version=27.1.1+incompatible)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ObolNetwork/charon/network/alerts).
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 37cb5a484f..5da0d75e12 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/docker/cli v26.1.4+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.1.0+incompatible // indirect + github.com/docker/docker v27.1.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect diff --git a/go.sum b/go.sum index a86851b35a..aba51444e7 100644 --- a/go.sum +++ b/go.sum @@ -110,8 +110,8 @@ github.com/docker/cli v26.1.4+incompatible h1:I8PHdc0MtxEADqYJZvhBrW9bo8gawKwwen github.com/docker/cli v26.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.1.0+incompatible h1:rEHVQc4GZ0MIQKifQPHSFGV/dVgaZafgRf8fCPtDYBs= -github.com/docker/docker v27.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= From ee1c8fb9ebe2b9b3cb5c57f5a9b4d736f6feab74 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 
2024 22:09:12 +0000 Subject: [PATCH 07/89] build(deps): Bump golang.org/x/time from 0.5.0 to 0.6.0 (#3216) Bumps [golang.org/x/time](https://github.com/golang/time) from 0.5.0 to 0.6.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/time&package-manager=go_modules&previous-version=0.5.0&new-version=0.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5da0d75e12..f6fe177eec 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/sync v0.8.0 golang.org/x/term v0.22.0 - golang.org/x/time v0.5.0 + golang.org/x/time v0.6.0 golang.org/x/tools v0.23.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 diff --git a/go.sum b/go.sum index aba51444e7..3859bbf316 100644 --- a/go.sum +++ b/go.sum @@ -681,8 +681,8 @@ golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 5453475d74d259abc98a490de0e713c72de61d1e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 22:51:15 +0000 Subject: [PATCH 08/89] build(deps): Bump golang.org/x/crypto from 0.25.0 to 0.26.0 (#3219) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.25.0 to 0.26.0.
Commits
  • 5bcd010 go.mod: update golang.org/x dependencies
  • 3375612 ssh: add support for unpadded RSA signatures
  • bb80217 ssh: don't use dsa keys in integration tests
  • 6879722 ssh: remove go 1.21+ dependency on slices
  • e983fa2 sha3: Avo port of keccakf_amd64.s
  • 80fd972 LICENSE: update per Google Legal
  • f2bc3a6 x509roots/fallback/internal/goissue52287: delete
  • d66d9c3 x509roots/fallback: update bundle
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/crypto&package-manager=go_modules&previous-version=0.25.0&new-version=0.26.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index f6fe177eec..2ff4e3328b 100644 --- a/go.mod +++ b/go.mod @@ -41,10 +41,10 @@ require ( go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.25.0 + golang.org/x/crypto v0.26.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/sync v0.8.0 - golang.org/x/term v0.22.0 + golang.org/x/term v0.23.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.23.0 google.golang.org/protobuf v1.34.2 @@ -189,8 +189,8 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.27.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/sys v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect diff --git a/go.sum b/go.sum index 3859bbf316..5a2f569214 100644 --- a/go.sum +++ b/go.sum @@ -591,8 +591,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= @@ -668,17 +668,17 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod 
h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= From fff444d6a92e647c7c0de4326da20f3e4e8676b0 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Wed, 4 Sep 2024 09:49:33 +0200 Subject: [PATCH 09/89] *: bump linter to v1.60.3 (#3247) Bump golangci-lint version to v1.60.3 category: misc ticket: none --- .github/workflows/golangci-lint.yml | 2 +- .golangci.yml | 19 ++++++++--------- .pre-commit/run_linter.sh | 2 +- app/eth2wrap/httpwrap.go | 1 - app/expbackoff/expbackoff.go | 8 ++++---- app/health/checks.go | 16 +++++++-------- app/health/checks_internal_test.go | 26 ++++++++++++------------ app/health/reducers.go | 8 ++++---- app/health/select.go | 8 ++++---- app/log/loki/client.go | 1 - app/obolapi/api.go | 1 - cluster/helpers.go | 1 - cmd/markdown_internal_test.go | 1 + cmd/testperformance.go | 1 - core/aggsigdb/memory_v2_internal_test.go | 4 ++-- core/tracker/tracker.go | 1 + core/validatorapi/router.go | 1 - eth2util/keymanager/keymanager.go | 1 - p2p/bootnode.go | 2 +- testutil/promrated/rated.go | 2 +- 20 files changed, 49 insertions(+), 57 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 735861149b..73051431d9 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.59.1 + version: v1.60.3 - name: notify failure if: failure() && github.ref == 'refs/heads/main' env: diff --git a/.golangci.yml b/.golangci.yml index 41a6796623..4ab7b147c8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -116,6 +116,12 @@ linters-settings: - 
expected-actual go-require: ignore-http-handlers: true + gosec: + excludes: + # Flags for potentially-unsafe casting of ints, seems good, + # but currently is really unstable with no clear way to make the linter pass. + # https://github.com/securego/gosec/issues/1187 + - G115 issues: fix: true @@ -158,7 +164,6 @@ linters: - gocyclo - godot - godox - - goerr113 - gomnd - gomoddirectives - inamedparam @@ -176,13 +181,5 @@ linters: - varnamelen - wsl # Deprecated - - deadcode - - exhaustivestruct - - golint - - ifshort - - interfacer - - maligned - - nosnakecase - - structcheck - - scopelint - - varcheck + - goerr113 + - execinquery diff --git a/.pre-commit/run_linter.sh b/.pre-commit/run_linter.sh index 46a738ec78..86d167724b 100755 --- a/.pre-commit/run_linter.sh +++ b/.pre-commit/run_linter.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -VERSION="1.59.1" +VERSION="1.60.3" if ! command -v golangci-lint &> /dev/null then diff --git a/app/eth2wrap/httpwrap.go b/app/eth2wrap/httpwrap.go index d257fa5d1c..747ab546d1 100644 --- a/app/eth2wrap/httpwrap.go +++ b/app/eth2wrap/httpwrap.go @@ -296,7 +296,6 @@ func httpPost(ctx context.Context, base string, endpoint string, body io.Reader, return nil, errors.Wrap(err, "failed to read POST response") } - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if res.StatusCode/100 != 2 { return nil, errors.New("post failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) } diff --git a/app/expbackoff/expbackoff.go b/app/expbackoff/expbackoff.go index f76c647a83..863c378168 100644 --- a/app/expbackoff/expbackoff.go +++ b/app/expbackoff/expbackoff.go @@ -148,14 +148,14 @@ func Backoff(config Config, retries int) time.Duration { } backoff := float64(config.BaseDelay) - max := float64(config.MaxDelay) + maxVal := float64(config.MaxDelay) - for backoff < max && retries > 0 { + for backoff < maxVal && retries > 0 { backoff *= config.Multiplier retries-- } - if backoff > max { - 
backoff = max + if backoff > maxVal { + backoff = maxVal } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. diff --git a/app/health/checks.go b/app/health/checks.go index 0af5b4e611..ef16038299 100644 --- a/app/health/checks.go +++ b/app/health/checks.go @@ -70,12 +70,12 @@ var checks = []check{ Description: "Beacon Node in syncing state.", Severity: severityCritical, Func: func(q query, _ Metadata) (bool, error) { - max, err := q("app_monitoring_beacon_node_syncing", noLabels, gaugeMax) + maxVal, err := q("app_monitoring_beacon_node_syncing", noLabels, gaugeMax) if err != nil { return false, err } - return max == 1, nil + return maxVal == 1, nil }, }, { @@ -83,14 +83,14 @@ var checks = []check{ Description: "Not connected to at least quorum peers. Check logs for networking issue or coordinate with peers.", Severity: severityCritical, Func: func(q query, m Metadata) (bool, error) { - max, err := q("p2p_ping_success", countNonZeroLabels, gaugeMax) + maxVal, err := q("p2p_ping_success", countNonZeroLabels, gaugeMax) if err != nil { return false, err } required := float64(m.QuorumPeers) - 1 // Exclude self - return max < required, nil + return maxVal < required, nil }, }, { @@ -98,14 +98,14 @@ var checks = []check{ Description: "Pending validators detected. Activate them to start validating.", Severity: severityInfo, Func: func(q query, _ Metadata) (bool, error) { - max, err := q("core_scheduler_validator_status", + maxVal, err := q("core_scheduler_validator_status", countLabels(l("status", "pending")), gaugeMax) if err != nil { return false, err } - return max > 0, nil + return maxVal > 0, nil }, }, { @@ -140,12 +140,12 @@ var checks = []check{ Description: "Metrics reached high cardinality threshold. 
Please check metrics reported by app_health_metrics_high_cardinality.", Severity: severityWarning, Func: func(q query, _ Metadata) (bool, error) { - max, err := q("app_health_metrics_high_cardinality", sumLabels(), gaugeMax) + maxVal, err := q("app_health_metrics_high_cardinality", sumLabels(), gaugeMax) if err != nil { return false, err } - return max > 0, nil + return maxVal > 0, nil }, }, } diff --git a/app/health/checks_internal_test.go b/app/health/checks_internal_test.go index 2e3b17aa06..332bc6bba0 100644 --- a/app/health/checks_internal_test.go +++ b/app/health/checks_internal_test.go @@ -407,19 +407,19 @@ func testCheck(t *testing.T, m Metadata, checkName string, expect bool, metrics genGauge(genLabels("bar", "bar2"), 1, 1, 1), ) - var max int - if len(metrics) > max { - max = len(metrics) + var maxVal int + if len(metrics) > maxVal { + maxVal = len(metrics) } - if len(randomFamFoo) > max { - max = len(randomFamFoo) + if len(randomFamFoo) > maxVal { + maxVal = len(randomFamFoo) } - if len(randomFamBar) > max { - max = len(randomFamBar) + if len(randomFamBar) > maxVal { + maxVal = len(randomFamBar) } - multiFams := make([][]*pb.MetricFamily, max) - for i := range max { + multiFams := make([][]*pb.MetricFamily, maxVal) + for i := range maxVal { var fam []*pb.MetricFamily if i < len(metrics) { fam = append(fam, metrics[i]) @@ -455,14 +455,14 @@ func genFam(name string, metrics ...[]*pb.Metric) []*pb.MetricFamily { typ = pb.MetricType_GAUGE } - var max int + var maxVal int for _, series := range metrics { - if len(series) > max { - max = len(series) + if len(series) > maxVal { + maxVal = len(series) } } - resp := make([]*pb.MetricFamily, max) + resp := make([]*pb.MetricFamily, maxVal) for _, series := range metrics { for i, metric := range series { if resp[i] == nil { diff --git a/app/health/reducers.go b/app/health/reducers.go index 52507cb7fb..a81ab54285 100644 --- a/app/health/reducers.go +++ b/app/health/reducers.go @@ -29,16 +29,16 @@ func 
increase(samples []*pb.Metric) (float64, error) { // gaugeMax returns the maximum value in a time series of gauge metrics. func gaugeMax(samples []*pb.Metric) (float64, error) { - var max float64 + var maxVal float64 for _, sample := range samples { if sample.GetGauge() == nil { return 0, errors.New("bug: non-gauge metric passed") } - if sample.GetGauge().GetValue() > max { - max = sample.GetGauge().GetValue() + if sample.GetGauge().GetValue() > maxVal { + maxVal = sample.GetGauge().GetValue() } } - return max, nil + return maxVal, nil } diff --git a/app/health/select.go b/app/health/select.go index 3d42f5b685..a1c17d62e2 100644 --- a/app/health/select.go +++ b/app/health/select.go @@ -15,8 +15,8 @@ type labelSelector func(*pb.MetricFamily) (*pb.Metric, error) // maxLabel returns the metric with the highest value. func maxLabel(metricsFam *pb.MetricFamily) *pb.Metric { //nolint: unused // This is used in the future. var ( - max float64 - resp *pb.Metric + maxVal float64 + resp *pb.Metric ) for _, metric := range metricsFam.GetMetric() { var val float64 @@ -29,8 +29,8 @@ func maxLabel(metricsFam *pb.MetricFamily) *pb.Metric { //nolint: unused // This panic("invalid metric type for simple value labelSelector") } - if max == 0 || val > max { - max = val + if maxVal == 0 || val > maxVal { + maxVal = val resp = metric } } diff --git a/app/log/loki/client.go b/app/log/loki/client.go index e1b3c18c9f..327fd72be4 100644 --- a/app/log/loki/client.go +++ b/app/log/loki/client.go @@ -210,7 +210,6 @@ func send(ctx context.Context, client *http.Client, endpoint string, batch *batc } defer resp.Body.Close() - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if resp.StatusCode/100 != 2 { scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen)) line := "" diff --git a/app/obolapi/api.go b/app/obolapi/api.go index 0c58a67b40..4ce60a1a88 100644 --- a/app/obolapi/api.go +++ b/app/obolapi/api.go @@ -123,7 +123,6 @@ 
func httpPost(ctx context.Context, url *url.URL, b []byte) error { return errors.Wrap(err, "failed to read POST response") } - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if res.StatusCode/100 != 2 { return errors.New("post failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) } diff --git a/cluster/helpers.go b/cluster/helpers.go index 788bdc696e..8ff3ee1ef8 100644 --- a/cluster/helpers.go +++ b/cluster/helpers.go @@ -40,7 +40,6 @@ func FetchDefinition(ctx context.Context, url string) (Definition, error) { return Definition{}, errors.Wrap(err, "fetch file") } - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if resp.StatusCode/100 != 2 { return Definition{}, errors.New("http error", z.Int("status_code", resp.StatusCode)) } diff --git a/cmd/markdown_internal_test.go b/cmd/markdown_internal_test.go index b46a95a47b..98815d0e39 100644 --- a/cmd/markdown_internal_test.go +++ b/cmd/markdown_internal_test.go @@ -205,6 +205,7 @@ func writeMarkdown(t *testing.T, file string, tpl *template.Template, data any) content, err := os.ReadFile(file) require.NoError(t, err) + //nolint:testifylint // don't remove fmt.Sprintf, it's not unnecessary require.Equal(t, string(content), result, fmt.Sprintf("%s doesn't contain latest metrics.\n"+ "To fix, run: go test github.com/obolnetwork/charon/cmd -update-markdown", file)) diff --git a/cmd/testperformance.go b/cmd/testperformance.go index 7acd4b8395..69e5980d4a 100644 --- a/cmd/testperformance.go +++ b/cmd/testperformance.go @@ -656,7 +656,6 @@ func fetchOoklaServer(_ context.Context, conf *testPerformanceConfig) (speedtest } if len(conf.InternetTestServersExclude) != 0 { - var targets speedtest.Servers for _, server := range serverList { if !slices.Contains(conf.InternetTestServersExclude, server.Name) { targets = append(targets, server) diff --git a/core/aggsigdb/memory_v2_internal_test.go 
b/core/aggsigdb/memory_v2_internal_test.go index 1b15cb9461..6bd30a5bee 100644 --- a/core/aggsigdb/memory_v2_internal_test.go +++ b/core/aggsigdb/memory_v2_internal_test.go @@ -35,8 +35,8 @@ func TestDutyExpirationV2(t *testing.T) { deadliner.Expire() - require.Zero(t, len(db.data)) - require.Zero(t, len(db.keysByDuty)) + require.Empty(t, db.data) + require.Empty(t, db.keysByDuty) } func TestCancelledQueryV2(t *testing.T) { diff --git a/core/tracker/tracker.go b/core/tracker/tracker.go index f9bf3d6956..728c0e6f70 100644 --- a/core/tracker/tracker.go +++ b/core/tracker/tracker.go @@ -196,6 +196,7 @@ func dutyFailedStep(es []event) (bool, step, error) { } // Final step was successful. + //nolint:gosec // false positive slice index out of range if lastEvent.step == lastStep(es[0].duty.Type) && lastEvent.stepErr == nil { return false, zero, nil } diff --git a/core/validatorapi/router.go b/core/validatorapi/router.go index 6de62dbb91..8abd8af58c 100644 --- a/core/validatorapi/router.go +++ b/core/validatorapi/router.go @@ -1144,7 +1144,6 @@ func writeError(ctx context.Context, w http.ResponseWriter, endpoint string, err } } - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if aerr.StatusCode/100 == 4 { // 4xx status codes are client errors (not server), so log as debug only. 
log.Debug(ctx, "Validator api 4xx response", diff --git a/eth2util/keymanager/keymanager.go b/eth2util/keymanager/keymanager.go index fdfaed76bd..124f0dfab6 100644 --- a/eth2util/keymanager/keymanager.go +++ b/eth2util/keymanager/keymanager.go @@ -110,7 +110,6 @@ func postKeys(ctx context.Context, addr, authToken string, reqBody keymanagerReq } _ = resp.Body.Close() - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if resp.StatusCode/100 != 2 { return errors.New("failed posting keys", z.Int("status", resp.StatusCode), z.Str("body", string(data))) } diff --git a/p2p/bootnode.go b/p2p/bootnode.go index 53d7f33800..4ebd4495b3 100644 --- a/p2p/bootnode.go +++ b/p2p/bootnode.go @@ -153,7 +153,7 @@ func queryRelayAddrs(ctx context.Context, relayURL string, backoff func(), lockH if err != nil { log.Warn(ctx, "Failure querying relay addresses (will try again)", err) continue - } else if resp.StatusCode/100 != 2 { //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable + } else if resp.StatusCode/100 != 2 { log.Warn(ctx, "Non-200 response querying relay addresses (will try again)", nil, z.Int("status_code", resp.StatusCode)) continue } diff --git a/testutil/promrated/rated.go b/testutil/promrated/rated.go index bc2e08155f..ed14c384f8 100644 --- a/testutil/promrated/rated.go +++ b/testutil/promrated/rated.go @@ -106,7 +106,7 @@ func queryRatedAPI(ctx context.Context, url *url.URL, ratedAuth string, network backoff() continue - } else if res.StatusCode/100 != 2 { //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable + } else if res.StatusCode/100 != 2 { incRatedErrors(res.StatusCode) return nil, errors.New("not ok http response", z.Str("body", string(body))) From c929e8a112f2656e85a97aac08c52d4b6657174c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 
10:06:26 +0000 Subject: [PATCH 10/89] build(deps): Bump sigp/lighthouse from v5.2.1 to v5.3.0 in /testutil/compose/static/lighthouse (#3223) Bumps sigp/lighthouse from v5.2.1 to v5.3.0. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=sigp/lighthouse&package-manager=docker&previous-version=v5.2.1&new-version=v5.3.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- testutil/compose/static/lighthouse/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testutil/compose/static/lighthouse/Dockerfile b/testutil/compose/static/lighthouse/Dockerfile index 683787c437..4883967b82 100644 --- a/testutil/compose/static/lighthouse/Dockerfile +++ b/testutil/compose/static/lighthouse/Dockerfile @@ -1,4 +1,4 @@ -FROM sigp/lighthouse:v5.2.1 +FROM sigp/lighthouse:v5.3.0 ENV YQ_VERSION=v4.42.1 From 8a60188a402e2892f6a16f97c37579049ad83459 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 11:36:52 +0000 Subject: [PATCH 11/89] build(deps): Bump chainsafe/lodestar from v1.20.2 to v1.21.0 in /testutil/compose/static/lodestar (#3225) Bumps chainsafe/lodestar from v1.20.2 to v1.21.0. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=chainsafe/lodestar&package-manager=docker&previous-version=v1.20.2&new-version=v1.21.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- testutil/compose/static/lodestar/Dockerfile | 4 ++-- testutil/compose/static/lodestar/run.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/testutil/compose/static/lodestar/Dockerfile b/testutil/compose/static/lodestar/Dockerfile index 5745c45284..57430c877c 100644 --- a/testutil/compose/static/lodestar/Dockerfile +++ b/testutil/compose/static/lodestar/Dockerfile @@ -1,6 +1,6 @@ -FROM chainsafe/lodestar:v1.20.2 +FROM chainsafe/lodestar:v1.21.0 -RUN apk update && apk add curl jq wget +RUN apt-get update && apt-get install -y curl jq wget ENV YQ_VERSION=v4.23.1 ENV YQ_BINARY=yq_linux_amd64 diff --git a/testutil/compose/static/lodestar/run.sh b/testutil/compose/static/lodestar/run.sh index b8350d0e10..b5b1c12ed5 100755 --- a/testutil/compose/static/lodestar/run.sh +++ b/testutil/compose/static/lodestar/run.sh @@ -17,7 +17,7 @@ for f in /compose/"${NODE}"/validator_keys/keystore-*.json; do node /usr/app/packages/cli/bin/lodestar validator import \ --network="dev" \ --importKeystores="$f" \ - --importKeystoresPassword="${f//json/txt}" + --importKeystoresPassword="${f%.json}.txt" done echo "Imported all keys" From 369e2d7e851d63ae92ebaf09d6abf1eee1a9168a Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Wed, 4 Sep 2024 17:44:48 +0200 Subject: [PATCH 12/89] cmd: refactor exits (#3248) Refactor exits logic. There is no change in the logic anywhere. The main differences are: - fetching the validator index and validator pub key are in separate functions, which makes it more readable what is the flow - `ExpertMode` is renamed to `SkipBeaconNodeCheck` as this is essentially what it does If others agree, I'm in favour of completely removing the `SkipBeaconNodeCheck` config, as it is unnecessary. 
category: refactor ticket: none --- cmd/exit.go | 2 +- cmd/exit_sign.go | 146 ++++++++++++++++++--------------- cmd/exit_sign_internal_test.go | 12 +-- 3 files changed, 86 insertions(+), 74 deletions(-) diff --git a/cmd/exit.go b/cmd/exit.go index 97e8350d49..1f86423d67 100644 --- a/cmd/exit.go +++ b/cmd/exit.go @@ -22,7 +22,7 @@ type exitConfig struct { ValidatorPubkey string ValidatorIndex uint64 ValidatorIndexPresent bool - ExpertMode bool + SkipBeaconNodeCheck bool PrivateKeyPath string ValidatorKeysDir string LockFilePath string diff --git a/cmd/exit_sign.go b/cmd/exit_sign.go index 2723f69b5c..e75b14367f 100644 --- a/cmd/exit_sign.go +++ b/cmd/exit_sign.go @@ -5,7 +5,6 @@ package cmd import ( "context" "fmt" - "strings" eth2api "github.com/attestantio/go-eth2-client/api" eth2p0 "github.com/attestantio/go-eth2-client/spec/phase0" @@ -13,6 +12,7 @@ import ( "github.com/spf13/cobra" "github.com/obolnetwork/charon/app/errors" + "github.com/obolnetwork/charon/app/eth2wrap" "github.com/obolnetwork/charon/app/k1util" "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/obolapi" @@ -60,13 +60,13 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c valIdxPresent := cmd.Flags().Lookup(validatorIndex.String()).Changed valPubkPresent := cmd.Flags().Lookup(validatorPubkey.String()).Changed - if strings.TrimSpace(config.ValidatorPubkey) == "" && !valIdxPresent { + if !valPubkPresent && !valIdxPresent { //nolint:revive // we use our own version of the errors package. 
return errors.New(fmt.Sprintf("either %s or %s must be specified at least.", validatorIndex.String(), validatorPubkey.String())) } config.ValidatorIndexPresent = valIdxPresent - config.ExpertMode = valIdxPresent && valPubkPresent + config.SkipBeaconNodeCheck = valIdxPresent && valPubkPresent return nil }) @@ -100,110 +100,122 @@ func runSignPartialExit(ctx context.Context, config exitConfig) error { return errors.Wrap(err, "could not match local validator key shares with their counterparty in cluster lock") } - validator := core.PubKey(config.ValidatorPubkey) + shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) + if err != nil { + return errors.Wrap(err, "could not determine operator index from cluster lock for supplied identity key") + } - valEth2, err := validator.ToETH2() + oAPI, err := obolapi.New(config.PublishAddress, obolapi.WithTimeout(config.PublishTimeout)) if err != nil { - if (strings.TrimSpace(config.ValidatorPubkey) != "" && !config.ValidatorIndexPresent) || config.ExpertMode { - return errors.Wrap(err, "cannot convert validator pubkey to bytes") - } + return errors.Wrap(err, "could not create obol api client") } - switch { - case config.ExpertMode: - ctx = log.WithCtx(ctx, z.U64("validator_index", config.ValidatorIndex), z.Str("validator", validator.String())) - case config.ValidatorIndexPresent && !config.ExpertMode: + eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte(cl.GetForkVersion())) + if err != nil { + return errors.Wrap(err, "cannot create eth2 client for specified beacon node") + } + + if config.ValidatorIndexPresent { ctx = log.WithCtx(ctx, z.U64("validator_index", config.ValidatorIndex)) - default: - ctx = log.WithCtx(ctx, z.Str("validator", validator.String())) + } + if config.ValidatorPubkey != "" { + ctx = log.WithCtx(ctx, z.Str("validator_pubkey", config.ValidatorPubkey)) } - shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) + if 
config.SkipBeaconNodeCheck { + log.Info(ctx, "Both public key and index are specified, beacon node won't be checked for validator existence/liveness") + } + + valEth2, err := fetchValidatorBLSPubKey(ctx, config, eth2Cl) if err != nil { - return errors.Wrap(err, "could not determine operator index from cluster lock for supplied identity key") + return errors.Wrap(err, "cannot fetch validator public key") } + validator := core.PubKeyFrom48Bytes(valEth2) + ourShare, ok := shares[validator] if !ok { - if (strings.TrimSpace(config.ValidatorPubkey) != "" && !config.ValidatorIndexPresent) || config.ExpertMode { - return errors.New("validator not present in cluster lock", z.Str("validator", validator.String())) - } + return errors.New("validator not present in cluster lock", z.Str("validator", validator.String())) } - oAPI, err := obolapi.New(config.PublishAddress, obolapi.WithTimeout(config.PublishTimeout)) + valIndex, err := fetchValidatorIndex(ctx, config, eth2Cl) if err != nil { - return errors.Wrap(err, "could not create obol api client") + return errors.Wrap(err, "cannot fetch validator index") } log.Info(ctx, "Signing exit message for validator") - var valIndex eth2p0.ValidatorIndex - var valIndexFound bool - - valAPICallOpts := ð2api.ValidatorsOpts{ - State: "head", + exitMsg, err := signExit(ctx, eth2Cl, valIndex, ourShare.Share, eth2p0.Epoch(config.ExitEpoch)) + if err != nil { + return errors.Wrap(err, "cannot sign partial exit message") } - if config.ValidatorIndexPresent { - valAPICallOpts.Indices = []eth2p0.ValidatorIndex{ - eth2p0.ValidatorIndex(config.ValidatorIndex), - } - valIndex = eth2p0.ValidatorIndex(config.ValidatorIndex) - } else { - valAPICallOpts.PubKeys = []eth2p0.BLSPubKey{ - valEth2, - } + exitBlob := obolapi.ExitBlob{ + PublicKey: valEth2.String(), + SignedExitMessage: exitMsg, } - eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte(cl.GetForkVersion())) - if err != nil { - return errors.Wrap(err, 
"cannot create eth2 client for specified beacon node") + if err := oAPI.PostPartialExit(ctx, cl.GetInitialMutationHash(), shareIdx, identityKey, exitBlob); err != nil { + return errors.Wrap(err, "could not POST partial exit message to Obol API") } - if !config.ExpertMode { - rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) + return nil +} + +func fetchValidatorBLSPubKey(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client) (eth2p0.BLSPubKey, error) { + if config.ValidatorPubkey != "" { + valEth2, err := core.PubKey(config.ValidatorPubkey).ToETH2() if err != nil { - return errors.Wrap(err, "cannot fetch validator") + return eth2p0.BLSPubKey{}, errors.Wrap(err, "cannot convert validator pubkey to bytes") } - valData := rawValData.Data + return valEth2, nil + } - for _, val := range valData { - if val.Validator.PublicKey == valEth2 || val.Index == eth2p0.ValidatorIndex(config.ValidatorIndex) { - valIndex = val.Index - valIndexFound = true + valAPICallOpts := ð2api.ValidatorsOpts{ + State: "head", + Indices: []eth2p0.ValidatorIndex{eth2p0.ValidatorIndex(config.ValidatorIndex)}, + } - // re-initialize state variable after looking up all the necessary details, since user only provided a validator index - if config.ValidatorIndexPresent { - valEth2 = val.Validator.PublicKey - ourShare, ok = shares[core.PubKeyFrom48Bytes(valEth2)] - if !ok && !config.ValidatorIndexPresent { - return errors.New("validator not present in cluster lock", z.U64("validator_index", config.ValidatorIndex), z.Str("validator", validator.String())) - } - } + rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) + if err != nil { + return eth2p0.BLSPubKey{}, errors.Wrap(err, "cannot fetch validators") + } - break - } + for _, val := range rawValData.Data { + if val.Index == eth2p0.ValidatorIndex(config.ValidatorIndex) { + return val.Validator.PublicKey, nil } + } - if !valIndexFound { - return errors.New("validator index not found in beacon node response") - } + return 
eth2p0.BLSPubKey{}, errors.New("validator index not found in beacon node response") +} + +func fetchValidatorIndex(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client) (eth2p0.ValidatorIndex, error) { + if config.ValidatorIndexPresent { + return eth2p0.ValidatorIndex(config.ValidatorIndex), nil } - exitMsg, err := signExit(ctx, eth2Cl, valIndex, ourShare.Share, eth2p0.Epoch(config.ExitEpoch)) + valEth2, err := core.PubKey(config.ValidatorPubkey).ToETH2() if err != nil { - return errors.Wrap(err, "cannot sign partial exit message") + return 0, errors.Wrap(err, "cannot convert validator pubkey to bytes") } - exitBlob := obolapi.ExitBlob{ - PublicKey: valEth2.String(), - SignedExitMessage: exitMsg, + valAPICallOpts := ð2api.ValidatorsOpts{ + State: "head", + PubKeys: []eth2p0.BLSPubKey{valEth2}, } - if err := oAPI.PostPartialExit(ctx, cl.GetInitialMutationHash(), shareIdx, identityKey, exitBlob); err != nil { - return errors.Wrap(err, "could not POST partial exit message to Obol API") + rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) + if err != nil { + return 0, errors.Wrap(err, "cannot fetch validators") } - return nil + for _, val := range rawValData.Data { + if val.Validator.PublicKey == valEth2 { + return val.Index, nil + } + } + + return 0, errors.New("validator public key not found in beacon node response") } diff --git a/cmd/exit_sign_internal_test.go b/cmd/exit_sign_internal_test.go index 772da2650d..536129a122 100644 --- a/cmd/exit_sign_internal_test.go +++ b/cmd/exit_sign_internal_test.go @@ -92,7 +92,7 @@ func Test_runSubmitPartialExit(t *testing.T) { ) }) - t.Run("main flow with expert mode with bad pubkey", func(t *testing.T) { + t.Run("main flow with skipBeaconNodeCheck mode with bad pubkey", func(t *testing.T) { runSubmitPartialExitFlowTest( t, true, @@ -103,7 +103,7 @@ func Test_runSubmitPartialExit(t *testing.T) { ) }) - t.Run("main flow with expert mode with pubkey not found in cluster lock", func(t *testing.T) { + t.Run("main 
flow with skipBeaconNodeCheck mode with pubkey not found in cluster lock", func(t *testing.T) { runSubmitPartialExitFlowTest( t, true, @@ -120,14 +120,14 @@ func Test_runSubmitPartialExit(t *testing.T) { t.Run("main flow with validator index", func(t *testing.T) { runSubmitPartialExitFlowTest(t, true, false, "", 0, "") }) - t.Run("main flow with expert mode", func(t *testing.T) { + t.Run("main flow with skipBeaconNodeCheck mode", func(t *testing.T) { runSubmitPartialExitFlowTest(t, true, true, "", 0, "") }) t.Run("config", Test_runSubmitPartialExit_Config) } -func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, expertMode bool, valPubkey string, valIndex uint64, errString string) { +func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, skipBeaconNodeCheck bool, valPubkey string, valIndex uint64, errString string) { t.Helper() t.Parallel() ctx := context.Background() @@ -215,11 +215,11 @@ func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, expertMode bool, pubkey = valPubkey } - if expertMode { + if skipBeaconNodeCheck { config.ValidatorIndex = index config.ValidatorIndexPresent = true config.ValidatorPubkey = pubkey - config.ExpertMode = true + config.SkipBeaconNodeCheck = true } else { if useValIdx { config.ValidatorIndex = index From faba6c307b6ae8f09018e2d2cb7ec5931153b806 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 07:43:47 +0000 Subject: [PATCH 13/89] build(deps): Bump github.com/pelletier/go-toml/v2 from 2.2.2 to 2.2.3 (#3254) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/pelletier/go-toml/v2](https://github.com/pelletier/go-toml) from 2.2.2 to 2.2.3.
Release notes

Sourced from github.com/pelletier/go-toml/v2's releases.

v2.2.3

What's Changed

What's new

Performance

Fixed bugs

Documentation

Other changes

New Contributors

Full Changelog: https://github.com/pelletier/go-toml/compare/v2.2.2...v2.2.3

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/pelletier/go-toml/v2&package-manager=go_modules&previous-version=2.2.2&new-version=2.2.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 2ff4e3328b..29fc19806e 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.13.0 - github.com/pelletier/go-toml/v2 v2.2.2 + github.com/pelletier/go-toml/v2 v2.2.3 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/protolambda/eth2-shuffle v1.1.0 diff --git a/go.sum b/go.sum index 5a2f569214..88b8220c99 100644 --- a/go.sum +++ b/go.sum @@ -399,8 +399,8 @@ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pk910/dynamic-ssz v0.0.3 h1:fCWzFowq9P6SYCc7NtJMkZcIHk+r5hSVD+32zVi6Aio= github.com/pk910/dynamic-ssz v0.0.3/go.mod h1:b6CrLaB2X7pYA+OSEEbkgXDEcRnjLOZIxZTsMuO/Y9c= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -516,7 +516,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= From f63dd7090f8e6c3a44d111658f458029fd1486ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 07:44:12 +0000 Subject: [PATCH 14/89] build(deps): Bump github.com/showwin/speedtest-go from 1.7.8 to 1.7.9 (#3255) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/showwin/speedtest-go](https://github.com/showwin/speedtest-go) from 1.7.8 to 1.7.9.
Release notes

Sourced from github.com/showwin/speedtest-go's releases.

v1.7.9

What's Changed

Full Changelog: https://github.com/showwin/speedtest-go/compare/v1.7.8...v1.7.9

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/showwin/speedtest-go&package-manager=go_modules&previous-version=1.7.8&new-version=1.7.9)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 29fc19806e..0dbd74d3ef 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e github.com/r3labs/sse/v2 v2.10.0 github.com/rs/zerolog v1.33.0 - github.com/showwin/speedtest-go v1.7.8 + github.com/showwin/speedtest-go v1.7.9 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 diff --git a/go.sum b/go.sum index 88b8220c99..1233f3ef91 100644 --- a/go.sum +++ b/go.sum @@ -457,8 +457,8 @@ github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgY github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/showwin/speedtest-go v1.7.8 h1:UZbFQ/ArVgPvkR03egSeTM2FXBd6qJsLp8lzt9aeod0= -github.com/showwin/speedtest-go v1.7.8/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= +github.com/showwin/speedtest-go v1.7.9 h1:5b3T3U3WSppVXFqsIqF1zdHRYKKVuPNpzFU71HnYNEY= +github.com/showwin/speedtest-go v1.7.9/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= From 4f89490b14b46edfa45a04cd5037b9684423b036 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Mon, 9 Sep 2024 10:14:40 +0200 Subject: [PATCH 15/89] *: bump golang to 1.23.1 (#3261) Security fixes... 
category: misc ticket: none --- .github/actions/setup-go/action.yml | 2 +- .golangci.yml | 2 +- Dockerfile | 2 +- testutil/promrated/Dockerfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/actions/setup-go/action.yml b/.github/actions/setup-go/action.yml index d36e379a95..9509680ffc 100644 --- a/.github/actions/setup-go/action.yml +++ b/.github/actions/setup-go/action.yml @@ -6,4 +6,4 @@ runs: - name: Setup go uses: actions/setup-go@v4 with: - go-version: '1.22.5' + go-version: '1.23.1' diff --git a/.golangci.yml b/.golangci.yml index 4ab7b147c8..780e633046 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,6 @@ run: timeout: 5m - go: "1.22.5" + go: "1.23.1" linters-settings: cyclop: max-complexity: 15 diff --git a/Dockerfile b/Dockerfile index f9ab8a8bea..b3879b69e1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Container for building Go binary. -FROM golang:1.22.5-bookworm AS builder +FROM golang:1.23.1-bookworm AS builder # Install dependencies RUN apt-get update && apt-get install -y build-essential git # Prep and copy source diff --git a/testutil/promrated/Dockerfile b/testutil/promrated/Dockerfile index d75382daf3..6de3992ad1 100644 --- a/testutil/promrated/Dockerfile +++ b/testutil/promrated/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.5-alpine AS builder +FROM golang:1.23.1-alpine AS builder # Install dependencies RUN apk add --no-cache build-base git From 0e003d28afae603d7e035e2dd4d827190b59c550 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 08:36:54 +0000 Subject: [PATCH 16/89] build(deps): Bump github.com/herumi/bls-eth-go-binary from 1.35.0 to 1.36.1 (#3256) Bumps [github.com/herumi/bls-eth-go-binary](https://github.com/herumi/bls-eth-go-binary) from 1.35.0 to 1.36.1.
Commits
  • 2968c0f fix(release): added libs for windows
  • 15b40ca fix(release): added libs for ios/ios simulator
  • e61bb5b fix(release): added libs for darwin
  • 07189b7 fix(release): added libs for linux(amd64/arm64)
  • 282b920 Merge branch 'master' into release
  • f170c21 [skip ci] release build uses gcc on x86-64
  • 930a992 fix(release): added libs for windows
  • 0d1d676 fix(release): added libs for ios/ios simulator
  • 0069a43 fix(release): added libs for darwin
  • 5666078 fix(release): added libs for android
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/herumi/bls-eth-go-binary&package-manager=go_modules&previous-version=1.35.0&new-version=1.36.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0dbd74d3ef..21ec1e0e86 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/gofuzz v1.2.0 github.com/gorilla/mux v1.8.1 - github.com/herumi/bls-eth-go-binary v1.35.0 + github.com/herumi/bls-eth-go-binary v1.36.1 github.com/holiman/uint256 v1.3.1 github.com/ipfs/go-log/v2 v2.5.1 github.com/jonboulle/clockwork v0.4.0 diff --git a/go.sum b/go.sum index 1233f3ef91..cbae1c4fd6 100644 --- a/go.sum +++ b/go.sum @@ -228,8 +228,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/herumi/bls-eth-go-binary v1.35.0 h1:4CgrKurBK4g0ZMKBdHq5CwK9slYe7Ei+HF+/n6RSkOI= -github.com/herumi/bls-eth-go-binary v1.35.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= +github.com/herumi/bls-eth-go-binary v1.36.1 h1:SfLjxbO1fWkKtKS7J3Ezd1/5QXrcaTZgWynxdSe10hQ= +github.com/herumi/bls-eth-go-binary v1.36.1/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= From 92bdfda19945800e6a81d2cc5be6aecd9f32b9fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexandre=20Adomnic=C4=83i?= Date: Mon, 9 Sep 2024 11:40:08 +0200 Subject: [PATCH 17/89] cmd: hardening threshold parameter checks (#3242) Ensure the threshold parameter given as input to the `create dkg` and `create cluster` commands is sound. 
category: bug ticket: #3241 --- cmd/createcluster.go | 18 ++++++++++++ cmd/createcluster_internal_test.go | 47 +++++++++++++++++++++++++++++- cmd/createdkg.go | 16 ++++++---- cmd/createdkg_internal_test.go | 34 ++++++++++++++------- 4 files changed, 98 insertions(+), 17 deletions(-) diff --git a/cmd/createcluster.go b/cmd/createcluster.go index 5050182177..249feec920 100644 --- a/cmd/createcluster.go +++ b/cmd/createcluster.go @@ -10,6 +10,7 @@ import ( "encoding/json" "fmt" "io" + "math" "net/url" "os" "path" @@ -51,6 +52,7 @@ const ( zeroAddress = "0x0000000000000000000000000000000000000000" defaultNetwork = "mainnet" minNodes = 3 + minThreshold = 2 ) type clusterConfig struct { @@ -144,6 +146,7 @@ func runCreateCluster(ctx context.Context, w io.Writer, conf clusterConfig) erro } conf.NumNodes = len(def.Operators) + conf.Threshold = def.Threshold } if err = validateCreateConfig(ctx, conf); err != nil { @@ -366,6 +369,21 @@ func validateCreateConfig(ctx context.Context, conf clusterConfig) error { } } + // Don't allow cluster size to be less than 3. 
+ if conf.NumNodes < minNodes { + return errors.New("number of operators is below minimum", z.Int("operators", conf.NumNodes), z.Int("min", minNodes)) + } + + // Check for threshold parameter + minThreshold := int(math.Ceil(float64(conf.NumNodes*2) / 3)) + if conf.Threshold < minThreshold { + return errors.New("threshold cannot be smaller than BFT quorum", z.Int("threshold", conf.Threshold), z.Int("min", minThreshold)) + } + if conf.Threshold > conf.NumNodes { + return errors.New("threshold cannot be greater than number of operators", + z.Int("threshold", conf.Threshold), z.Int("operators", conf.NumNodes)) + } + return nil } diff --git a/cmd/createcluster_internal_test.go b/cmd/createcluster_internal_test.go index 0d066474f0..95be0bad46 100644 --- a/cmd/createcluster_internal_test.go +++ b/cmd/createcluster_internal_test.go @@ -39,6 +39,10 @@ func TestCreateCluster(t *testing.T) { def, err := loadDefinition(context.Background(), defPath) require.NoError(t, err) + defPathTwoNodes := "../cluster/examples/cluster-definition-001.json" + defTwoNodes, err := loadDefinition(context.Background(), defPathTwoNodes) + require.NoError(t, err) + tests := []struct { Name string Config clusterConfig @@ -218,7 +222,7 @@ func TestCreateCluster(t *testing.T) { Config: clusterConfig{ Name: "test_cluster", NumNodes: 3, - Threshold: 4, + Threshold: 3, NumDVs: 5, Network: "goerli", }, @@ -246,6 +250,43 @@ func TestCreateCluster(t *testing.T) { }, }, }, + { + Name: "threshold greater than the number of operators", + Config: clusterConfig{ + NumNodes: 4, + Threshold: 5, + NumDVs: 1, + Network: defaultNetwork, + }, + expectedErr: "threshold cannot be greater than number of operators", + }, + { + Name: "threshold smaller than BFT quorum", + Config: clusterConfig{ + NumNodes: 4, + Threshold: 2, + NumDVs: 1, + Network: defaultNetwork, + }, + expectedErr: "threshold cannot be smaller than BFT quorum", + }, + { + Name: "test with number of nodes below minimum", + Config: clusterConfig{ + Name: 
"test_cluster", + NumNodes: 2, + Threshold: 2, + NumDVs: 1, + Network: "goerli", + }, + defFileProvider: func() []byte { + data, err := json.Marshal(defTwoNodes) + require.NoError(t, err) + + return data + }, + expectedErr: "number of operators is below minimum", + }, } for _, test := range tests { t.Run(test.Name, func(t *testing.T) { @@ -555,6 +596,7 @@ func TestMultipleAddresses(t *testing.T) { err := runCreateCluster(context.Background(), io.Discard, clusterConfig{ NumDVs: 4, NumNodes: 4, + Threshold: 3, Network: defaultNetwork, FeeRecipientAddrs: []string{}, WithdrawalAddrs: []string{}, @@ -566,6 +608,7 @@ func TestMultipleAddresses(t *testing.T) { err := runCreateCluster(context.Background(), io.Discard, clusterConfig{ NumDVs: 1, NumNodes: 4, + Threshold: 3, Network: defaultNetwork, FeeRecipientAddrs: []string{feeRecipientAddr}, WithdrawalAddrs: []string{}, @@ -639,6 +682,7 @@ func TestKeymanager(t *testing.T) { SplitKeysDir: keyDir, SplitKeys: true, NumNodes: minNodes, + Threshold: minThreshold, KeymanagerAddrs: addrs, KeymanagerAuthTokens: authTokens, Network: eth2util.Goerli.Name, @@ -720,6 +764,7 @@ func TestPublish(t *testing.T) { conf := clusterConfig{ Name: t.Name(), NumNodes: minNodes, + Threshold: minThreshold, NumDVs: 1, Network: eth2util.Goerli.Name, WithdrawalAddrs: []string{zeroAddress}, diff --git a/cmd/createdkg.go b/cmd/createdkg.go index 8deb0e9f0f..79fe1f4767 100644 --- a/cmd/createdkg.go +++ b/cmd/createdkg.go @@ -6,6 +6,7 @@ import ( "context" crand "crypto/rand" "encoding/json" + "math" "os" "path" @@ -181,16 +182,21 @@ func validateWithdrawalAddrs(addrs []string, network string) error { // validateDKGConfig returns an error if any of the provided config parameter is invalid. func validateDKGConfig(threshold, numOperators int, network string, depositAmounts []int) error { + // Don't allow cluster size to be less than 3. 
+ if numOperators < minNodes { + return errors.New("number of operators is below minimum", z.Int("operators", numOperators), z.Int("min", minNodes)) + } + + // Ensure threshold setting is sound + minThreshold := int(math.Ceil(float64(numOperators*2) / 3)) + if threshold < minThreshold { + return errors.New("threshold cannot be smaller than BFT quorum", z.Int("threshold", threshold), z.Int("min", minThreshold)) + } if threshold > numOperators { return errors.New("threshold cannot be greater than length of operators", z.Int("threshold", threshold), z.Int("operators", numOperators)) } - // Don't allow cluster size to be less than 4. - if numOperators < minNodes { - return errors.New("insufficient operator ENRs", z.Int("count", numOperators), z.Int("min", minNodes)) - } - if !eth2util.ValidNetwork(network) { return errors.New("unsupported network", z.Str("network", network)) } diff --git a/cmd/createdkg_internal_test.go b/cmd/createdkg_internal_test.go index fbf53da4e2..a6d522db59 100644 --- a/cmd/createdkg_internal_test.go +++ b/cmd/createdkg_internal_test.go @@ -57,7 +57,8 @@ func TestCreateDkgInvalid(t *testing.T) { OperatorENRs: append([]string{ "-JG4QDKNYm_JK-w6NuRcUFKvJAlq2L4CwkECelzyCVrMWji4YnVRn8AqQEL5fTQotPL2MKxiKNmn2k6XEINtq-6O3Z2GAYGvzr_LgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKlO7fSaBa3h48CdM-qb_Xb2_hSrJOy6nNjR0mapAqMboN0Y3CCDhqDdWRwgg4u", }, validENRs...), - Network: defaultNetwork, + Threshold: 3, + Network: defaultNetwork, }, errMsg: "invalid ENR: missing 'enr:' prefix", }, @@ -66,7 +67,8 @@ func TestCreateDkgInvalid(t *testing.T) { OperatorENRs: append([]string{ "enr:JG4QDKNYm_JK-w6NuRcUFKvJAlq2L4CwkECelzyCVrMWji4YnVRn8AqQEL5fTQotPL2MKxiKNmn2k6XEINtq-6O3Z2GAYGvzr_LgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKlO7fSaBa3h48CdM-qb_Xb2_hSrJOy6nNjR0mapAqMboN0Y3CCDhqDdWRwgg4u", }, validENRs...), - Network: defaultNetwork, + Threshold: 3, + Network: defaultNetwork, }, errMsg: "invalid ENR: invalid enr record, too few elements", }, @@ -75,7 +77,8 @@ func TestCreateDkgInvalid(t 
*testing.T) { OperatorENRs: append([]string{ "enrJG4QDKNYm_JK-w6NuRcUFKvJAlq2L4CwkECelzyCVrMWji4YnVRn8AqQEL5fTQotPL2MKxiKNmn2k6XEINtq-6O3Z2GAYGvzr_LgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKlO7fSaBa3h48CdM-qb_Xb2_hSrJOy6nNjR0mapAqMboN0Y3CCDhqDdWRwgg4u", }, validENRs...), - Network: defaultNetwork, + Threshold: 3, + Network: defaultNetwork, }, errMsg: "invalid ENR: missing 'enr:' prefix", }, @@ -84,17 +87,18 @@ func TestCreateDkgInvalid(t *testing.T) { OperatorENRs: append([]string{ "JG4QDKNYm_JK-w6NuRcUFKvJAlq2L4CwkECelzyCVrMWji4YnVRn8AqQEL5fTQotPL2MKxiKNmn2k6XEINtq-6O3Z2GAYGvzr_LgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKlO7fSaBa3h48CdM-qb_Xb2_hSrJOy6nNjR0mapAqMboN0Y3CCDhqDdWRwgg4u", }, validENRs...), - Network: defaultNetwork, + Threshold: 3, + Network: defaultNetwork, }, errMsg: "invalid ENR: missing 'enr:' prefix", }, { conf: createDKGConfig{OperatorENRs: []string{""}}, - errMsg: "insufficient operator ENRs", + errMsg: "number of operators is below minimum", }, { conf: createDKGConfig{}, - errMsg: "insufficient operator ENRs", + errMsg: "number of operators is below minimum", }, } @@ -120,7 +124,7 @@ func TestRequireOperatorENRFlag(t *testing.T) { { name: "operator ENRs less than threshold", args: []string{"dkg", "--operator-enrs=enr:-JG4QG472ZVvl8ySSnUK9uNVDrP_hjkUrUqIxUC75aayzmDVQedXkjbqc7QKyOOS71VmlqnYzri_taV8ZesFYaoQSIOGAYHtv1WsgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKwwq_CAld6oVKOrixE-JzMtvvNgb9yyI-_rwq4NFtajIN0Y3CCDhqDdWRwgg4u", "--fee-recipient-addresses=0xa6430105220d0b29688b734b8ea0f3ca9936e846", "--withdrawal-addresses=0xa6430105220d0b29688b734b8ea0f3ca9936e846"}, - err: "insufficient operator ENRs", + err: "number of operators is below minimum", }, } @@ -146,9 +150,10 @@ func TestExistingClusterDefinition(t *testing.T) { feeRecipientArg := "--fee-recipient-addresses=" + validEthAddr withdrawalArg := "--withdrawal-addresses=" + validEthAddr outputDirArg := "--output-dir=" + charonDir + thresholdArg := "--threshold=2" cmd := newCreateCmd(newCreateDKGCmd(runCreateDKG)) - 
cmd.SetArgs([]string{"dkg", enrArg, feeRecipientArg, withdrawalArg, outputDirArg}) + cmd.SetArgs([]string{"dkg", enrArg, feeRecipientArg, withdrawalArg, outputDirArg, thresholdArg}) require.EqualError(t, cmd.Execute(), "existing cluster-definition.json found. Try again after deleting it") } @@ -179,18 +184,25 @@ func TestValidateWithdrawalAddr(t *testing.T) { } func TestValidateDKGConfig(t *testing.T) { - t.Run("invalid threshold", func(t *testing.T) { + t.Run("threshold exceeds numOperators", func(t *testing.T) { threshold := 5 numOperators := 4 err := validateDKGConfig(threshold, numOperators, "", nil) require.ErrorContains(t, err, "threshold cannot be greater than length of operators") }) - t.Run("insufficient ENRs", func(t *testing.T) { + t.Run("threshold equals 1", func(t *testing.T) { threshold := 1 + numOperators := 3 + err := validateDKGConfig(threshold, numOperators, "", nil) + require.ErrorContains(t, err, "threshold cannot be smaller than BFT quorum") + }) + + t.Run("insufficient ENRs", func(t *testing.T) { + threshold := 2 numOperators := 2 err := validateDKGConfig(threshold, numOperators, "", nil) - require.ErrorContains(t, err, "insufficient operator ENRs") + require.ErrorContains(t, err, "number of operators is below minimum") }) t.Run("invalid network", func(t *testing.T) { From f14538f70c575aaf26fd9f959cf2778b9b4f3398 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 12:04:50 +0000 Subject: [PATCH 18/89] build(deps): Bump github.com/prometheus/client_golang from 1.19.1 to 1.20.3 (#3258) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.19.1 to 1.20.3.
Release notes

Sourced from github.com/prometheus/client_golang's releases.

v1.20.3

  • [BUGFIX] histograms: Fix possible data race when appending exemplars. #1608

v1.20.2

  • [BUGFIX] promhttp: Unset Content-Encoding header when data is uncompressed. #1596

v1.20.1

This release contains the critical fix for the issue. Thanks to @​geberl, @​CubicrootXYZ, @​zetaab and @​timofurrer for helping us with the investigation!

  • [BUGFIX] process-collector: Fixed unregistered descriptor error when using process collector with PedanticRegistry on Linux machines. #1587

v1.20.0

Thanks everyone for contributions!

:warning: In this release we remove one (broken anyway, given Go runtime changes) metric and add three new (representing GOGC, GOMEMLIMIT and GOMAXPROCS flags) to the default collectors.NewGoCollector() collector. Given its popular usage, expect your binary to expose two additional metrics.

Changes

  • [CHANGE] :warning: go-collector: Remove go_memstat_lookups_total metric which was always 0; Go runtime stopped sharing pointer lookup statistics. #1577
  • [FEATURE] :warning: go-collector: Add 3 default metrics: go_gc_gogc_percent, go_gc_gomemlimit_bytes and go_sched_gomaxprocs_threads as those are recommended by the Go team. #1559
  • [FEATURE] go-collector: Add more information to all metrics' HELP e.g. the exact runtime/metrics sourcing each metric (if relevant). #1568 #1578
  • [FEATURE] testutil: Add CollectAndFormat method. #1503
  • [FEATURE] histograms: Add support for exemplars in native histograms. #1471
  • [FEATURE] promhttp: Add experimental support for zstd on scrape, controlled by the request Accept-Encoding header. #1496
  • [FEATURE] api/v1: Add WithLimit parameter to all API methods that supports it. #1544
  • [FEATURE] prometheus: Add support for created timestamps in constant histograms and constant summaries. #1537
  • [FEATURE] process-collectors: Add network usage metrics: process_network_receive_bytes_total and process_network_transmit_bytes_total. #1555
  • [FEATURE] promlint: Add duplicated metric lint rule. #1472
  • [BUGFIX] promlint: Relax metric type in name linter rule. #1455
  • [BUGFIX] promhttp: Make sure server instrumentation wrapping supports new and future extra responseWriter methods. #1480
  • [BUGFIX] testutil: Functions using compareMetricFamilies are now failing if filtered metricNames are not in the input. #1424

... (truncated)

Changelog

Sourced from github.com/prometheus/client_golang's changelog.

1.20.3 / 2024-09-05

  • [BUGFIX] histograms: Fix possible data race when appending exemplars. #1608

1.20.2 / 2024-08-23

  • [BUGFIX] promhttp: Unset Content-Encoding header when data is uncompressed. #1596

1.20.1 / 2024-08-20

  • [BUGFIX] process-collector: Fixed unregistered descriptor error when using process collector with PedanticRegistry on linux machines. #1587

1.20.0 / 2024-08-14

  • [CHANGE] :warning: go-collector: Remove go_memstat_lookups_total metric which was always 0; Go runtime stopped sharing pointer lookup statistics. #1577
  • [FEATURE] :warning: go-collector: Add 3 default metrics: go_gc_gogc_percent, go_gc_gomemlimit_bytes and go_sched_gomaxprocs_threads as those are recommended by the Go team. #1559
  • [FEATURE] go-collector: Add more information to all metrics' HELP e.g. the exact runtime/metrics sourcing each metric (if relevant). #1568 #1578
  • [FEATURE] testutil: Add CollectAndFormat method. #1503
  • [FEATURE] histograms: Add support for exemplars in native histograms. #1471
  • [FEATURE] promhttp: Add experimental support for zstd on scrape, controlled by the request Accept-Encoding header. #1496
  • [FEATURE] api/v1: Add WithLimit parameter to all API methods that supports it. #1544
  • [FEATURE] prometheus: Add support for created timestamps in constant histograms and constant summaries. #1537
  • [FEATURE] process-collector: Add network usage metrics: process_network_receive_bytes_total and process_network_transmit_bytes_total. #1555
  • [FEATURE] promlint: Add duplicated metric lint rule. #1472
  • [BUGFIX] promlint: Relax metric type in name linter rule. #1455
  • [BUGFIX] promhttp: Make sure server instrumentation wrapping supports new and future extra responseWriter methods. #1480
  • [BUGFIX] testutil: Functions using compareMetricFamilies are now failing if filtered metricNames are not in the input. #1424

1.19.0 / 2024-02-27

The module prometheus/common v0.48.0 introduced an incompatibility when used together with client_golang (See prometheus/client_golang#1448 for more details). If your project uses client_golang and you want to use prometheus/common v0.48.0 or higher, please update client_golang to v1.19.0.

  • [CHANGE] Minimum required go version is now 1.20 (we also test client_golang against new 1.22 version). #1445 #1449
  • [FEATURE] collectors: Add version collector. #1422 #1427

1.18.0 / 2023-12-22

  • [FEATURE] promlint: Allow creation of custom metric validations. #1311
  • [FEATURE] Go programs using client_golang can be built in wasip1 OS. #1350
  • [BUGFIX] histograms: Add timer to reset ASAP after bucket limiting has happened. #1367
  • [BUGFIX] testutil: Fix comparison of metrics with empty Help strings. #1378
  • [ENHANCEMENT] Improved performance of MetricVec.WithLabelValues(...). #1360

1.17.0 / 2023-09-27

  • [CHANGE] Minimum required go version is now 1.19 (we also test client_golang against new 1.21 version). #1325
  • [FEATURE] Add support for Created Timestamps in Counters, Summaries and Histograms. #1313
  • [ENHANCEMENT] Enable detection of a native histogram without observations. #1314

1.16.0 / 2023-06-15

... (truncated)

Commits
  • ef2f87e Merge pull request #1620 from prometheus/arthursens/prepare-1.20.3
  • 937ac63 Add changelog entry for 1.20.3
  • 6e9914d Merge pull request #1608 from krajorama/index-out-of-range-native-histogram-e...
  • d6b8c89 Update comments with more explanations
  • 504566f Use simplified solution from #1609 for the data race
  • dc8e9a4 fix: native histogram: Simplify and fix addExemplar
  • dc819ce Use a trivial solution to #1605
  • e061dfa native histogram: use exemplars in concurrency test
  • 67121dc Merge pull request #1596 from mrueg/fix-uncompressed-content-header
  • 187acd4 Cut 1.20.2
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/prometheus/client_golang&package-manager=go_modules&previous-version=1.19.1&new-version=1.20.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 10 ++++++---- go.sum | 20 ++++++++++++-------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 21ec1e0e86..3332275b53 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.13.0 github.com/pelletier/go-toml/v2 v2.2.3 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.3 github.com/prometheus/client_model v0.6.1 github.com/protolambda/eth2-shuffle v1.1.0 github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e @@ -70,7 +70,7 @@ require ( github.com/bufbuild/protovalidate-go v0.6.2 // indirect github.com/bufbuild/protoyaml-go v0.1.9 // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect @@ -120,6 +120,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/koron/go-ssdp v0.0.4 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect @@ -151,6 +152,7 @@ require ( github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/onsi/ginkgo/v2 v2.15.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect @@ -161,8 +163,8 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pkg/profile v1.7.0 // indirect github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/quic-go v0.42.0 // indirect github.com/quic-go/webtransport-go v0.6.0 // indirect diff --git a/go.sum b/go.sum index cbae1c4fd6..95f8a2b362 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,8 @@ github.com/bwesterb/go-ristretto v1.2.0 h1:xxWOVbN5m8NNKiSDZXE1jtZvZnC6JSJ9cYFAD github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= @@ -284,6 +284,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod 
h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= @@ -382,6 +384,8 @@ github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dy github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= @@ -416,17 +420,17 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= +github.com/prometheus/client_golang v1.20.3/go.mod 
h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/protolambda/eth2-shuffle v1.1.0 h1:gixIBI84IeugTwwHXm8vej1bSSEhueBCSryA4lAKRLU= github.com/protolambda/eth2-shuffle v1.1.0/go.mod h1:FhA2c0tN15LTC+4T9DNVm+55S7uXTTjQ8TQnBuXlkF8= github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4= From 23897212a561f40892803ef8553917376036d510 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Mon, 9 Sep 2024 21:11:45 +0200 Subject: [PATCH 19/89] workflows: bump govulncheck to v1.1.3 (#3262) govulncheck is failing currently in the pipeline, this is an attempt to fix it by simply bumping its version... 
category: misc ticket: none --- .github/workflows/govulncheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index d7e2cbcb1a..bd2beb84ba 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -13,5 +13,5 @@ jobs: steps: - uses: actions/checkout@v3 - uses: ./.github/actions/setup-go - - run: go install golang.org/x/vuln/cmd/govulncheck@v1.1.0 + - run: go install golang.org/x/vuln/cmd/govulncheck@v1.1.3 - run: govulncheck -show=traces -test ./... From 0140d1ca7978bfff708405addff42b2fac0ecc2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:27:06 +0000 Subject: [PATCH 20/89] build(deps): Bump golang.org/x/crypto from 0.26.0 to 0.27.0 (#3265) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.26.0 to 0.27.0.
Commits
  • c9da6b9 all: fix printf(var) mistakes detected by latest printf checker
  • b35ab4f go.mod: update golang.org/x dependencies
  • bcb0f91 internal/poly1305: Port sum_amd64.s to Avo
  • 7eace71 chacha20poly1305: Avo port of chacha20poly1305_amd64.s
  • 620dfbc salsa20/salsa: Port salsa20_amd64.s to Avo
  • 82942cf blake2b: port blake2b_amd64.s to Avo
  • 0484c26 blake2b: port blake2bAVX2_amd64.s to Avo
  • 38ed1bc blake2s: port blake2s_amd64.s to Avo
  • 38a0b5d argon2: Avo port of blamka_amd64.s
  • bf5f14f x509roots/fallback: update bundle
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/crypto&package-manager=go_modules&previous-version=0.26.0&new-version=0.27.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 3332275b53..ad28650244 100644 --- a/go.mod +++ b/go.mod @@ -41,10 +41,10 @@ require ( go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.26.0 + golang.org/x/crypto v0.27.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/sync v0.8.0 - golang.org/x/term v0.23.0 + golang.org/x/term v0.24.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.23.0 google.golang.org/protobuf v1.34.2 @@ -191,8 +191,8 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.27.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect diff --git a/go.sum b/go.sum index 95f8a2b362..43b227743a 100644 --- a/go.sum +++ b/go.sum @@ -594,8 +594,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= @@ -671,17 +671,17 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod 
h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= From 1cf2c5ea81c47053085e2dc0c6fa6783d99757ac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 13:48:46 +0000 Subject: [PATCH 21/89] build(deps): Bump go.opentelemetry.io/otel/trace from 1.28.0 to 1.29.0 (#3252) Bumps [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) from 1.28.0 to 1.29.0.
Changelog

Sourced from go.opentelemetry.io/otel/trace's changelog.

[1.29.0/0.51.0/0.5.0] 2024-08-23

This release is the last to support [Go 1.21]. The next release will require at least [Go 1.22].

Added

  • Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
  • Add InstrumentationScope field to SpanStub in go.opentelemetry.io/otel/sdk/trace/tracetest, as a replacement for the deprecated InstrumentationLibrary. (#5627)
  • Make the initial release of go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc. This new module contains an OTLP exporter that transmits log telemetry using gRPC. This module is unstable and breaking changes may be introduced. See our versioning policy for more information about these stability guarantees. (#5629)
  • Add Walk function to TraceState in go.opentelemetry.io/otel/trace to iterate all the key-value pairs. (#5651)
  • Bridge the trace state in go.opentelemetry.io/otel/bridge/opencensus. (#5651)
  • Zero value of SimpleProcessor in go.opentelemetry.io/otel/sdk/log no longer panics. (#5665)
  • The FilterProcessor interface type is added in go.opentelemetry.io/otel/sdk/log/internal/x. This is an optional and experimental interface that log Processors can implement to instruct the Logger if a Record will be processed or not. It replaces the existing Enabled method that is removed from the Processor interface itself. It does not fall within the scope of the OpenTelemetry Go versioning and stability policy and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
  • Support [Go 1.23]. (#5720)

Changed

  • NewMemberRaw, NewKeyProperty and NewKeyValuePropertyRaw in go.opentelemetry.io/otel/baggage allow UTF-8 string in key. (#5132)
  • Processor.OnEmit in go.opentelemetry.io/otel/sdk/log now accepts a pointer to Record instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
  • SimpleProcessor.Enabled in go.opentelemetry.io/otel/sdk/log now returns false if the exporter is nil. (#5665)
  • Update the concurrency requirements of Exporter in go.opentelemetry.io/otel/sdk/log. (#5666)
  • SimpleProcessor in go.opentelemetry.io/otel/sdk/log synchronizes OnEmit calls. (#5666)
  • The Processor interface in go.opentelemetry.io/otel/sdk/log no longer includes the Enabled method. See the FilterProcessor interface type added in go.opentelemetry.io/otel/sdk/log/internal/x to continue providing this functionality. (#5692)
  • The SimpleProcessor type in go.opentelemetry.io/otel/sdk/log is no longer comparable. (#5693)
  • The BatchProcessor type in go.opentelemetry.io/otel/sdk/log is no longer comparable. (#5693)

Fixed

  • Correct comments for the priority of the WithEndpoint and WithEndpointURL options and their corresponding environment variables in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp. (#5584)
  • Pass the underlying error rather than a generic retry-able failure in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp, go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp and go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp. (#5541)
  • Correct the Tracer, Meter, and Logger names used in go.opentelemetry.io/otel/example/dice. (#5612)
  • Correct the Tracer names used in go.opentelemetry.io/otel/example/namedtracer. (#5612)
  • Correct the Tracer name used in go.opentelemetry.io/otel/example/opencensus. (#5612)
  • Correct the Tracer and Meter names used in go.opentelemetry.io/otel/example/otel-collector. (#5612)
  • Correct the Tracer names used in go.opentelemetry.io/otel/example/passthrough. (#5612)
  • Correct the Meter name used in go.opentelemetry.io/otel/example/prometheus. (#5612)
  • Correct the Tracer names used in go.opentelemetry.io/otel/example/zipkin. (#5612)
  • Correct comments for the priority of the WithEndpoint and WithEndpointURL options and their corresponding environment variables in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc and go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp. (#5641)
  • Correct comments for the priority of the WithEndpoint and WithEndpointURL options and their corresponding environment variables in go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp. (#5650)
  • Stop percent encoding header environment variables in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc, go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp, go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc and go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp (#5705)
  • Remove invalid environment variable header keys in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc, go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp, go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc and go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp (#5705)

... (truncated)

Commits
  • 6b1d94f Release v1.29.0/v0.51.0/v0.5.0 (#5732)
  • 2a54df7 fix(deps): update module github.com/golangci/golangci-lint to v1.60.3 (#5730)
  • 4875735 fix(deps): update module github.com/golangci/golangci-lint to v1.60.2 (#5711)
  • 30fc407 fix(deps): update golang.org/x/exp digest to 9b4947d (#5729)
  • 9402143 fix(deps): update golang.org/x/exp digest to 778ce7b (#5728)
  • bc48d69 chore(deps): update google.golang.org/genproto/googleapis/rpc digest to fc7c0...
  • fe02ce7 chore(deps): update google.golang.org/genproto/googleapis/api digest to fc7c0...
  • 002c0a4 Move log.Processor.Enabled to independent FilterProcessor interfaced type...
  • fe6c67e OpenCensus bridge to support TraceState (#5651)
  • 83ae9bd Bugfix: OTLP exporters should not percent decode the key when parsing HEADERS...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel/trace&package-manager=go_modules&previous-version=1.28.0&new-version=1.29.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index ad28650244..7218d5b90b 100644 --- a/go.mod +++ b/go.mod @@ -33,11 +33,11 @@ require ( github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 @@ -183,7 +183,7 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect diff --git a/go.sum b/go.sum index 43b227743a..cd9d43896e 100644 --- a/go.sum +++ b/go.sum @@ -544,8 +544,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod 
h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= @@ -554,14 +554,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v1.3.1 
h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= From d673b48368e8a5472e4aebb34a1b2901b10481e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 14:01:58 +0000 Subject: [PATCH 22/89] build(deps): Bump go.opentelemetry.io/otel/exporters/stdout/stdouttrace from 1.28.0 to 1.29.0 (#3263) Bumps [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](https://github.com/open-telemetry/opentelemetry-go) from 1.28.0 to 1.29.0.
Changelog

Sourced from go.opentelemetry.io/otel/exporters/stdout/stdouttrace's changelog.

[1.29.0/0.51.0/0.5.0] 2024-08-23

This release is the last to support [Go 1.21]. The next release will require at least [Go 1.22].

Added

  • Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
  • Add InstrumentationScope field to SpanStub in go.opentelemetry.io/otel/sdk/trace/tracetest, as a replacement for the deprecated InstrumentationLibrary. (#5627)
  • Make the initial release of go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc. This new module contains an OTLP exporter that transmits log telemetry using gRPC. This module is unstable and breaking changes may be introduced. See our versioning policy for more information about these stability guarantees. (#5629)
  • Add Walk function to TraceState in go.opentelemetry.io/otel/trace to iterate all the key-value pairs. (#5651)
  • Bridge the trace state in go.opentelemetry.io/otel/bridge/opencensus. (#5651)
  • Zero value of SimpleProcessor in go.opentelemetry.io/otel/sdk/log no longer panics. (#5665)
  • The FilterProcessor interface type is added in go.opentelemetry.io/otel/sdk/log/internal/x. This is an optional and experimental interface that log Processors can implement to instruct the Logger if a Record will be processed or not. It replaces the existing Enabled method that is removed from the Processor interface itself. It does not fall within the scope of the OpenTelemetry Go versioning and stability policy and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
  • Support [Go 1.23]. (#5720)

Changed

  • NewMemberRaw, NewKeyProperty and NewKeyValuePropertyRaw in go.opentelemetry.io/otel/baggage allow UTF-8 string in key. (#5132)
  • Processor.OnEmit in go.opentelemetry.io/otel/sdk/log now accepts a pointer to Record instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
  • SimpleProcessor.Enabled in go.opentelemetry.io/otel/sdk/log now returns false if the exporter is nil. (#5665)
  • Update the concurrency requirements of Exporter in go.opentelemetry.io/otel/sdk/log. (#5666)
  • SimpleProcessor in go.opentelemetry.io/otel/sdk/log synchronizes OnEmit calls. (#5666)
  • The Processor interface in go.opentelemetry.io/otel/sdk/log no longer includes the Enabled method. See the FilterProcessor interface type added in go.opentelemetry.io/otel/sdk/log/internal/x to continue providing this functionality. (#5692)
  • The SimpleProcessor type in go.opentelemetry.io/otel/sdk/log is no longer comparable. (#5693)
  • The BatchProcessor type in go.opentelemetry.io/otel/sdk/log is no longer comparable. (#5693)

Fixed

  • Correct comments for the priority of the WithEndpoint and WithEndpointURL options and their corresponding environment variables in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp. (#5584)
  • Pass the underlying error rather than a generic retry-able failure in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp, go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp and go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp. (#5541)
  • Correct the Tracer, Meter, and Logger names used in go.opentelemetry.io/otel/example/dice. (#5612)
  • Correct the Tracer names used in go.opentelemetry.io/otel/example/namedtracer. (#5612)
  • Correct the Tracer name used in go.opentelemetry.io/otel/example/opencensus. (#5612)
  • Correct the Tracer and Meter names used in go.opentelemetry.io/otel/example/otel-collector. (#5612)
  • Correct the Tracer names used in go.opentelemetry.io/otel/example/passthrough. (#5612)
  • Correct the Meter name used in go.opentelemetry.io/otel/example/prometheus. (#5612)
  • Correct the Tracer names used in go.opentelemetry.io/otel/example/zipkin. (#5612)
  • Correct comments for the priority of the WithEndpoint and WithEndpointURL options and their corresponding environment variables in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc and go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp. (#5641)
  • Correct comments for the priority of the WithEndpoint and WithEndpointURL options and their corresponding environment variables in go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp. (#5650)
  • Stop percent encoding header environment variables in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc, go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp, go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc and go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp (#5705)
  • Remove invalid environment variable header keys in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc, go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp, go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc and go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp (#5705)

... (truncated)

Commits
  • 6b1d94f Release v1.29.0/v0.51.0/v0.5.0 (#5732)
  • 2a54df7 fix(deps): update module github.com/golangci/golangci-lint to v1.60.3 (#5730)
  • 4875735 fix(deps): update module github.com/golangci/golangci-lint to v1.60.2 (#5711)
  • 30fc407 fix(deps): update golang.org/x/exp digest to 9b4947d (#5729)
  • 9402143 fix(deps): update golang.org/x/exp digest to 778ce7b (#5728)
  • bc48d69 chore(deps): update google.golang.org/genproto/googleapis/rpc digest to fc7c0...
  • fe02ce7 chore(deps): update google.golang.org/genproto/googleapis/api digest to fc7c0...
  • 002c0a4 Move log.Processor.Enabled to independent FilterProcessor interfaced type...
  • fe6c67e OpenCensus bridge to support TraceState (#5651)
  • 83ae9bd Bugfix: OTLP exporters should not percent decode the key when parsing HEADERS...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel/exporters/stdout/stdouttrace&package-manager=go_modules&previous-version=1.28.0&new-version=1.29.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7218d5b90b..213e1a3eb1 100644 --- a/go.mod +++ b/go.mod @@ -35,8 +35,8 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 - go.opentelemetry.io/otel/sdk v1.28.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 + go.opentelemetry.io/otel/sdk v1.29.0 go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 diff --git a/go.sum b/go.sum index cd9d43896e..e217e909f0 100644 --- a/go.sum +++ b/go.sum @@ -552,12 +552,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYa go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod 
h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= From dc4a4319398a49462d8d20b6731ec88af44fb08c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 14:12:12 +0000 Subject: [PATCH 23/89] build(deps): Bump github.com/ferranbt/fastssz from 0.1.3 to 0.1.4 (#3264) Bumps [github.com/ferranbt/fastssz](https://github.com/ferranbt/fastssz) from 0.1.3 to 0.1.4.
Changelog

Sourced from github.com/ferranbt/fastssz's changelog.

0.1.4 (7 Aug, 2024)

  • fix: Do not skip intermediate hashes in multi-proof [GH-173]
  • feat: Add dot graph generation [GH-172]
  • fix: Fix spurious allocation in hasher.Merkleize [GH-171]
  • feat: Increase performance for repeated proving [GH-168]
  • fix: Infer size for fixed []byte without tags [GH-155]
  • fix: Unmarshaling of fixed sized custom types [GH-152]
  • feat: Support list of non-ptr containers [GH-151]
  • feat: Support uint32 lists [GH-149]
  • fix: Fix chunk count in merkleize [GH-147]
  • feat: Add deneb fork to specs [GH-139]
  • fix: Sszgen incorrect output for nested []byte types [GH-127]
  • fix: Sszgen do not import package references if not used [GH-137]
Commits
  • f5aaaba Improve error message (#175)
  • e9dfc1b feat(proof): Reduce interface reqs (#174)
  • 31cd371 Fix(multiproofs verification): don't skip visiting intermediate hashes (#173)
  • c98805c simple dot graph gen (#172)
  • 87ee6ff Fix typo in go generate (#170)
  • 8e1c57a Fix spurious allocation in hasher.Merkleize (#171)
  • eac385e Increase performance of repeat proving. (#168)
  • edc73fd Fix missing import for external []byte alias (#167)
  • f43e88e Fix handling of aliases to unsigned integer types other than uint64. (#162)
  • a4db753 Fix infer dimensions from type with non-ssz tags (#157)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/ferranbt/fastssz&package-manager=go_modules&previous-version=0.1.3&new-version=0.1.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 3 ++- go.sum | 10 ++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 213e1a3eb1..51d039dd3e 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/bufbuild/buf v1.35.1 github.com/coinbase/kryptology v1.5.6-0.20220316191335-269410e1b06b github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 - github.com/ferranbt/fastssz v0.1.3 + github.com/ferranbt/fastssz v0.1.4 github.com/golang/snappy v0.0.4 github.com/google/gofuzz v1.2.0 github.com/gorilla/mux v1.8.1 @@ -87,6 +87,7 @@ require ( github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect + github.com/emicklei/dot v1.6.2 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/fgprof v0.9.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect diff --git a/go.sum b/go.sum index e217e909f0..bdc26eea1f 100644 --- a/go.sum +++ b/go.sum @@ -123,6 +123,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= @@ -133,8 +135,8 @@ github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= 
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo= -github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -435,6 +437,8 @@ github.com/protolambda/eth2-shuffle v1.1.0 h1:gixIBI84IeugTwwHXm8vej1bSSEhueBCSr github.com/protolambda/eth2-shuffle v1.1.0/go.mod h1:FhA2c0tN15LTC+4T9DNVm+55S7uXTTjQ8TQnBuXlkF8= github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4= github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM= @@ -525,8 +529,6 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial 
v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10 h1:CQh33pStIp/E30b7TxDlXfM0145bn2e8boI30IxAhTg= -github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10/go.mod h1:x/Pa0FF5Te9kdrlZKJK82YmAkvL8+f989USgz6Jiw7M= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= From 18ffecb2667eec3a92d07773c2ef47ac869c464e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:19:48 +0000 Subject: [PATCH 24/89] build(deps): Bump golang.org/x/tools from 0.23.0 to 0.25.0 (#3270) Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.23.0 to 0.25.0.
Commits
  • 7398f36 all: fix some symbols error in comment
  • f111c72 go/callgraph/rta: skip test on js platform
  • 9f9b7e3 gopls/internal/settings: add missing deep cloning in Options.Clone
  • ce7eed4 doc/generate: minor cleanup
  • 075ae7d go/callgraph/vta: add basic tests for range-over-func
  • 2c7aaab go/ssa: skip failing test
  • 1b5663f go/callgraph/vta: perform minor cleanups
  • 0a49883 gopls/go.mod: update the go directive to 1.23.1
  • ad366a8 go.mod: update golang.org/x dependencies
  • 4fb36d1 go/callgraph/rta: add rta analysis test case for multiple go packages
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/tools&package-manager=go_modules&previous-version=0.23.0&new-version=0.25.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 51d039dd3e..fde3650afb 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( golang.org/x/sync v0.8.0 golang.org/x/term v0.24.0 golang.org/x/time v0.6.0 - golang.org/x/tools v0.23.0 + golang.org/x/tools v0.25.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -190,8 +190,8 @@ require ( go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.18.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/go.sum b/go.sum index bdc26eea1f..a89939cab3 100644 --- a/go.sum +++ b/go.sum @@ -610,8 +610,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -629,8 +629,8 @@ golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -700,8 +700,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 8150bff9d7d7e0052b140a2f36bd8038cfe30d48 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:44:27 +0000 Subject: [PATCH 25/89] build(deps): Bump go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp from 0.53.0 to 0.54.0 (#3271) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) from 0.53.0 to 0.54.0.
Release notes

Sourced from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp's releases.

Release v1.29.0/v0.54.0/v0.23.0/v0.9.0/v0.4.0/v0.2.0/v0.1.0

Overview

This release is the last to support Go 1.21. The next release will require at least Go 1.22.

Added

  • Add the WithSpanAttributes and WithMetricAttributes methods to set custom attributes to the stats handler in go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc. (#5133)
  • The go.opentelemetry.io/contrib/bridges/otelzap module. This module provides an OpenTelemetry logging bridge for go.uber.org/zap. (#5191)
  • Support for the OTEL_HTTP_CLIENT_COMPATIBILITY_MODE=http/dup environment variable in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp to emit attributes for both the v1.20.0 and v1.26.0 semantic conventions. (#5401)
  • The go.opentelemetry.io/contrib/bridges/otelzerolog module. This module provides an OpenTelemetry logging bridge for github.com/rs/zerolog. (#5405)
  • Add WithGinFilter filter parameter in go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin to allow filtering requests with *gin.Context. (#5743)
  • Support for stdoutlog exporter in go.opentelemetry.io/contrib/config. (#5850)
  • Add macOS ARM64 platform to the compatibility testing suite. (#5868)
  • Add new runtime metrics to go.opentelemetry.io/contrib/instrumentation/runtime, which are still disabled by default. (#5870)
  • Add the WithMetricsAttributesFn option to allow setting dynamic, per-request metric attributes in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp. (#5876)
  • The go.opentelemetry.io/contrib/config package supports configuring with_resource_constant_labels for the prometheus exporter. (#5890)
  • Support Go 1.23. (#6017)

Removed

  • The deprecated go.opentelemetry.io/contrib/processors/baggagecopy package is removed. (#5853)

Fixed

  • Race condition when reading the HTTP body and writing the response in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp. (#5916)

What's Changed

... (truncated)

Changelog

Sourced from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp's changelog.

[1.29.0/0.54.0/0.23.0/0.9.0/0.4.0/0.2.0/0.1.0] - 2024-08-23

This release is the last to support [Go 1.21]. The next release will require at least [Go 1.22].

Added

  • Add the WithSpanAttributes and WithMetricAttributes methods to set custom attributes to the stats handler in go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc. (#5133)
  • The go.opentelemetry.io/contrib/bridges/otelzap module. This module provides an OpenTelemetry logging bridge for go.uber.org/zap. (#5191)
  • Support for the OTEL_HTTP_CLIENT_COMPATIBILITY_MODE=http/dup environment variable in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp to emit attributes for both the v1.20.0 and v1.26.0 semantic conventions. (#5401)
  • The go.opentelemetry.io/contrib/bridges/otelzerolog module. This module provides an OpenTelemetry logging bridge for github.com/rs/zerolog. (#5405)
  • Add WithGinFilter filter parameter in go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin to allow filtering requests with *gin.Context. (#5743)
  • Support for stdoutlog exporter in go.opentelemetry.io/contrib/config. (#5850)
  • Add macOS ARM64 platform to the compatibility testing suite. (#5868)
  • Add new runtime metrics to go.opentelemetry.io/contrib/instrumentation/runtime, which are still disabled by default. (#5870)
  • Add the WithMetricsAttributesFn option to allow setting dynamic, per-request metric attributes in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp. (#5876)
  • The go.opentelemetry.io/contrib/config package supports configuring with_resource_constant_labels for the prometheus exporter. (#5890)
  • Support [Go 1.23]. (#6017)

Removed

  • The deprecated go.opentelemetry.io/contrib/processors/baggagecopy package is removed. (#5853)

Fixed

  • Race condition when reading the HTTP body and writing the response in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp. (#5916)
Commits
  • fc25f67 Release v1.29.0/v0.54.0/v0.23.0/v0.9.0/v0.4.0/v0.2.0/v0.1.0 (#6042)
  • c42406a fix(deps): update module github.com/golangci/golangci-lint to v1.60.3 (#6039)
  • fd28620 fix(deps): update module github.com/golangci/golangci-lint to v1.60.2 (#6008)
  • 21e0a4d fix(deps): update golang.org/x/exp digest to 9b4947d (#6038)
  • 3e4b550 fix(deps): update golang.org/x/exp digest to 778ce7b (#6035)
  • e9d1d30 fix(deps): update google.golang.org/genproto/googleapis/api digest to fc7c04a...
  • 35cdd98 fix(deps): update aws-sdk-go-v2 monorepo (#6037)
  • b0a60d2 chore(deps): update k8s.io/kube-openapi digest to 76de80e (#6033)
  • 3a400b4 chore(deps): update google.golang.org/genproto/googleapis/rpc digest to fc7c0...
  • cf214e5 chore(deps): update k8s.io/utils digest to f90d014 (#6029)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp&package-manager=go_modules&previous-version=0.53.0&new-version=0.54.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fde3650afb..2b14c23019 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 diff --git a/go.sum b/go.sum index a89939cab3..4d4c3be34d 100644 --- a/go.sum +++ b/go.sum @@ -544,8 +544,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= From 93bdc47bb481321266bad4d804f5d25d812e011d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 
19:39:21 +0000 Subject: [PATCH 26/89] build(deps): Bump go.opentelemetry.io/otel from 1.29.0 to 1.30.0 (#3274) Bumps [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) from 1.29.0 to 1.30.0.
Changelog

Sourced from go.opentelemetry.io/otel's changelog.

[1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09

Added

  • Support OTEL_EXPORTER_OTLP_LOGS_INSECURE and OTEL_EXPORTER_OTLP_INSECURE environments in go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc. (#5739)
  • The WithResource option for NewMeterProvider now merges the provided resources with the ones from environment variables. (#5773)
  • The WithResource option for NewLoggerProvider now merges the provided resources with the ones from environment variables. (#5773)
  • Add UTF-8 support to go.opentelemetry.io/otel/exporters/prometheus. (#5755)

Fixed

  • Fix memory leak in the global MeterProvider when identical instruments are repeatedly created. (#5754)
  • Fix panic on instruments creation when setting meter provider. (#5758)
  • Fix an issue where SetMeterProvider in go.opentelemetry.io/otel might miss the delegation for instruments and registries. (#5780)

Removed

Commits
  • ed4fc75 Release v1.30.0/v0.52.0/v0.6.0/v0.0.9 (#5797)
  • cdd2dbb Drop support for Go 1.21 in dice example (#5800)
  • e9ac0d2 fix(deps): update module google.golang.org/grpc to v1.66.1 (#5798)
  • 4cc9fee fix(deps): update golang.org/x/exp digest to 701f63a (#5795)
  • 71b341f Add utf8 support to the prometheus exporter (#5755)
  • 506a9ba Fix typos (#5763)
  • b37e8a9 SetMeterProvider might miss the delegation for instruments and registries (...
  • 9e1b015 fix(metric, log): merge explicit resource with environment variables (#5773)
  • 8dca9cc Support OTEL_EXPORTER_OTLP_LOGS_INSECURE and OTEL_EXPORTER_OTLP_INSECURE envi...
  • fb7cc02 fix(deps): update module github.com/prometheus/client_golang to v1.20.3 (#5788)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel&package-manager=go_modules&previous-version=1.29.0&new-version=1.30.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 2b14c23019..c58cd58c0e 100644 --- a/go.mod +++ b/go.mod @@ -33,11 +33,11 @@ require ( github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 - go.opentelemetry.io/otel v1.29.0 + go.opentelemetry.io/otel v1.30.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 go.opentelemetry.io/otel/sdk v1.29.0 - go.opentelemetry.io/otel/trace v1.29.0 + go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 @@ -184,7 +184,7 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect diff --git a/go.sum b/go.sum index 4d4c3be34d..eadb07f9d3 100644 --- a/go.sum +++ b/go.sum @@ -546,8 +546,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod 
h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= @@ -556,14 +556,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v1.3.1 
h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= From c64f13589159cc7006efb0656aa217a568860f3a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 19:46:55 +0000 Subject: [PATCH 27/89] build(deps): Bump go.opentelemetry.io/otel/exporters/stdout/stdouttrace from 1.29.0 to 1.30.0 (#3275) Bumps [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](https://github.com/open-telemetry/opentelemetry-go) from 1.29.0 to 1.30.0.
Changelog

Sourced from go.opentelemetry.io/otel/exporters/stdout/stdouttrace's changelog.

[1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09

Added

  • Support OTEL_EXPORTER_OTLP_LOGS_INSECURE and OTEL_EXPORTER_OTLP_INSECURE environments in go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc. (#5739)
  • The WithResource option for NewMeterProvider now merges the provided resources with the ones from environment variables. (#5773)
  • The WithResource option for NewLoggerProvider now merges the provided resources with the ones from environment variables. (#5773)
  • Add UTF-8 support to go.opentelemetry.io/otel/exporters/prometheus. (#5755)

Fixed

  • Fix memory leak in the global MeterProvider when identical instruments are repeatedly created. (#5754)
  • Fix panic on instruments creation when setting meter provider. (#5758)
  • Fix an issue where SetMeterProvider in go.opentelemetry.io/otel might miss the delegation for instruments and registries. (#5780)

Removed

Commits
  • ed4fc75 Release v1.30.0/v0.52.0/v0.6.0/v0.0.9 (#5797)
  • cdd2dbb Drop support for Go 1.21 in dice example (#5800)
  • e9ac0d2 fix(deps): update module google.golang.org/grpc to v1.66.1 (#5798)
  • 4cc9fee fix(deps): update golang.org/x/exp digest to 701f63a (#5795)
  • 71b341f Add utf8 support to the prometheus exporter (#5755)
  • 506a9ba Fix typos (#5763)
  • b37e8a9 SetMeterProvider might miss the delegation for instruments and registries (...
  • 9e1b015 fix(metric, log): merge explicit resource with environment variables (#5773)
  • 8dca9cc Support OTEL_EXPORTER_OTLP_LOGS_INSECURE and OTEL_EXPORTER_OTLP_INSECURE envi...
  • fb7cc02 fix(deps): update module github.com/prometheus/client_golang to v1.20.3 (#5788)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel/exporters/stdout/stdouttrace&package-manager=go_modules&previous-version=1.29.0&new-version=1.30.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index c58cd58c0e..d2e92fc72b 100644 --- a/go.mod +++ b/go.mod @@ -35,8 +35,8 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 go.opentelemetry.io/otel v1.30.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 - go.opentelemetry.io/otel/sdk v1.29.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 + go.opentelemetry.io/otel/sdk v1.30.0 go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 diff --git a/go.sum b/go.sum index eadb07f9d3..cfaec9b9bf 100644 --- a/go.sum +++ b/go.sum @@ -554,12 +554,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYa go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0= go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod 
h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= From 7e4660cd1821675e910be6de603fed603f125bff Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:07:03 +0200 Subject: [PATCH 28/89] *: add --all for exit sign command (#3272) Add `--all` command for signing partial exits. This PR is one of a couple incoming PRs that will be for the `--all` functionality. Mind that the CLI flag is not enabled until all of them are implemented and merged. category: feature ticket: #3243 --- app/obolapi/exit.go | 4 +- app/obolapi/exit_test.go | 4 +- cmd/exit.go | 8 ++ cmd/exit_sign.go | 130 ++++++++++++++++++++++++++------- cmd/exit_sign_internal_test.go | 23 ++++-- 5 files changed, 131 insertions(+), 38 deletions(-) diff --git a/app/obolapi/exit.go b/app/obolapi/exit.go index 2a142b11b3..6a41aa60d8 100644 --- a/app/obolapi/exit.go +++ b/app/obolapi/exit.go @@ -62,9 +62,9 @@ func fullExitURL(valPubkey, lockHash string, shareIndex uint64) string { ).Replace(fullExitTmpl) } -// PostPartialExit POSTs the set of msg's to the Obol API, for a given lock hash. +// PostPartialExits POSTs the set of msg's to the Obol API, for a given lock hash. // It respects the timeout specified in the Client instance. 
-func (c Client) PostPartialExit(ctx context.Context, lockHash []byte, shareIndex uint64, identityKey *k1.PrivateKey, exitBlobs ...ExitBlob) error { +func (c Client) PostPartialExits(ctx context.Context, lockHash []byte, shareIndex uint64, identityKey *k1.PrivateKey, exitBlobs ...ExitBlob) error { lockHashStr := "0x" + hex.EncodeToString(lockHash) path := partialExitURL(lockHashStr) diff --git a/app/obolapi/exit_test.go b/app/obolapi/exit_test.go index 329b8312fe..22b3ae82c2 100644 --- a/app/obolapi/exit_test.go +++ b/app/obolapi/exit_test.go @@ -97,7 +97,7 @@ func TestAPIFlow(t *testing.T) { // send all the partial exits for idx, exit := range exits { - require.NoError(t, cl.PostPartialExit(ctx, lock.LockHash, uint64(idx+1), identityKeys[idx], exit), "share index: %d", idx+1) + require.NoError(t, cl.PostPartialExits(ctx, lock.LockHash, uint64(idx+1), identityKeys[idx], exit), "share index: %d", idx+1) } for idx := range exits { @@ -188,7 +188,7 @@ func TestAPIFlowMissingSig(t *testing.T) { // send all the partial exits for idx, exit := range exits { - require.NoError(t, cl.PostPartialExit(ctx, lock.LockHash, uint64(idx+1), identityKeys[idx], exit), "share index: %d", idx+1) + require.NoError(t, cl.PostPartialExits(ctx, lock.LockHash, uint64(idx+1), identityKeys[idx], exit), "share index: %d", idx+1) } for idx := range exits { diff --git a/cmd/exit.go b/cmd/exit.go index 1f86423d67..916ec9d526 100644 --- a/cmd/exit.go +++ b/cmd/exit.go @@ -34,6 +34,7 @@ type exitConfig struct { BeaconNodeTimeout time.Duration ExitFromFilePath string Log log.Config + All bool } func newExitCmd(cmds ...*cobra.Command) *cobra.Command { @@ -63,6 +64,7 @@ const ( fetchedExitPath publishTimeout validatorIndex + all ) func (ef exitFlag) String() string { @@ -91,6 +93,8 @@ func (ef exitFlag) String() string { return "publish-timeout" case validatorIndex: return "validator-index" + case all: + return "all" default: return "unknown" } @@ -113,6 +117,7 @@ func bindExitFlags(cmd 
*cobra.Command, config *exitConfig, flags []exitCLIFlag) return s } + //nolint:exhaustive // `all` is not yet implemented switch flag { case publishAddress: cmd.Flags().StringVar(&config.PublishAddress, publishAddress.String(), "https://api.obol.tech/v1", maybeRequired("The URL of the remote API.")) @@ -138,6 +143,9 @@ func bindExitFlags(cmd *cobra.Command, config *exitConfig, flags []exitCLIFlag) cmd.Flags().DurationVar(&config.PublishTimeout, publishTimeout.String(), 30*time.Second, "Timeout for publishing a signed exit to the publish-address API.") case validatorIndex: cmd.Flags().Uint64Var(&config.ValidatorIndex, validatorIndex.String(), 0, "Validator index of the validator to exit, the associated public key must be present in the cluster lock manifest. If --validator-public-key is also provided, validator existence won't be checked on the beacon chain.") + // TODO: enable after all functionalities for --all are ready + // case all: + // cmd.Flags().BoolVar(&config.All, all.String(), false, "Exit all currently active validators in the cluster.") } if f.required { diff --git a/cmd/exit_sign.go b/cmd/exit_sign.go index e75b14367f..3b5f3b7e88 100644 --- a/cmd/exit_sign.go +++ b/cmd/exit_sign.go @@ -7,6 +7,7 @@ import ( "fmt" eth2api "github.com/attestantio/go-eth2-client/api" + eth2v1 "github.com/attestantio/go-eth2-client/api/v1" eth2p0 "github.com/attestantio/go-eth2-client/spec/phase0" libp2plog "github.com/ipfs/go-log/v2" "github.com/spf13/cobra" @@ -52,6 +53,7 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c {beaconNodeEndpoints, true}, {beaconNodeTimeout, false}, {publishTimeout, false}, + {all, false}, }) bindLogFlags(cmd.Flags(), &config.Log) @@ -60,11 +62,16 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c valIdxPresent := cmd.Flags().Lookup(validatorIndex.String()).Changed valPubkPresent := cmd.Flags().Lookup(validatorPubkey.String()).Changed - if !valPubkPresent && !valIdxPresent { + 
if !valPubkPresent && !valIdxPresent && !config.All { //nolint:revive // we use our own version of the errors package. return errors.New(fmt.Sprintf("either %s or %s must be specified at least.", validatorIndex.String(), validatorPubkey.String())) } + if config.All && (valIdxPresent || valPubkPresent) { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("%s or %s should not be specified when %s is, as they are obsolete and misleading.", validatorIndex.String(), validatorPubkey.String(), all.String())) + } + config.ValidatorIndexPresent = valIdxPresent config.SkipBeaconNodeCheck = valIdxPresent && valPubkPresent @@ -126,60 +133,119 @@ func runSignPartialExit(ctx context.Context, config exitConfig) error { log.Info(ctx, "Both public key and index are specified, beacon node won't be checked for validator existence/liveness") } + var exitBlobs []obolapi.ExitBlob + if config.All { + exitBlobs, err = signAllValidatorsExits(ctx, config, eth2Cl, shares) + if err != nil { + return errors.Wrap(err, "could not sign exits for all validators") + } + } else { + exitBlobs, err = signSingleValidatorExit(ctx, config, eth2Cl, shares) + if err != nil { + return errors.Wrap(err, "could not sign exit for validator") + } + } + + if err := oAPI.PostPartialExits(ctx, cl.GetInitialMutationHash(), shareIdx, identityKey, exitBlobs...); err != nil { + return errors.Wrap(err, "could not POST partial exit message to Obol API") + } + + return nil +} + +func signSingleValidatorExit(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client, shares keystore.ValidatorShares) ([]obolapi.ExitBlob, error) { valEth2, err := fetchValidatorBLSPubKey(ctx, config, eth2Cl) if err != nil { - return errors.Wrap(err, "cannot fetch validator public key") + return nil, errors.Wrap(err, "cannot fetch validator public key") } validator := core.PubKeyFrom48Bytes(valEth2) ourShare, ok := shares[validator] if !ok { - return errors.New("validator not present in 
cluster lock", z.Str("validator", validator.String())) + return nil, errors.New("validator not present in cluster lock", z.Str("validator", validator.String())) } valIndex, err := fetchValidatorIndex(ctx, config, eth2Cl) if err != nil { - return errors.Wrap(err, "cannot fetch validator index") + return nil, errors.Wrap(err, "cannot fetch validator index") } log.Info(ctx, "Signing exit message for validator") exitMsg, err := signExit(ctx, eth2Cl, valIndex, ourShare.Share, eth2p0.Epoch(config.ExitEpoch)) if err != nil { - return errors.Wrap(err, "cannot sign partial exit message") + return nil, errors.Wrap(err, "cannot sign partial exit message") } - exitBlob := obolapi.ExitBlob{ - PublicKey: valEth2.String(), - SignedExitMessage: exitMsg, + return []obolapi.ExitBlob{ + { + PublicKey: valEth2.String(), + SignedExitMessage: exitMsg, + }, + }, nil +} + +func signAllValidatorsExits(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client, shares keystore.ValidatorShares) ([]obolapi.ExitBlob, error) { + var valsEth2 []eth2p0.BLSPubKey + for pk := range shares { + eth2PK, err := pk.ToETH2() + if err != nil { + return nil, errors.Wrap(err, "cannot convert core pubkey to eth2 pubkey") + } + valsEth2 = append(valsEth2, eth2PK) } - if err := oAPI.PostPartialExit(ctx, cl.GetInitialMutationHash(), shareIdx, identityKey, exitBlob); err != nil { - return errors.Wrap(err, "could not POST partial exit message to Obol API") + rawValData, err := queryBeaconForValidator(ctx, eth2Cl, valsEth2, nil) + if err != nil { + return nil, errors.Wrap(err, "fetch validator indices from beacon") } - return nil + for _, val := range rawValData.Data { + share, ok := shares[core.PubKeyFrom48Bytes(val.Validator.PublicKey)] + if !ok { + //nolint:revive // we use our own version of the errors package. 
+ return nil, errors.New(fmt.Sprintf("validator public key %s not found in cluster lock", val.Validator.PublicKey)) + } + share.Index = int(val.Index) + shares[core.PubKeyFrom48Bytes(val.Validator.PublicKey)] = share + } + + log.Info(ctx, "Signing exit message for all validators") + + var exitBlobs []obolapi.ExitBlob + for pk, share := range shares { + exitMsg, err := signExit(ctx, eth2Cl, eth2p0.ValidatorIndex(share.Index), share.Share, eth2p0.Epoch(config.ExitEpoch)) + if err != nil { + return nil, errors.Wrap(err, "cannot sign partial exit message") + } + eth2PK, err := pk.ToETH2() + if err != nil { + return nil, errors.Wrap(err, "cannot convert core pubkey to eth2 pubkey") + } + exitBlob := obolapi.ExitBlob{ + PublicKey: eth2PK.String(), + SignedExitMessage: exitMsg, + } + exitBlobs = append(exitBlobs, exitBlob) + } + + return exitBlobs, nil } func fetchValidatorBLSPubKey(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client) (eth2p0.BLSPubKey, error) { if config.ValidatorPubkey != "" { valEth2, err := core.PubKey(config.ValidatorPubkey).ToETH2() if err != nil { - return eth2p0.BLSPubKey{}, errors.Wrap(err, "cannot convert validator pubkey to bytes") + return eth2p0.BLSPubKey{}, errors.Wrap(err, "cannot convert core pubkey to eth2 pubkey") } return valEth2, nil } - valAPICallOpts := ð2api.ValidatorsOpts{ - State: "head", - Indices: []eth2p0.ValidatorIndex{eth2p0.ValidatorIndex(config.ValidatorIndex)}, - } - - rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) + rawValData, err := queryBeaconForValidator(ctx, eth2Cl, nil, []eth2p0.ValidatorIndex{eth2p0.ValidatorIndex(config.ValidatorIndex)}) if err != nil { - return eth2p0.BLSPubKey{}, errors.Wrap(err, "cannot fetch validators") + return eth2p0.BLSPubKey{}, errors.Wrap(err, "fetch validator pubkey from beacon") } for _, val := range rawValData.Data { @@ -198,17 +264,12 @@ func fetchValidatorIndex(ctx context.Context, config exitConfig, eth2Cl eth2wrap valEth2, err := 
core.PubKey(config.ValidatorPubkey).ToETH2() if err != nil { - return 0, errors.Wrap(err, "cannot convert validator pubkey to bytes") - } - - valAPICallOpts := ð2api.ValidatorsOpts{ - State: "head", - PubKeys: []eth2p0.BLSPubKey{valEth2}, + return 0, errors.Wrap(err, "cannot convert core pubkey to eth2 pubkey") } - rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) + rawValData, err := queryBeaconForValidator(ctx, eth2Cl, []eth2p0.BLSPubKey{valEth2}, nil) if err != nil { - return 0, errors.Wrap(err, "cannot fetch validators") + return 0, errors.Wrap(err, "cannot fetch validator index from beacon") } for _, val := range rawValData.Data { @@ -219,3 +280,18 @@ func fetchValidatorIndex(ctx context.Context, config exitConfig, eth2Cl eth2wrap return 0, errors.New("validator public key not found in beacon node response") } + +func queryBeaconForValidator(ctx context.Context, eth2Cl eth2wrap.Client, pubKeys []eth2p0.BLSPubKey, indices []eth2p0.ValidatorIndex) (*eth2api.Response[map[eth2p0.ValidatorIndex]*eth2v1.Validator], error) { + valAPICallOpts := ð2api.ValidatorsOpts{ + State: "head", + PubKeys: pubKeys, + Indices: indices, + } + + rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) + if err != nil { + return nil, errors.Wrap(err, "fetch validators from beacon") + } + + return rawValData, nil +} diff --git a/cmd/exit_sign_internal_test.go b/cmd/exit_sign_internal_test.go index 536129a122..7adf39f81f 100644 --- a/cmd/exit_sign_internal_test.go +++ b/cmd/exit_sign_internal_test.go @@ -66,7 +66,8 @@ func Test_runSubmitPartialExit(t *testing.T) { false, "test", 0, - "cannot convert validator pubkey to bytes", + "cannot convert core pubkey to eth2 pubkey", + false, ) }) @@ -78,6 +79,7 @@ func Test_runSubmitPartialExit(t *testing.T) { testutil.RandomEth2PubKey(t).String(), 0, "validator not present in cluster lock", + false, ) }) @@ -89,6 +91,7 @@ func Test_runSubmitPartialExit(t *testing.T) { "", 9999, "validator index not found in beacon node response", + 
false, ) }) @@ -99,7 +102,8 @@ func Test_runSubmitPartialExit(t *testing.T) { true, "test", 9999, - "cannot convert validator pubkey to bytes", + "cannot convert core pubkey to eth2 pubkey", + false, ) }) @@ -111,23 +115,27 @@ func Test_runSubmitPartialExit(t *testing.T) { testutil.RandomEth2PubKey(t).String(), 9999, "validator not present in cluster lock", + false, ) }) t.Run("main flow with pubkey", func(t *testing.T) { - runSubmitPartialExitFlowTest(t, false, false, "", 0, "") + runSubmitPartialExitFlowTest(t, false, false, "", 0, "", false) }) t.Run("main flow with validator index", func(t *testing.T) { - runSubmitPartialExitFlowTest(t, true, false, "", 0, "") + runSubmitPartialExitFlowTest(t, true, false, "", 0, "", false) }) t.Run("main flow with skipBeaconNodeCheck mode", func(t *testing.T) { - runSubmitPartialExitFlowTest(t, true, true, "", 0, "") + runSubmitPartialExitFlowTest(t, true, true, "", 0, "", false) + }) + t.Run("main flow with all mode", func(t *testing.T) { + runSubmitPartialExitFlowTest(t, false, false, "", 0, "", true) }) t.Run("config", Test_runSubmitPartialExit_Config) } -func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, skipBeaconNodeCheck bool, valPubkey string, valIndex uint64, errString string) { +func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, skipBeaconNodeCheck bool, valPubkey string, valIndex uint64, errString string, all bool) { t.Helper() t.Parallel() ctx := context.Background() @@ -202,6 +210,7 @@ func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, skipBeaconNodeCh ExitEpoch: 194048, BeaconNodeTimeout: 30 * time.Second, PublishTimeout: 10 * time.Second, + All: all, } index := uint64(0) @@ -279,7 +288,7 @@ func Test_runSubmitPartialExit_Config(t *testing.T) { { name: "Bad validator address", badValidatorAddr: true, - errData: "cannot convert validator pubkey to bytes", + errData: "cannot convert core pubkey to eth2 pubkey", }, } From 4f76adc6bbb6c6af79c80e682cfbf4bcb7c6d6be Mon Sep 17 
00:00:00 2001 From: Christina <156356273+cratiu222@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:26:58 +0300 Subject: [PATCH 29/89] docs: fix typos (#3236) Hello I fixed several minor typos. Br, Christina. category: docs ticket: none --- docs/architecture.md | 6 +++--- docs/branching.md | 6 +++--- docs/configuration.md | 2 +- docs/contributing.md | 6 +++--- docs/goguidelines.md | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/architecture.md b/docs/architecture.md index ce54210884..61fb86721d 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -151,7 +151,7 @@ Therefore, Charon v1.x will not work together with Charon v0.x. See *Version com ### Scheduler -The scheduler is the initiator of a duty in the core workflow. It resolves the which DVs in the cluster are active and +The scheduler is the initiator of a duty in the core workflow. It resolves which DVs in the cluster are active and is then responsible for starting a duty at the optimal time by calling the `fetcher`. DVs are identified by their root public key `PubKey`. @@ -325,7 +325,7 @@ type Entry struct { ValCommIdx int64 // validator committee index (0 for DutyProposer) } ``` -> ℹ️ Database entry fields are persistence friendly types and are not exported or used outside this component +> ℹ️ Database entry fields are persistence-friendly types and are not exported or used outside this component The database has the following indexes: - `Slot,DutyType,PubKey`: unique index for deduplication and idempotent inserts @@ -361,7 +361,7 @@ type DutyDB interface { ### Validator API The validator API provides a [beacon-node API](https://ethereum.github.io/beacon-APIs/#/ValidatorRequiredApi) to downstream VCs, intercepting some calls and proxying others directly to the upstream beacon node. -It mostly serves unsigned duty data requests from the `DutyDB` and sends the resulting partial signed duty objects to the `ParSigDB`. 
+It mostly serves unsigned duty data requests from the `DutyDB` and sends the resulting partially signed duty objects to the `ParSigDB`. Partial signed duty data values are defined as `ParSignedData` which extend `SignedData` values: ```go diff --git a/docs/branching.md b/docs/branching.md index 112681b6f1..56a8ce7444 100644 --- a/docs/branching.md +++ b/docs/branching.md @@ -33,7 +33,7 @@ The important aspects of the release process are: - Releases are cut from release branches, not the main branch. Release branches are named `main-v0.X`. - Release candidates, `v0.X.Y-rc[1-99]`, are created for each patch release from commits in the release branch. They are thoroughly tested both internally and externally before a release is created. - Critical patches and fixes to releases are cherry-picked from main to the release branch. -- The Charon binary version, `charon version`, are inferred from git tags at build time using `ldflags`, not hardcoded app/version versions. +- The Charon binary version, `charon version`, is inferred from git tags at build time using `ldflags`, not hardcoded app/version versions. - Hardcoded Charon app/version is only used to indicate branch type and major version, `v0.X-rc` for release branches or `v0.Y-dev` for main branch. The process to follow for the next v0.16.0 release is the following: @@ -43,7 +43,7 @@ The process to follow for the next v0.16.0 release is the following: 4. The dev team also avoids adding risky or large changes during this “pre-release” period. 5. When all relevant changes have been included in main, a new “release branch”. It must be called `main-v0.16`. - Release branches are called `main-v0.X` - - Release branches are high risk branches, and must be treated with the same security mindset as the `main` branch. + - Release branches are high-risk branches, and must be treated with the same security mindset as the `main` branch. 
- Note that github branch matching doesn’t support OR logic, so we chose a common `main*` prefix to identify all protected branches. 6. After the release branch has been created, the `main` branch app/version is manually updated to `v0.17-dev` and add `v0.17` to `version.Supported()` versions. - `v0.X-dev` indicates that the code is in the main branch. @@ -57,7 +57,7 @@ The process to follow for the next v0.16.0 release is the following: - Note that the `build-push-release` action should dynamically update the app/version to the value of the git tag when building the docker image. 9. Before a `v0.16.X` release is created, a `v0.16.X-rc[1-99]` release candidate needs to be created and thoroughly tested both internally and externally. 10. After a `v0.16.X` release was created, the release notes need to be created. - - The release github action does auto generate release notes. + - The release github action does auto-generate release notes. - If they are incorrect, manual release notes can be created via: `go run testutil/genchangelog/main.go --range=v0.15.0..v0.16.0`. Note that images are built and tagged for each commit on the main and release branch using the app/version tag, e.g. `v0.X-dev` for `main`, and `v0.X-rc` for release branches. Main branch commits are also tagged with `latest`. diff --git a/docs/configuration.md b/docs/configuration.md index 71fb0c6a5a..4d30b14ef8 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -76,7 +76,7 @@ The `cluster-lock.json` has the following schema: "16000000000" ], "lock_hash": "0xabcdef...abcedef", // Hash of the cluster definition and distributed validators. Uniquely identifies a cluster lock. - "signature_aggregate": "0xabcdef...abcedef", // BLS aggregate signature of the lock hash signed by all the key shares of all the distributed validators. Proves that the key shares exist and attested to being part of this cluster. 
+ "signature_aggregate": "0xabcdef...abcedef", // BLS aggregate signature of the lock hash signed by all the key shares of all the distributed validators. Proves that the key shares exist and attest to being part of this cluster. "node_signatures": ["0xabcdef...abcedef"] // Signatures of the lock hash by each operator. Proves that this lock file (and the validators) was generated by all the operators } ``` diff --git a/docs/contributing.md b/docs/contributing.md index a528467825..2ea143c1c2 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -72,7 +72,7 @@ Note: PRs can only be merged by obol-bulldozer bot. It is author's responsibilit ### PR Template - **PRs are always squash merged into main**. -- The PR title and body is used as the final squash-merged git commit message. +- The PR title and body are used as the final squash-merged git commit message. - The PR's original git commits are therefore lost (so naming isn't specified) - **PR title format** is defined as: - Following the [go team's commit format](https://github.com/golang/go/commits/master): `package[/path]: concise overview of change` @@ -80,7 +80,7 @@ Note: PRs can only be merged by obol-bulldozer bot. It is author's responsibilit - Prefix can be a single or double hierarchical package name, but not three or more. E.g. `app` , or `app/tracer`. - The rest of the title must be a concise high-level overview in the present tense and starting with lower case. - **PR body format** is defined as: - - Start with detailed description of the change. + - Start with a detailed description of the change. - Description should use proper grammar in present tense. - Ends with a list of tags (some required, others optional) (`^tag: value of this tag\n`): - `category`: required; one of: `refactor`, `bug`, `feature`, `docs`, `release`, `tidy`, `fixbuild`. @@ -123,7 +123,7 @@ pre-commit clean The **linter** used is [golangci-lint](https://golangci-lint.run/). 
It runs as part of the githooks and is configured in [.golangci.yml](../.golangci.yml) -Different **dev tools** are used in throughout the code base and are defined and installed from [tools.go](../tools.go). To install the dev tools run: `go generate tools.go` +Different **dev tools** are used throughout the code base and are defined and installed from [tools.go](../tools.go). To install the dev tools run: `go generate tools.go` ## Code Review We tend to closely follow the following code review structure: diff --git a/docs/goguidelines.md b/docs/goguidelines.md index 78eabeb919..5d1fe4602a 100644 --- a/docs/goguidelines.md +++ b/docs/goguidelines.md @@ -29,7 +29,7 @@ thousand ways to approach a problem. The Charon codebase doesn't follow the comm Instead, it follows a more procedural style for a focus on *functions and values*, [#AlgorthimsAndDataStructuresOverTypes](https://en.wikipedia.org/wiki/Object-oriented_programming#cite_note-48). This style can be summarized by the following tradeoffs: - Prefer **unexported over exported** types and functions. [#WriteShyCode](https://dave.cheney.net/practical-go/presentations/qcon-china.html#_package_design) -- Prefer **functions over methods** as methods lends itself to stateful code while functions are stateless. [#FunctionsOverMethods](https://kellysutton.com/2018/07/13/simple-made-easy-methods-vs-functions.html) +- Prefer **functions over methods** as methods lend themselves to stateful code while functions are stateless. [#FunctionsOverMethods](https://kellysutton.com/2018/07/13/simple-made-easy-methods-vs-functions.html) - Prefer **structs over objects** as structs tend to be more on the immutable data side while “objects” tend to be mutable and combine data with logic. [#TheValueOfValues](https://www.youtube.com/watch?v=-I-VpPMzG7c) - Prefer **explicit over implement** as explicit code doesn’t hide anything while implicit code does.
- Prefer **immutability over mutability** as that results in code that is easier to reason about and debug and compose. @@ -146,7 +146,7 @@ Please try to inform your decisions by the following style for improved consiste - Note that passing pointers around is in general not faster than non-pointers (except in some edge cases). ### Naming: - - Data labels should be snake_case. This include json fields, structured logging fields, prometheus labels etc. + - Data labels should be snake_case. This includes json fields, structured logging fields, prometheus labels etc. - Go package names should be concise; aim for a single noun (`validator`) or two concatenated nouns (`validatorapi`). Avoid underscores or three word nouns. ### Declarations: From 4b856b75724902433794bbedc1f54066ca047404 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 07:55:04 +0000 Subject: [PATCH 30/89] build(deps): Bump go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp from 0.54.0 to 0.55.0 (#3284) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) from 0.54.0 to 0.55.0.
Release notes

Sourced from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp's releases.

Release v1.30.0/v0.55.0/v0.24.0/v0.10.0/v0.5.0/v0.3.0/v0.2.0

Overview

Added

  • Add NewProducer to go.opentelemetry.io/contrib/instrumentation/runtime, which allows collecting the go.schedule.duration histogram metric from the Go runtime. (#5991)
  • Add gRPC protocol support for OTLP log exporter in go.opentelemetry.io/contrib/exporters/autoexport. (#6083)

Removed

Fixed

  • Superfluous call to WriteHeader when flushing after setting a status code in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp. (#6074)
  • Superfluous call to WriteHeader when writing the response body after setting a status code in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp. (#6055)

What's Changed

... (truncated)

Changelog

Sourced from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp's changelog.

[1.30.0/0.55.0/0.24.0/0.10.0/0.5.0/0.3.0/0.2.0] - 2024-09-10

Added

  • Add NewProducer to go.opentelemetry.io/contrib/instrumentation/runtime, which allows collecting the go.schedule.duration histogram metric from the Go runtime. (#5991)
  • Add gRPC protocol support for OTLP log exporter in go.opentelemetry.io/contrib/exporters/autoexport. (#6083)

Removed

Fixed

  • Superfluous call to WriteHeader when flushing after setting a status code in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp. (#6074)
  • Superfluous call to WriteHeader when writing the response body after setting a status code in go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp. (#6055)
Commits
  • 4ccc9c6 Release v1.30.0/v0.55.0/v0.24.0/v0.10.0/v0.5.0/v0.3.0/v0.2.0 (#6106)
  • d312469 fix(deps): update module github.com/golangci/golangci-lint to v1.61.0 (#6101)
  • 5425de9 Fix gosec lint issues (#6107)
  • 774b20e chore(deps): update kubernetes packages to v0.31.0 (#5926)
  • 38e6e1e chore(deps): update github.com/lufia/plan9stats digest to 873cd01 (#6098)
  • 9309161 fix(deps): update module google.golang.org/grpc to v1.66.1 (#6103)
  • 9a46844 fix(deps): update module github.com/aws/aws-sdk-go-v2/service/dynamodb to v1....
  • f43f59e fix(deps): update golang.org/x/exp digest to 701f63a (#6099)
  • 53b99ae feat: add grpc support for log autoexport (#6083)
  • 23e6f6c chore(deps): update module github.com/go-playground/validator/v10 to v10.22.1...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp&package-manager=go_modules&previous-version=0.54.0&new-version=0.55.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d2e92fc72b..5a9ea3e24c 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 go.opentelemetry.io/otel v1.30.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 diff --git a/go.sum b/go.sum index cfaec9b9bf..c08251851c 100644 --- a/go.sum +++ b/go.sum @@ -544,8 +544,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= From c882583a37f2c6dcd70c29b92e07f6e9f0fa1023 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 
07:55:44 +0000 Subject: [PATCH 31/89] build(deps): Bump github.com/prometheus/client_golang from 1.20.3 to 1.20.4 (#3289) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.20.3 to 1.20.4.
Release notes

Sourced from github.com/prometheus/client_golang's releases.

v1.20.4

  • [BUGFIX] histograms: Fix a possible data race when appending exemplars vs metrics gather. #1623
Changelog

Sourced from github.com/prometheus/client_golang's changelog.

Unreleased

  • [BUGFIX] histograms: Fix possible data race when appending exemplars vs metrics gather. #1623
Commits
  • 05fcde9 Merge pull request #1623 from krajorama/data-race-in-histogram-write
  • 209f4c0 Add changelog
  • 1e398cc native histogram: Fix race between Write and addExemplar
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/prometheus/client_golang&package-manager=go_modules&previous-version=1.20.3&new-version=1.20.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5a9ea3e24c..8b5a6ca1a2 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.13.0 github.com/pelletier/go-toml/v2 v2.2.3 - github.com/prometheus/client_golang v1.20.3 + github.com/prometheus/client_golang v1.20.4 github.com/prometheus/client_model v0.6.1 github.com/protolambda/eth2-shuffle v1.1.0 github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e diff --git a/go.sum b/go.sum index c08251851c..8897056dfe 100644 --- a/go.sum +++ b/go.sum @@ -422,8 +422,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= From 744f9b65a5c4a4e15684cb623ec19378f5af3d2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gianguido=20Sor=C3=A0?= Date: Wed, 18 Sep 2024 17:03:42 +0200 Subject: [PATCH 32/89] *: disable intrange linter (#3282) Go v1.23 allows to range over int slices without calling `len` 
first. Since it's a low-importance change, we can disable intrange linter to avoid being blocked by it during pre-commit. category: misc ticket: none --- .golangci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.golangci.yml b/.golangci.yml index 780e633046..8bb3fa8413 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -151,6 +151,7 @@ linters: enable-all: true disable: # Keep disabled + - intrange - containedctx - contextcheck - cyclop From 4f348838e31eddc69b1ca97a853b803eb3cd3536 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:55:45 +0200 Subject: [PATCH 33/89] cmd: broadcast all exits (#3288) Second part of adding the `--all` flag for exits. This PR is for the `charon exit broadcast` command. Additionally, a flag `exitFromDir` is introduced so point to a directory with exit files. category: feature ticket: #3243 --- cmd/exit.go | 6 + cmd/exit_broadcast.go | 193 ++++++++++++++++++++++------ cmd/exit_broadcast_internal_test.go | 64 +++++++-- 3 files changed, 207 insertions(+), 56 deletions(-) diff --git a/cmd/exit.go b/cmd/exit.go index 916ec9d526..95b500090e 100644 --- a/cmd/exit.go +++ b/cmd/exit.go @@ -33,6 +33,7 @@ type exitConfig struct { PlaintextOutput bool BeaconNodeTimeout time.Duration ExitFromFilePath string + ExitFromFileDir string Log log.Config All bool } @@ -60,6 +61,7 @@ const ( validatorPubkey exitEpoch exitFromFile + exitFromDir beaconNodeTimeout fetchedExitPath publishTimeout @@ -85,6 +87,8 @@ func (ef exitFlag) String() string { return "exit-epoch" case exitFromFile: return "exit-from-file" + case exitFromDir: + return "exit-from-dir" case beaconNodeTimeout: return "beacon-node-timeout" case fetchedExitPath: @@ -135,6 +139,8 @@ func bindExitFlags(cmd *cobra.Command, config *exitConfig, flags []exitCLIFlag) cmd.Flags().Uint64Var(&config.ExitEpoch, exitEpoch.String(), 162304, maybeRequired("Exit epoch at which the validator will exit, must be the same across all the 
partial exits.")) case exitFromFile: cmd.Flags().StringVar(&config.ExitFromFilePath, exitFromFile.String(), "", maybeRequired("Retrieves a signed exit message from a pre-prepared file instead of --publish-address.")) + case exitFromDir: + cmd.Flags().StringVar(&config.ExitFromFileDir, exitFromDir.String(), "", maybeRequired("Retrieves signed exit messages from pre-prepared files in a directory instead of --publish-address.")) case beaconNodeTimeout: cmd.Flags().DurationVar(&config.BeaconNodeTimeout, beaconNodeTimeout.String(), 30*time.Second, maybeRequired("Timeout for beacon node HTTP calls.")) case fetchedExitPath: diff --git a/cmd/exit_broadcast.go b/cmd/exit_broadcast.go index a93ae492c5..6f28ac01ad 100644 --- a/cmd/exit_broadcast.go +++ b/cmd/exit_broadcast.go @@ -4,8 +4,11 @@ package cmd import ( "context" + "encoding/hex" "encoding/json" + "fmt" "os" + "path/filepath" "strings" "time" @@ -15,6 +18,7 @@ import ( "github.com/spf13/cobra" "github.com/obolnetwork/charon/app/errors" + "github.com/obolnetwork/charon/app/eth2wrap" "github.com/obolnetwork/charon/app/k1util" "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/obolapi" @@ -52,15 +56,44 @@ func newBcastFullExitCmd(runFunc func(context.Context, exitConfig) error) *cobra {lockFilePath, false}, {validatorKeysDir, false}, {exitEpoch, false}, - {validatorPubkey, true}, + {validatorPubkey, false}, {beaconNodeEndpoints, true}, {exitFromFile, false}, + {exitFromDir, false}, {beaconNodeTimeout, false}, {publishTimeout, false}, }) bindLogFlags(cmd.Flags(), &config.Log) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + valPubkPresent := cmd.Flags().Lookup(validatorPubkey.String()).Changed + exitFilePresent := cmd.Flags().Lookup(exitFromFile.String()).Changed + exitDirPresent := cmd.Flags().Lookup(exitFromDir.String()).Changed + + if !valPubkPresent && !config.All { + //nolint:revive,perfsprint // we use our own version of the errors package; keep consistency with other
checks. + return errors.New(fmt.Sprintf("%s must be specified when exiting single validator.", validatorPubkey.String())) + } + + if config.All && valPubkPresent { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("%s should not be specified when %s is, as it is obsolete and misleading.", validatorPubkey.String(), all.String())) + } + + if valPubkPresent && exitDirPresent { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("if you want to specify exit file for single validator, you must provide %s and not %s.", exitFromFile.String(), exitFromDir.String())) + } + + if config.All && exitFilePresent { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("if you want to specify exit file directory for all validators, you must provide %s and not %s.", exitFromDir.String(), exitFromFile.String())) + } + + return nil + }) + return cmd } @@ -75,66 +108,142 @@ func runBcastFullExit(ctx context.Context, config exitConfig) error { return errors.Wrap(err, "could not load cluster-lock.json") } - validator := core.PubKey(config.ValidatorPubkey) - if _, err := validator.Bytes(); err != nil { - return errors.Wrap(err, "cannot convert validator pubkey to bytes") - } - - ctx = log.WithCtx(ctx, z.Str("validator", validator.String())) - eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte(cl.GetForkVersion())) if err != nil { return errors.Wrap(err, "cannot create eth2 client for specified beacon node") } - var fullExit eth2p0.SignedVoluntaryExit - maybeExitFilePath := strings.TrimSpace(config.ExitFromFilePath) - - if len(maybeExitFilePath) != 0 { - log.Info(ctx, "Retrieving full exit message from path", z.Str("path", maybeExitFilePath)) - fullExit, err = exitFromPath(maybeExitFilePath) + fullExits := make(map[core.PubKey]eth2p0.SignedVoluntaryExit) + if config.All { + if config.ExitFromFileDir != "" { + 
entries, err := os.ReadDir(config.ExitFromFileDir) + if err != nil { + return errors.Wrap(err, "could not read exits directory") + } + for _, entry := range entries { + if !strings.HasPrefix(entry.Name(), "exit-") { + continue + } + exit, err := fetchFullExit(ctx, filepath.Join(config.ExitFromFileDir, entry.Name()), config, cl, identityKey, "") + if err != nil { + return errors.Wrap(err, "fetch full exit for all from dir") + } + + validatorPubKey, err := validatorPubKeyFromFileName(entry.Name()) + if err != nil { + return err + } + + fullExits[validatorPubKey] = exit + } + } else { + for _, validator := range cl.GetValidators() { + validatorPubKeyHex := fmt.Sprintf("0x%x", validator.GetPublicKey()) + + valCtx := log.WithCtx(ctx, z.Str("validator", validatorPubKeyHex)) + + exit, err := fetchFullExit(valCtx, "", config, cl, identityKey, validatorPubKeyHex) + if err != nil { + return errors.Wrap(err, "fetch full exit for all from public key") + } + validatorPubKey, err := core.PubKeyFromBytes(validator.GetPublicKey()) + if err != nil { + return errors.Wrap(err, "convert public key for validator") + } + fullExits[validatorPubKey] = exit + } + } } else { - log.Info(ctx, "Retrieving full exit message from publish address") - fullExit, err = exitFromObolAPI(ctx, config.ValidatorPubkey, config.PublishAddress, config.PublishTimeout, cl, identityKey) + exit, err := fetchFullExit(ctx, strings.TrimSpace(config.ExitFromFilePath), config, cl, identityKey, config.ValidatorPubkey) + if err != nil { + return errors.Wrap(err, "fetch full exit for public key") + } + var validatorPubKey core.PubKey + if len(strings.TrimSpace(config.ExitFromFilePath)) != 0 { + validatorPubKey, err = validatorPubKeyFromFileName(config.ExitFromFilePath) + if err != nil { + return err + } + } else { + validatorPubKey = core.PubKey(config.ValidatorPubkey) + } + fullExits[validatorPubKey] = exit } - if err != nil { - return err - } + return broadcastExitsToBeacon(ctx, eth2Cl, fullExits) +} - // parse 
validator public key - rawPkBytes, err := validator.Bytes() +func validatorPubKeyFromFileName(fileName string) (core.PubKey, error) { + fileNameChecked := filepath.Base(fileName) + fileExtension := filepath.Ext(fileNameChecked) + validatorPubKeyHex := strings.TrimPrefix(strings.TrimSuffix(fileNameChecked, fileExtension), "exit-0x") + validatorPubKeyBytes, err := hex.DecodeString(validatorPubKeyHex) if err != nil { - return errors.Wrap(err, "could not serialize validator key bytes") + return "", errors.Wrap(err, "cannot decode public key hex from file name") } - - pubkey, err := tblsconv.PubkeyFromBytes(rawPkBytes) + validatorPubKey, err := core.PubKeyFromBytes(validatorPubKeyBytes) if err != nil { - return errors.Wrap(err, "could not convert validator key bytes to BLS public key") + return "", errors.Wrap(err, "cannot decode core public key from hex") } - // parse signature - signature, err := tblsconv.SignatureFromBytes(fullExit.Signature[:]) - if err != nil { - return errors.Wrap(err, "could not parse BLS signature from bytes") - } + return validatorPubKey, nil +} - exitRoot, err := sigDataForExit( - ctx, - *fullExit.Message, - eth2Cl, - fullExit.Message.Epoch, - ) - if err != nil { - return errors.Wrap(err, "cannot calculate hash tree root for exit message for verification") +func fetchFullExit(ctx context.Context, exitFilePath string, config exitConfig, cl *manifestpb.Cluster, identityKey *k1.PrivateKey, validatorPubKey string) (eth2p0.SignedVoluntaryExit, error) { + var fullExit eth2p0.SignedVoluntaryExit + var err error + + if len(exitFilePath) != 0 { + log.Info(ctx, "Retrieving full exit message from path", z.Str("path", exitFilePath)) + fullExit, err = exitFromPath(exitFilePath) + } else { + log.Info(ctx, "Retrieving full exit message from publish address") + fullExit, err = exitFromObolAPI(ctx, validatorPubKey, config.PublishAddress, config.PublishTimeout, cl, identityKey) } - if err := tbls.Verify(pubkey, exitRoot[:], signature); err != nil { - return 
errors.Wrap(err, "exit message signature not verified") + return fullExit, err +} + +func broadcastExitsToBeacon(ctx context.Context, eth2Cl eth2wrap.Client, exits map[core.PubKey]eth2p0.SignedVoluntaryExit) error { + for validator, fullExit := range exits { + valCtx := log.WithCtx(ctx, z.Str("validator", validator.String())) + + rawPkBytes, err := validator.Bytes() + if err != nil { + return errors.Wrap(err, "could not serialize validator key bytes") + } + + pubkey, err := tblsconv.PubkeyFromBytes(rawPkBytes) + if err != nil { + return errors.Wrap(err, "could not convert validator key bytes to BLS public key") + } + + // parse signature + signature, err := tblsconv.SignatureFromBytes(fullExit.Signature[:]) + if err != nil { + return errors.Wrap(err, "could not parse BLS signature from bytes") + } + + exitRoot, err := sigDataForExit( + valCtx, + *fullExit.Message, + eth2Cl, + fullExit.Message.Epoch, + ) + if err != nil { + return errors.Wrap(err, "cannot calculate hash tree root for exit message for verification") + } + + if err := tbls.Verify(pubkey, exitRoot[:], signature); err != nil { + return errors.Wrap(err, "exit message signature not verified") + } } - if err := eth2Cl.SubmitVoluntaryExit(ctx, &fullExit); err != nil { - return errors.Wrap(err, "could not submit voluntary exit") + for validator, fullExit := range exits { + valCtx := log.WithCtx(ctx, z.Str("validator", validator.String())) + if err := eth2Cl.SubmitVoluntaryExit(valCtx, &fullExit); err != nil { + return errors.Wrap(err, "could not submit voluntary exit") + } } return nil diff --git a/cmd/exit_broadcast_internal_test.go b/cmd/exit_broadcast_internal_test.go index 5c2c7b3fe8..88bf2ba5b6 100644 --- a/cmd/exit_broadcast_internal_test.go +++ b/cmd/exit_broadcast_internal_test.go @@ -31,16 +31,24 @@ func Test_runBcastFullExitCmd(t *testing.T) { t.Parallel() t.Run("main flow from api", func(t *testing.T) { t.Parallel() - testRunBcastFullExitCmdFlow(t, false) + testRunBcastFullExitCmdFlow(t, false, 
false) }) t.Run("main flow from file", func(t *testing.T) { t.Parallel() - testRunBcastFullExitCmdFlow(t, true) + testRunBcastFullExitCmdFlow(t, true, false) + }) + t.Run("main flow from api for all", func(t *testing.T) { + t.Parallel() + testRunBcastFullExitCmdFlow(t, false, true) + }) + t.Run("main flow from file for all", func(t *testing.T) { + t.Parallel() + testRunBcastFullExitCmdFlow(t, true, true) }) t.Run("config", Test_runBcastFullExitCmd_Config) } -func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { +func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool, all bool) { t.Helper() ctx := context.Background() @@ -114,7 +122,6 @@ func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { config := exitConfig{ BeaconNodeEndpoints: []string{beaconMock.Address()}, - ValidatorPubkey: lock.Validators[0].PublicKeyHex(), PrivateKeyPath: filepath.Join(baseDir, "charon-enr-private-key"), ValidatorKeysDir: filepath.Join(baseDir, "validator_keys"), LockFilePath: filepath.Join(baseDir, "cluster-lock.json"), @@ -124,6 +131,12 @@ func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { PublishTimeout: 10 * time.Second, } + if all { + config.All = all + } else { + config.ValidatorPubkey = lock.Validators[0].PublicKeyHex() + } + require.NoError(t, runSignPartialExit(ctx, config), "operator index: %v", idx) } @@ -131,7 +144,6 @@ func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { config := exitConfig{ BeaconNodeEndpoints: []string{beaconMock.Address()}, - ValidatorPubkey: lock.Validators[0].PublicKeyHex(), PrivateKeyPath: filepath.Join(baseDir, "charon-enr-private-key"), ValidatorKeysDir: filepath.Join(baseDir, "validator_keys"), LockFilePath: filepath.Join(baseDir, "cluster-lock.json"), @@ -141,17 +153,39 @@ func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { PublishTimeout: 10 * time.Second, } + if all { + config.All = all + } else { + config.ValidatorPubkey = lock.Validators[0].PublicKeyHex() + } + if fromFile { - exit, 
err := exitFromObolAPI(ctx, lock.Validators[0].PublicKeyHex(), srv.URL, 10*time.Second, cl, enrs[0]) - require.NoError(t, err) + if all { + for _, validator := range lock.Validators { + validatorPublicKey := validator.PublicKeyHex() + exit, err := exitFromObolAPI(ctx, validatorPublicKey, srv.URL, 10*time.Second, cl, enrs[0]) + require.NoError(t, err) + + exitBytes, err := json.Marshal(exit) + require.NoError(t, err) + + exitPath := filepath.Join(baseDir, fmt.Sprintf("exit-%s.json", validatorPublicKey)) + require.NoError(t, os.WriteFile(exitPath, exitBytes, 0o755)) + } + config.ExitFromFileDir = baseDir + } else { + validatorPublicKey := lock.Validators[0].PublicKeyHex() + exit, err := exitFromObolAPI(ctx, validatorPublicKey, srv.URL, 10*time.Second, cl, enrs[0]) + require.NoError(t, err) - exitBytes, err := json.Marshal(exit) - require.NoError(t, err) + exitBytes, err := json.Marshal(exit) + require.NoError(t, err) - exitPath := filepath.Join(baseDir, "exit.json") - require.NoError(t, os.WriteFile(exitPath, exitBytes, 0o755)) + exitPath := filepath.Join(baseDir, fmt.Sprintf("exit-%s.json", validatorPublicKey)) + require.NoError(t, os.WriteFile(exitPath, exitBytes, 0o755)) - config.ExitFromFilePath = exitPath + config.ExitFromFilePath = exitPath + } } require.NoError(t, runBcastFullExit(ctx, config)) @@ -168,6 +202,7 @@ func Test_runBcastFullExitCmd_Config(t *testing.T) { badValidatorAddr bool badExistingExitPath bool errData string + all bool } tests := []test{ @@ -194,7 +229,7 @@ func Test_runBcastFullExitCmd_Config(t *testing.T) { { name: "Bad validator address", badValidatorAddr: true, - errData: "cannot convert validator pubkey to bytes", + errData: "validator pubkey to bytes", }, { name: "Bad existing exit file", @@ -289,10 +324,11 @@ func Test_runBcastFullExitCmd_Config(t *testing.T) { ExitEpoch: 0, BeaconNodeTimeout: 30 * time.Second, PublishTimeout: 10 * time.Second, + All: test.all, } if test.badExistingExitPath { - path := filepath.Join(baseDir, 
"exit.json") + path := filepath.Join(baseDir, fmt.Sprintf("exit-%s.json", lock.Validators[0].PublicKeyHex())) require.NoError(t, os.WriteFile(path, []byte("bad"), 0o755)) config.ExitFromFilePath = path } From 40426082e256ca1e365fc6fd41adaef63632ac14 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:55:53 +0200 Subject: [PATCH 34/89] cmd: fetch all exits (#3291) Third part of adding the `--all` flag for exits. This PR is for the charon exit fetch command. category: feature ticket: #3243 --- cmd/exit_fetch.go | 75 ++++++++++++++++++++++++++------- cmd/exit_fetch_internal_test.go | 15 +++++-- 2 files changed, 72 insertions(+), 18 deletions(-) diff --git a/cmd/exit_fetch.go b/cmd/exit_fetch.go index 67f9566961..0a3e251199 100644 --- a/cmd/exit_fetch.go +++ b/cmd/exit_fetch.go @@ -45,13 +45,30 @@ func newFetchExitCmd(runFunc func(context.Context, exitConfig) error) *cobra.Com {publishAddress, false}, {privateKeyPath, false}, {lockFilePath, false}, - {validatorPubkey, true}, + {validatorPubkey, false}, + {all, false}, {fetchedExitPath, false}, {publishTimeout, false}, }) bindLogFlags(cmd.Flags(), &config.Log) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + valPubkPresent := cmd.Flags().Lookup(validatorPubkey.String()).Changed + + if !valPubkPresent && !config.All { + //nolint:revive,perfsprint // we use our own version of the errors package; keep consistency with other checks. + return errors.New(fmt.Sprintf("%s must be specified when exiting single validator.", validatorPubkey.String())) + } + + if config.All && valPubkPresent { + //nolint:revive // we use our own version of the errors package. 
+ return errors.New(fmt.Sprintf("%s should not be specified when %s is, as it is obsolete and misleading.", validatorPubkey.String(), all.String())) + } + + return nil + }) + return cmd } @@ -79,33 +96,61 @@ func runFetchExit(ctx context.Context, config exitConfig) error { return errors.Wrap(err, "could not load cluster-lock.json") } - validator := core.PubKey(config.ValidatorPubkey) - if _, err := validator.Bytes(); err != nil { - return errors.Wrap(err, "cannot convert validator pubkey to bytes") - } - - ctx = log.WithCtx(ctx, z.Str("validator", validator.String())) - oAPI, err := obolapi.New(config.PublishAddress, obolapi.WithTimeout(config.PublishTimeout)) if err != nil { return errors.Wrap(err, "could not create obol api client") } - log.Info(ctx, "Retrieving full exit message") - shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) if err != nil { return errors.Wrap(err, "could not determine operator index from cluster lock for supplied identity key") } - fullExit, err := oAPI.GetFullExit(ctx, config.ValidatorPubkey, cl.GetInitialMutationHash(), shareIdx, identityKey) - if err != nil { - return errors.Wrap(err, "could not load full exit data from Obol API") + if config.All { + for _, validator := range cl.GetValidators() { + validatorPubKeyHex := fmt.Sprintf("0x%x", validator.GetPublicKey()) + + valCtx := log.WithCtx(ctx, z.Str("validator", validatorPubKeyHex)) + + log.Info(valCtx, "Retrieving full exit message") + + fullExit, err := oAPI.GetFullExit(valCtx, validatorPubKeyHex, cl.GetInitialMutationHash(), shareIdx, identityKey) + if err != nil { + return errors.Wrap(err, "could not load full exit data from Obol API") + } + + err = writeExitToFile(valCtx, validatorPubKeyHex, config.FetchedExitPath, fullExit) + if err != nil { + return err + } + } + } else { + validator := core.PubKey(config.ValidatorPubkey) + if _, err := validator.Bytes(); err != nil { + return errors.Wrap(err, "cannot convert validator pubkey to bytes") + } + + ctx = 
log.WithCtx(ctx, z.Str("validator", validator.String())) + + log.Info(ctx, "Retrieving full exit message") + + fullExit, err := oAPI.GetFullExit(ctx, config.ValidatorPubkey, cl.GetInitialMutationHash(), shareIdx, identityKey) + if err != nil { + return errors.Wrap(err, "could not load full exit data from Obol API") + } + + err = writeExitToFile(ctx, config.ValidatorPubkey, config.FetchedExitPath, fullExit) + if err != nil { + return err + } } - fetchedExitFname := fmt.Sprintf("exit-%s.json", config.ValidatorPubkey) + return nil +} - fetchedExitPath := filepath.Join(config.FetchedExitPath, fetchedExitFname) +func writeExitToFile(ctx context.Context, valPubKey string, exitPath string, fullExit obolapi.ExitBlob) error { + fetchedExitFname := fmt.Sprintf("exit-%s.json", valPubKey) + fetchedExitPath := filepath.Join(exitPath, fetchedExitFname) exitData, err := json.Marshal(fullExit.SignedExitMessage) if err != nil { diff --git a/cmd/exit_fetch_internal_test.go b/cmd/exit_fetch_internal_test.go index f34d1015a0..8c690adea2 100644 --- a/cmd/exit_fetch_internal_test.go +++ b/cmd/exit_fetch_internal_test.go @@ -26,12 +26,19 @@ import ( func Test_runFetchExit(t *testing.T) { t.Parallel() - t.Run("full flow", Test_runFetchExitFullFlow) + t.Run("full flow", func(t *testing.T) { + t.Parallel() + testRunFetchExitFullFlow(t, false) + }) + t.Run("full flow all", func(t *testing.T) { + t.Parallel() + testRunFetchExitFullFlow(t, true) + }) t.Run("bad out dir", Test_runFetchExitBadOutDir) } -func Test_runFetchExitFullFlow(t *testing.T) { - t.Parallel() +func testRunFetchExitFullFlow(t *testing.T, all bool) { + t.Helper() ctx := context.Background() valAmt := 100 @@ -106,6 +113,7 @@ func Test_runFetchExitFullFlow(t *testing.T) { ExitEpoch: 194048, BeaconNodeTimeout: 30 * time.Second, PublishTimeout: 10 * time.Second, + All: all, } require.NoError(t, runSignPartialExit(ctx, config), "operator index: %v", idx) @@ -120,6 +128,7 @@ func Test_runFetchExitFullFlow(t *testing.T) { 
PublishAddress: srv.URL, FetchedExitPath: root, PublishTimeout: 10 * time.Second, + All: all, } require.NoError(t, runFetchExit(ctx, config)) From 053a8c50504cafbb1e9bd74fcec1a1e1e33da98b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 09:56:22 +0000 Subject: [PATCH 35/89] build(deps): Bump chainsafe/lodestar from v1.21.0 to v1.22.0 in /testutil/compose/static/lodestar (#3293) Bumps chainsafe/lodestar from v1.21.0 to v1.22.0. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=chainsafe/lodestar&package-manager=docker&previous-version=v1.21.0&new-version=v1.22.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- testutil/compose/static/lodestar/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testutil/compose/static/lodestar/Dockerfile b/testutil/compose/static/lodestar/Dockerfile index 57430c877c..9755fe549f 100644 --- a/testutil/compose/static/lodestar/Dockerfile +++ b/testutil/compose/static/lodestar/Dockerfile @@ -1,4 +1,4 @@ -FROM chainsafe/lodestar:v1.21.0 +FROM chainsafe/lodestar:v1.22.0 RUN apt-get update && apt-get install -y curl jq wget From e828f1622e2bf048a65cea548e3abbd2179d0b0b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 07:35:26 +0000 Subject: [PATCH 36/89] build(deps): Bump attestant/vouch from 1.8.2 to 1.9.0 in /testutil/compose/static/vouch (#3294) Bumps attestant/vouch from 1.8.2 to 1.9.0. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=attestant/vouch&package-manager=docker&previous-version=1.8.2&new-version=1.9.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- testutil/compose/static/vouch/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testutil/compose/static/vouch/Dockerfile b/testutil/compose/static/vouch/Dockerfile index 614157d3b1..6415308849 100644 --- a/testutil/compose/static/vouch/Dockerfile +++ b/testutil/compose/static/vouch/Dockerfile @@ -1,6 +1,6 @@ FROM wealdtech/ethdo:1.35.2 as ethdo -FROM attestant/vouch:1.8.2 +FROM attestant/vouch:1.9.0 COPY --from=ethdo /app/ethdo /app/ethdo From 159d6f2fdc4530d6bf7c7ac14d0d19331dd741db Mon Sep 17 00:00:00 2001 From: Andrei Smirnov Date: Tue, 1 Oct 2024 17:33:03 +0300 Subject: [PATCH 37/89] dkg: fixed TestSyncFlow (#3309) Possibly fixed the flakey test. category: test ticket: #3308 --- dkg/dkg_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dkg/dkg_test.go b/dkg/dkg_test.go index b830c3d8c1..97e1dd2bfb 100644 --- a/dkg/dkg_test.go +++ b/dkg/dkg_test.go @@ -6,6 +6,7 @@ import ( "context" "encoding/hex" "encoding/json" + "errors" "fmt" "math/rand" "net/http" @@ -537,7 +538,9 @@ func TestSyncFlow(t *testing.T) { var disconnectedCount int for err := range dkgErrChan { testutil.SkipIfBindErr(t, err) - require.NoError(t, err) + if !errors.Is(err, context.Canceled) { + require.NoError(t, err) + } disconnectedCount++ if disconnectedCount == test.nodes { break From 72549c6ec8c1b716edb4063b012252af62825184 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Tue, 1 Oct 2024 16:33:35 +0200 Subject: [PATCH 38/89] cmd: move threshold check to CLI level (#3297) A [recent commit](https://github.com/ObolNetwork/charon/commit/98b84e11548e7267e2440fd6f582a9e8fcdfe970) introduced a misbehavior when omitting the optional `--threshold` flag of `create dkg` and `create cluster` commands. Because the threshold configuration is tested before the threshold variable is assigned to the default value `ceil(2*n/3)`, the flag is not optional anymore. 
This PR fixes this bug by moving the checks at the CLI level and by updating the corresponding tests accordingly. It also adds an input validation check on the [`ThresholdSplit`](https://github.com/ObolNetwork/charon/blob/ced30abb5a8c168b358a9bfc976fbe23927d72de/tbls/herumi.go#L133) and [`ThresholdSplitInsecure`](https://github.com/ObolNetwork/charon/blob/ced30abb5a8c168b358a9bfc976fbe23927d72de/tbls/herumi.go#L83) functions to ensure they are called with a threshold parameter greater than 1. category: bug ticket: none --- cmd/createcluster.go | 27 ++++--- cmd/createcluster_internal_test.go | 96 +++++++++++++++++++------ cmd/createdkg.go | 33 +++++---- cmd/createdkg_internal_test.go | 109 ++++++++++++++++++++++++----- tbls/herumi.go | 8 +++ 5 files changed, 210 insertions(+), 63 deletions(-) diff --git a/cmd/createcluster.go b/cmd/createcluster.go index 249feec920..065c713af1 100644 --- a/cmd/createcluster.go +++ b/cmd/createcluster.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "math" "net/url" "os" "path" @@ -100,6 +99,22 @@ func newCreateClusterCmd(runFunc func(context.Context, io.Writer, clusterConfig) bindClusterFlags(cmd.Flags(), &conf) bindInsecureFlags(cmd.Flags(), &conf.InsecureKeys) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + thresholdPresent := cmd.Flags().Lookup("threshold").Changed + + if thresholdPresent { + if conf.Threshold < minThreshold { + return errors.New("threshold must be greater than 1", z.Int("threshold", conf.Threshold), z.Int("min", minThreshold)) + } + if conf.Threshold > conf.NumNodes { + return errors.New("threshold cannot be greater than number of operators", + z.Int("threshold", conf.Threshold), z.Int("operators", conf.NumNodes)) + } + } + + return nil + }) + return cmd } @@ -374,16 +389,6 @@ func validateCreateConfig(ctx context.Context, conf clusterConfig) error { return errors.New("number of operators is below minimum", z.Int("operators", conf.NumNodes), z.Int("min", minNodes)) } - // Check for 
threshold parameter - minThreshold := int(math.Ceil(float64(conf.NumNodes*2) / 3)) - if conf.Threshold < minThreshold { - return errors.New("threshold cannot be smaller than BFT quorum", z.Int("threshold", conf.Threshold), z.Int("min", minThreshold)) - } - if conf.Threshold > conf.NumNodes { - return errors.New("threshold cannot be greater than number of operators", - z.Int("threshold", conf.Threshold), z.Int("operators", conf.NumNodes)) - } - return nil } diff --git a/cmd/createcluster_internal_test.go b/cmd/createcluster_internal_test.go index 95be0bad46..a611206c16 100644 --- a/cmd/createcluster_internal_test.go +++ b/cmd/createcluster_internal_test.go @@ -250,26 +250,6 @@ func TestCreateCluster(t *testing.T) { }, }, }, - { - Name: "threshold greater than the number of operators", - Config: clusterConfig{ - NumNodes: 4, - Threshold: 5, - NumDVs: 1, - Network: defaultNetwork, - }, - expectedErr: "threshold cannot be greater than number of operators", - }, - { - Name: "threshold smaller than BFT quorum", - Config: clusterConfig{ - NumNodes: 4, - Threshold: 2, - NumDVs: 1, - Network: defaultNetwork, - }, - expectedErr: "threshold cannot be smaller than BFT quorum", - }, { Name: "test with number of nodes below minimum", Config: clusterConfig{ @@ -788,6 +768,82 @@ func TestPublish(t *testing.T) { }) } +func TestClusterCLI(t *testing.T) { + feeRecipientArg := "--fee-recipient-addresses=" + validEthAddr + withdrawalArg := "--withdrawal-addresses=" + validEthAddr + + tests := []struct { + name string + network string + nodes string + numValidators string + feeRecipient string + withdrawal string + threshold string + expectedErr string + cleanup func(*testing.T) + }{ + { + name: "threshold below minimum", + nodes: "--nodes=3", + network: "--network=holesky", + numValidators: "--num-validators=1", + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + threshold: "--threshold=1", + expectedErr: "threshold must be greater than 1", + }, + { + name: "threshold above 
maximum", + nodes: "--nodes=4", + network: "--network=holesky", + numValidators: "--num-validators=1", + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + threshold: "--threshold=5", + expectedErr: "threshold cannot be greater than number of operators", + }, + { + name: "no threshold provided", + nodes: "--nodes=3", + network: "--network=holesky", + numValidators: "--num-validators=1", + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + threshold: "", + expectedErr: "", + cleanup: func(t *testing.T) { + t.Helper() + require.NoError(t, os.RemoveAll("node0")) + require.NoError(t, os.RemoveAll("node1")) + require.NoError(t, os.RemoveAll("node2")) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newCreateCmd(newCreateClusterCmd(runCreateCluster)) + if test.threshold != "" { + cmd.SetArgs([]string{"cluster", test.nodes, test.feeRecipient, test.withdrawal, test.network, test.numValidators, test.threshold}) + } else { + cmd.SetArgs([]string{"cluster", test.nodes, test.feeRecipient, test.withdrawal, test.network, test.numValidators}) + } + + err := cmd.Execute() + if test.expectedErr != "" { + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + + if test.cleanup != nil { + test.cleanup(t) + } + }) + } +} + // mockKeymanagerReq is a mock keymanager request for use in tests. 
type mockKeymanagerReq struct { Keystores []string `json:"keystores"` diff --git a/cmd/createdkg.go b/cmd/createdkg.go index 79fe1f4767..4bc9e28ab4 100644 --- a/cmd/createdkg.go +++ b/cmd/createdkg.go @@ -6,7 +6,6 @@ import ( "context" crand "crypto/rand" "encoding/json" - "math" "os" "path" @@ -50,6 +49,22 @@ func newCreateDKGCmd(runFunc func(context.Context, createDKGConfig) error) *cobr bindCreateDKGFlags(cmd, &config) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + thresholdPresent := cmd.Flags().Lookup("threshold").Changed + + if thresholdPresent { + if config.Threshold < minThreshold { + return errors.New("threshold must be greater than 1", z.Int("threshold", config.Threshold), z.Int("min", minThreshold)) + } + if config.Threshold > len(config.OperatorENRs) { + return errors.New("threshold cannot be greater than number of operators", + z.Int("threshold", config.Threshold), z.Int("operators", len(config.OperatorENRs))) + } + } + + return nil + }) + return cmd } @@ -82,7 +97,7 @@ func runCreateDKG(ctx context.Context, conf createDKGConfig) (err error) { conf.Network = eth2util.Goerli.Name } - if err = validateDKGConfig(conf.Threshold, len(conf.OperatorENRs), conf.Network, conf.DepositAmounts); err != nil { + if err = validateDKGConfig(len(conf.OperatorENRs), conf.Network, conf.DepositAmounts); err != nil { return err } @@ -115,7 +130,7 @@ func runCreateDKG(ctx context.Context, conf createDKGConfig) (err error) { safeThreshold := cluster.Threshold(len(conf.OperatorENRs)) if conf.Threshold == 0 { conf.Threshold = safeThreshold - } else if conf.Threshold != safeThreshold { + } else { log.Warn(ctx, "Non standard `--threshold` flag provided, this will affect cluster safety", nil, z.Int("threshold", conf.Threshold), z.Int("safe_threshold", safeThreshold)) } @@ -181,22 +196,12 @@ func validateWithdrawalAddrs(addrs []string, network string) error { } // validateDKGConfig returns an error if any of the provided config parameter is invalid. 
-func validateDKGConfig(threshold, numOperators int, network string, depositAmounts []int) error { +func validateDKGConfig(numOperators int, network string, depositAmounts []int) error { // Don't allow cluster size to be less than 3. if numOperators < minNodes { return errors.New("number of operators is below minimum", z.Int("operators", numOperators), z.Int("min", minNodes)) } - // Ensure threshold setting is sound - minThreshold := int(math.Ceil(float64(numOperators*2) / 3)) - if threshold < minThreshold { - return errors.New("threshold cannot be smaller than BFT quorum", z.Int("threshold", threshold), z.Int("min", minThreshold)) - } - if threshold > numOperators { - return errors.New("threshold cannot be greater than length of operators", - z.Int("threshold", threshold), z.Int("operators", numOperators)) - } - if !eth2util.ValidNetwork(network) { return errors.New("unsupported network", z.Str("network", network)) } diff --git a/cmd/createdkg_internal_test.go b/cmd/createdkg_internal_test.go index a6d522db59..3e93ebaffa 100644 --- a/cmd/createdkg_internal_test.go +++ b/cmd/createdkg_internal_test.go @@ -184,36 +184,109 @@ func TestValidateWithdrawalAddr(t *testing.T) { } func TestValidateDKGConfig(t *testing.T) { - t.Run("threshold exceeds numOperators", func(t *testing.T) { - threshold := 5 - numOperators := 4 - err := validateDKGConfig(threshold, numOperators, "", nil) - require.ErrorContains(t, err, "threshold cannot be greater than length of operators") - }) - - t.Run("threshold equals 1", func(t *testing.T) { - threshold := 1 - numOperators := 3 - err := validateDKGConfig(threshold, numOperators, "", nil) - require.ErrorContains(t, err, "threshold cannot be smaller than BFT quorum") - }) t.Run("insufficient ENRs", func(t *testing.T) { - threshold := 2 numOperators := 2 - err := validateDKGConfig(threshold, numOperators, "", nil) + err := validateDKGConfig(numOperators, "", nil) require.ErrorContains(t, err, "number of operators is below minimum") }) 
t.Run("invalid network", func(t *testing.T) { - threshold := 3 numOperators := 4 - err := validateDKGConfig(threshold, numOperators, "cosmos", nil) + err := validateDKGConfig(numOperators, "cosmos", nil) require.ErrorContains(t, err, "unsupported network") }) t.Run("wrong deposit amounts sum", func(t *testing.T) { - err := validateDKGConfig(3, 4, "goerli", []int{8, 16}) + err := validateDKGConfig(4, "goerli", []int{8, 16}) require.ErrorContains(t, err, "sum of partial deposit amounts must sum up to 32ETH") }) } + +func TestDKGCLI(t *testing.T) { + var enrs []string + for range minNodes { + enrs = append(enrs, "enr:-JG4QG472ZVvl8ySSnUK9uNVDrP_hjkUrUqIxUC75aayzmDVQedXkjbqc7QKyOOS71VmlqnYzri_taV8ZesFYaoQSIOGAYHtv1WsgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKwwq_CAld6oVKOrixE-JzMtvvNgb9yyI-_rwq4NFtajIN0Y3CCDhqDdWRwgg4u") + } + enrArg := "--operator-enrs=" + strings.Join(enrs, ",") + feeRecipientArg := "--fee-recipient-addresses=" + validEthAddr + withdrawalArg := "--withdrawal-addresses=" + validEthAddr + outputDirArg := "--output-dir=.charon" + + tests := []struct { + name string + enr string + feeRecipient string + withdrawal string + outputDir string + threshold string + expectedErr string + prepare func(*testing.T) + cleanup func(*testing.T) + }{ + { + name: "threshold below minimum", + enr: enrArg, + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + outputDir: outputDirArg, + threshold: "--threshold=1", + expectedErr: "threshold must be greater than 1", + }, + { + name: "threshold above maximum", + enr: enrArg, + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + outputDir: outputDirArg, + threshold: "--threshold=4", + expectedErr: "threshold cannot be greater than number of operators", + }, + { + name: "no threshold provided", + enr: enrArg, + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + outputDir: outputDirArg, + threshold: "", + expectedErr: "", + prepare: func(t *testing.T) { + t.Helper() + charonDir := 
testutil.CreateTempCharonDir(t) + b := []byte("sample definition") + require.NoError(t, os.WriteFile(path.Join(charonDir, "cluster-definition.json"), b, 0o600)) + }, + cleanup: func(t *testing.T) { + t.Helper() + err := os.RemoveAll(".charon") + require.NoError(t, err) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.prepare != nil { + test.prepare(t) + } + + cmd := newCreateCmd(newCreateDKGCmd(runCreateDKG)) + if test.threshold != "" { + cmd.SetArgs([]string{"dkg", test.enr, test.feeRecipient, test.withdrawal, test.outputDir, test.threshold}) + } else { + cmd.SetArgs([]string{"dkg", test.enr, test.feeRecipient, test.withdrawal, test.outputDir}) + } + + err := cmd.Execute() + if test.expectedErr != "" { + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + + if test.cleanup != nil { + test.cleanup(t) + } + }) + } +} diff --git a/tbls/herumi.go b/tbls/herumi.go index 0be2ef2a91..0c50c15995 100644 --- a/tbls/herumi.go +++ b/tbls/herumi.go @@ -84,6 +84,10 @@ func (Herumi) ThresholdSplitInsecure(t *testing.T, secret PrivateKey, total uint t.Helper() var p bls.SecretKey + if threshold <= 1 { + return nil, errors.New("threshold has to be greater than 1") + } + if err := p.Deserialize(secret[:]); err != nil { return nil, errors.Wrap(err, "cannot unmarshal bytes into Herumi secret key") } @@ -133,6 +137,10 @@ func (Herumi) ThresholdSplitInsecure(t *testing.T, secret PrivateKey, total uint func (Herumi) ThresholdSplit(secret PrivateKey, total uint, threshold uint) (map[int]PrivateKey, error) { var p bls.SecretKey + if threshold <= 1 { + return nil, errors.New("threshold has to be greater than 1") + } + if err := p.Deserialize(secret[:]); err != nil { return nil, errors.Wrap(err, "cannot unmarshal bytes into Herumi secret key") } From 224b7bae49aba92533a1201f939a91fabb9fb5b9 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Tue, 1 Oct 2024 
16:33:45 +0200 Subject: [PATCH 39/89] cmd: enable exit all (#3296) Enable exit all. category: feature ticket: #3243 --- cmd/exit.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cmd/exit.go b/cmd/exit.go index 95b500090e..59d8155f90 100644 --- a/cmd/exit.go +++ b/cmd/exit.go @@ -121,7 +121,6 @@ func bindExitFlags(cmd *cobra.Command, config *exitConfig, flags []exitCLIFlag) return s } - //nolint:exhaustive // `all` is not yet implemented switch flag { case publishAddress: cmd.Flags().StringVar(&config.PublishAddress, publishAddress.String(), "https://api.obol.tech/v1", maybeRequired("The URL of the remote API.")) @@ -149,9 +148,8 @@ func bindExitFlags(cmd *cobra.Command, config *exitConfig, flags []exitCLIFlag) cmd.Flags().DurationVar(&config.PublishTimeout, publishTimeout.String(), 30*time.Second, "Timeout for publishing a signed exit to the publish-address API.") case validatorIndex: cmd.Flags().Uint64Var(&config.ValidatorIndex, validatorIndex.String(), 0, "Validator index of the validator to exit, the associated public key must be present in the cluster lock manifest. If --validator-public-key is also provided, validator existence won't be checked on the beacon chain.") - // TODO: enable after all functionalities for --all are ready - // case all: - // cmd.Flags().BoolVar(&config.All, all.String(), false, "Exit all currently active validators in the cluster.") + case all: + cmd.Flags().BoolVar(&config.All, all.String(), false, "Exit all currently active validators in the cluster.") } if f.required { From 6972d4c82e9c1b9b5dc1d04d0d9e2b7911c36940 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 14:38:48 +0000 Subject: [PATCH 40/89] build(deps): Bump go.uber.org/automaxprocs from 1.5.3 to 1.6.0 (#3300) Bumps [go.uber.org/automaxprocs](https://github.com/uber-go/automaxprocs) from 1.5.3 to 1.6.0.
Release notes

Sourced from go.uber.org/automaxprocs's releases.

v1.6.0

  • Add RoundQuotaFunc option that allows configuration of rounding behavior for floating point CPU quota.
Changelog

Sourced from go.uber.org/automaxprocs's changelog.

v1.6.0 (2024-07-24)

  • Add RoundQuotaFunc option that allows configuration of rounding behavior for floating point CPU quota.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.uber.org/automaxprocs&package-manager=go_modules&previous-version=1.5.3&new-version=1.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8b5a6ca1a2..243c405af0 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 go.opentelemetry.io/otel/sdk v1.30.0 go.opentelemetry.io/otel/trace v1.30.0 - go.uber.org/automaxprocs v1.5.3 + go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.27.0 diff --git a/go.sum b/go.sum index 8897056dfe..e978c76de6 100644 --- a/go.sum +++ b/go.sum @@ -569,8 +569,8 @@ go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= From 24a2afda007c05756d7bf480b50b1539820b8fca Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Thu, 3 Oct 2024 13:34:15 +0200 Subject: [PATCH 41/89] test: fix test performance flaky test (#3316) Test performance writing to file was checking for the score, even though some tests are based on current machine's measurements. We have excluded testing measurements for a single test, but not for the overall score. 
category: test ticket: none --- cmd/createdkg_internal_test.go | 1 - cmd/testpeers_internal_test.go | 8 +++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/createdkg_internal_test.go b/cmd/createdkg_internal_test.go index 3e93ebaffa..c6f9371e0e 100644 --- a/cmd/createdkg_internal_test.go +++ b/cmd/createdkg_internal_test.go @@ -184,7 +184,6 @@ func TestValidateWithdrawalAddr(t *testing.T) { } func TestValidateDKGConfig(t *testing.T) { - t.Run("insufficient ENRs", func(t *testing.T) { numOperators := 2 err := validateDKGConfig(numOperators, "", nil) diff --git a/cmd/testpeers_internal_test.go b/cmd/testpeers_internal_test.go index 1d9fb8e4a7..1c15a71d8e 100644 --- a/cmd/testpeers_internal_test.go +++ b/cmd/testpeers_internal_test.go @@ -384,13 +384,15 @@ func testWriteFile(t *testing.T, expectedRes testCategoryResult, path string) { require.NoError(t, err) require.Equal(t, expectedRes.CategoryName, res.CategoryName) - require.Equal(t, expectedRes.Score, res.Score) require.Equal(t, len(expectedRes.Targets), len(res.Targets)) + checkFinalScore := true for targetName, testResults := range res.Targets { for idx, testRes := range testResults { // do not test verdicts based on measurements if expectedRes.Targets[targetName][idx].Verdict == testVerdictOk || expectedRes.Targets[targetName][idx].Verdict == testVerdictFail { require.Equal(t, expectedRes.Targets[targetName][idx].Verdict, testRes.Verdict) + } else { + checkFinalScore = false } require.Equal(t, expectedRes.Targets[targetName][idx].IsAcceptable, testRes.IsAcceptable) if expectedRes.Targets[targetName][idx].Error.error != nil { @@ -402,6 +404,10 @@ func testWriteFile(t *testing.T, expectedRes testCategoryResult, path string) { require.Equal(t, expectedRes.Targets[targetName][idx].Suggestion, testRes.Suggestion) } } + // check final score only if there are no tests based on actual measurement + if checkFinalScore { + require.Equal(t, expectedRes.Score, res.Score) + } } func startPeer(t 
*testing.T, conf testPeersConfig, peerPrivKey *k1.PrivateKey) enr.Record { From f4bcdd501c7bb8ecd76fa9283ed85be8fee960e4 Mon Sep 17 00:00:00 2001 From: Anthony PHAM Date: Thu, 3 Oct 2024 17:22:52 +0200 Subject: [PATCH 42/89] *: create automate pr for release (#3310) Ensure all charon repositories Reflects Current Charon Version category: feature ticket: none --- .github/workflows/release.yml | 71 +++++++++++++++++++++++++++++------ 1 file changed, 59 insertions(+), 12 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8c4a36c4fb..7d2e6dae93 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,15 +10,62 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 # Disable shallow checkout - - uses: ./.github/actions/setup-go - - run: go run . --help > cli-reference.txt - - run: go run testutil/genchangelog/main.go - - uses: softprops/action-gh-release@v1 - with: - draft: true - files: cli-reference.txt - body_path: changelog.md - token: ${{ secrets.RELEASE_SECRET }} + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Disable shallow checkout + + - name: Setup Go environment + uses: ./.github/actions/setup-go + + - name: Generate CLI reference + run: go run . 
--help > cli-reference.txt + + - name: Generate changelog + run: go run testutil/genchangelog/main.go + + - name: Create GitHub release draft + uses: softprops/action-gh-release@v1 + with: + draft: true + files: cli-reference.txt + body_path: changelog.md + token: ${{ secrets.RELEASE_SECRET }} + + trigger-dispatch: + runs-on: ubuntu-latest + steps: + - name: Extract tag name + run: echo "TAG_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV + + - name: Trigger dispatch for obol-docs + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.OBOL_PLATFORM_PAT }} + repository: ObolNetwork/obol-docs + event-type: update-version + client-payload: '{"tag": "${{ env.TAG_NAME }}"}' + + - name: Trigger dispatch for obol-infrastructure + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.OBOL_PLATFORM_PAT }} + repository: ObolNetwork/obol-infrastructure + event-type: update-version + client-payload: '{"tag": "${{ env.TAG_NAME }}"}' + + - name: Trigger dispatch for helm-charts + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.OBOL_PLATFORM_PAT }} + repository: ObolNetwork/helm-charts + event-type: update-version + client-payload: '{"tag": "${{ env.TAG_NAME }}"}' + + - name: Trigger dispatch for obol-ansible + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.OBOL_PLATFORM_PAT }} + repository: ObolNetwork/obol-ansible + event-type: update-version + client-payload: '{"tag": "${{ env.TAG_NAME }}"}' From e9fdf8eed219c92a3ba648ac34185688f8c20cda Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 08:03:43 +0000 Subject: [PATCH 43/89] build(deps): Bump golang from 1.23.1-alpine to 1.23.2-alpine in /testutil/promrated (#3315) Bumps golang from 1.23.1-alpine to 1.23.2-alpine. 
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang&package-manager=docker&previous-version=1.23.1-alpine&new-version=1.23.2-alpine)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- testutil/promrated/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testutil/promrated/Dockerfile b/testutil/promrated/Dockerfile index 6de3992ad1..c6a73e9c20 100644 --- a/testutil/promrated/Dockerfile +++ b/testutil/promrated/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23.1-alpine AS builder +FROM golang:1.23.2-alpine AS builder # Install dependencies RUN apk add --no-cache build-base git From c9464a5d4ef335fdf115d5b95d9ca978d224c6c6 Mon Sep 17 00:00:00 2001 From: Andrei Smirnov Date: Thu, 10 Oct 2024 17:56:01 +0300 Subject: [PATCH 44/89] testutil: fixed smoke test (#3332) This addresses two issues: 1. The broken Smoke Test with lodestar, it refuses accepting network config for dev network. 2. Re-enabled previously disabled compatibility test. category: test ticket: #3004 --- testutil/compose/smoke/smoke_test.go | 74 ++++++++++++------------- testutil/compose/static/lodestar/run.sh | 2 - 2 files changed, 37 insertions(+), 39 deletions(-) diff --git a/testutil/compose/smoke/smoke_test.go b/testutil/compose/smoke/smoke_test.go index 449634b481..b4a921e42e 100644 --- a/testutil/compose/smoke/smoke_test.go +++ b/testutil/compose/smoke/smoke_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/obolnetwork/charon/app/version" "github.com/obolnetwork/charon/testutil" "github.com/obolnetwork/charon/testutil/compose" ) @@ -84,26 +85,26 @@ func TestSmoke(t *testing.T) { }, Timeout: time.Minute * 2, }, - // TODO: https://github.com/ObolNetwork/charon/issues/3004 - // { - // Name: "run_version_matrix_with_dkg", - // PrintYML: true, - // ConfigFunc: func(conf *compose.Config) { - // conf.KeyGen = compose.KeyGenDKG - // // NOTE: Add external VCs when supported versions include minimal preset. 
- // conf.VCs = []compose.VCType{compose.VCMock} - // }, - // DefineTmplFunc: func(data *compose.TmplData) { - // // Use oldest supported version for cluster lock - // pegImageTag(data.Nodes, 0, last(version.Supported()[1:])+"-rc") - // }, - // RunTmplFunc: func(data *compose.TmplData) { - // // Node 0 is local build - // pegImageTag(data.Nodes, 1, nth(version.Supported(), 0)+"-dev") // Node 1 is previous commit on this branch (v0.X-dev/rc) Note this will fail for first commit on new branch version. - // pegImageTag(data.Nodes, 2, nth(version.Supported()[1:], 1)+"-rc") - // pegImageTag(data.Nodes, 3, nth(version.Supported()[1:], 2)+"-rc") - // }, - // }, + { + Name: "run_version_matrix_with_dkg", + PrintYML: true, + ConfigFunc: func(conf *compose.Config) { + conf.KeyGen = compose.KeyGenDKG + // NOTE: Add external VCs when supported versions include minimal preset. + conf.VCs = []compose.VCType{compose.VCMock} + }, + DefineTmplFunc: func(data *compose.TmplData) { + // Use oldest supported version for cluster lock + pegImageTag(data.Nodes, 0, last(version.Supported()[1:])+".0-rc1") + }, + RunTmplFunc: func(data *compose.TmplData) { + // Node 0 is local build + // Nodes 1-3 use the previous release; ensure better diversity in the matrix when more releases are added. 
+ pegImageTag(data.Nodes, 1, nth(version.Supported(), 1)+".0-rc1") + pegImageTag(data.Nodes, 2, nth(version.Supported(), 1)+".0-rc1") + pegImageTag(data.Nodes, 3, nth(version.Supported(), 1)+".0-rc1") + }, + }, { Name: "teku_versions", ConfigFunc: func(conf *compose.Config) { @@ -171,8 +172,7 @@ func TestSmoke(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - dir, err := os.MkdirTemp("", "") - require.NoError(t, err) + dir := t.TempDir() conf := compose.NewDefaultConfig() conf.Monitoring = false @@ -204,7 +204,7 @@ func TestSmoke(t *testing.T) { autoConfig.LogFile = path.Join(*logDir, test.Name+".log") } - err = compose.Auto(context.Background(), autoConfig) + err := compose.Auto(context.Background(), autoConfig) testutil.RequireNoError(t, err) }) } @@ -212,17 +212,17 @@ func TestSmoke(t *testing.T) { // pegImageTag pegs the charon docker image tag for one of the nodes. // It overrides the default that uses locally built latest version. -// func pegImageTag(nodes []compose.TmplNode, index int, imageTag string) { -// nodes[index].ImageTag = imageTag -// nodes[index].Entrypoint = "/usr/local/bin/charon" // Use contains binary, not locally built latest version. -// } - -// // last returns the last element of a slice. -// func last(s []version.SemVer) string { -// return s[len(s)-1].String() -// } - -// // nth returns the nth element of a slice, wrapping if n > len(s). -// func nth(s []version.SemVer, n int) string { -// return s[n%len(s)].String() -// } +func pegImageTag(nodes []compose.TmplNode, index int, imageTag string) { + nodes[index].ImageTag = imageTag + nodes[index].Entrypoint = "/usr/local/bin/charon" // Use contains binary, not locally built latest version. +} + +// last returns the last element of a slice. +func last(s []version.SemVer) string { + return s[len(s)-1].String() +} + +// nth returns the nth element of a slice, wrapping if n > len(s). 
+func nth(s []version.SemVer, n int) string { + return s[n%len(s)].String() +} diff --git a/testutil/compose/static/lodestar/run.sh b/testutil/compose/static/lodestar/run.sh index b5b1c12ed5..ea04d5b97c 100755 --- a/testutil/compose/static/lodestar/run.sh +++ b/testutil/compose/static/lodestar/run.sh @@ -24,8 +24,6 @@ echo "Imported all keys" node /usr/app/packages/cli/bin/lodestar validator \ --network="dev" \ - --presetFile="/tmp/testnet/config.yaml" \ - --paramsFile="/tmp/testnet/config.yaml" \ --metrics=true \ --metrics.address="0.0.0.0" \ --metrics.port=5064 \ From 4696c215254e7fe0536905d978f83f38ac8a104f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Oct 2024 04:41:16 +0000 Subject: [PATCH 45/89] build(deps): Bump golang.org/x/tools from 0.25.0 to 0.26.0 (#3333) Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.25.0 to 0.26.0.
Commits
  • 2ab3b51 go.mod: update golang.org/x dependencies
  • 2683c79 gopls/internal/golang/stubmethods: rename analysis/stubmethods
  • efd951d gopls/internal/analysis/stubmethods: merge into CodeAction
  • d0d0d9e gopls/internal/cache: memoize dependent hash on analysisNode
  • a19eef6 gopls/internal/cache: express packageHandle as a state machine
  • dd745ec gopls/internal/test/marker: update regression test issue68918.txt
  • a02ee35 go/analysis/passes/stdversion: reenable tests
  • a24facf all: set gotypesalias=0 explicitly
  • ce2a33e gopls/internal: fix extract refactor for cases with anonymous functions
  • a2ff832 go/ssa: remove references to GOEXPERIMENT range
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/tools&package-manager=go_modules&previous-version=0.25.0&new-version=0.26.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 243c405af0..2bbbda7fa1 100644 --- a/go.mod +++ b/go.mod @@ -41,12 +41,12 @@ require ( go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.27.0 + golang.org/x/crypto v0.28.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/sync v0.8.0 - golang.org/x/term v0.24.0 + golang.org/x/term v0.25.0 golang.org/x/time v0.6.0 - golang.org/x/tools v0.25.0 + golang.org/x/tools v0.26.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -191,9 +191,9 @@ require ( go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect diff --git a/go.sum b/go.sum index e978c76de6..8b1b30566f 100644 --- a/go.sum +++ b/go.sum @@ -596,8 +596,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= 
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= @@ -629,8 +629,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -673,17 +673,17 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= @@ -700,8 +700,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools 
v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 555fa1203f8f66241d1392e49db32c840d0c90ac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Oct 2024 04:51:16 +0000 Subject: [PATCH 46/89] build(deps): Bump golang.org/x/time from 0.6.0 to 0.7.0 (#3322) Bumps [golang.org/x/time](https://github.com/golang/time) from 0.6.0 to 0.7.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/time&package-manager=go_modules&previous-version=0.6.0&new-version=0.7.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2bbbda7fa1..324a3d0fd0 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/sync v0.8.0 golang.org/x/term v0.25.0 - golang.org/x/time v0.6.0 + golang.org/x/time v0.7.0 golang.org/x/tools v0.26.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 diff --git a/go.sum b/go.sum index 8b1b30566f..ed9f554e87 100644 --- a/go.sum +++ b/go.sum @@ -686,8 +686,8 @@ golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 90ea27ae018e15a93f06af7c881e3a1554389814 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Oct 2024 05:41:29 +0000 Subject: [PATCH 47/89] build(deps): Bump golang from 1.23.1-bookworm to 1.23.2-bookworm (#3314) Bumps golang from 1.23.1-bookworm to 1.23.2-bookworm. 
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang&package-manager=docker&previous-version=1.23.1-bookworm&new-version=1.23.2-bookworm)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b3879b69e1..bb0396192b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Container for building Go binary. -FROM golang:1.23.1-bookworm AS builder +FROM golang:1.23.2-bookworm AS builder # Install dependencies RUN apt-get update && apt-get install -y build-essential git # Prep and copy source From 323d0e714078bf1c61c915ac8a2f9b0697e002ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 07:08:27 +0000 Subject: [PATCH 48/89] build(deps): Bump go.opentelemetry.io/otel from 1.30.0 to 1.31.0 (#3339) Bumps [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) from 1.30.0 to 1.31.0.
Changelog

Sourced from go.opentelemetry.io/otel's changelog.

[1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11

Added

  • Add go.opentelemetry.io/otel/sdk/metric/exemplar package which includes Exemplar, Filter, TraceBasedFilter, AlwaysOnFilter, HistogramReservoir, FixedSizeReservoir, Reservoir, Value and ValueType types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
  • Add WithExportBufferSize option to log batch processor.(#5877)

Changed

  • Enable exemplars by default in go.opentelemetry.io/otel/sdk/metric. Exemplars can be disabled by setting OTEL_METRICS_EXEMPLAR_FILTER=always_off (#5778)
  • Logger.Enabled in go.opentelemetry.io/otel/log now accepts a newly introduced EnabledParameters type instead of Record. (#5791)
  • FilterProcessor.Enabled in go.opentelemetry.io/otel/sdk/log/internal/x now accepts EnabledParameters instead of Record. (#5791)
  • The Record type in go.opentelemetry.io/otel/log is no longer comparable. (#5847)
  • Performance improvements for the trace SDK SetAttributes method in Span. (#5864)
  • Reduce memory allocations for the Event and Link lists in Span. (#5858)
  • Performance improvements for the trace SDK AddEvent, AddLink, RecordError and End methods in Span. (#5874)

Deprecated

Fixed

  • The race condition for multiple FixedSize exemplar reservoirs identified in #5814 is resolved. (#5819)
  • Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803)
  • Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
  • Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
  • Change the reflect.TypeOf to use a nil pointer to not allocate on the heap unless necessary. (#5827)
Commits
  • bc2fe88 Release v1.31.0/v0.53.0/v0.7.0/v0.0.10 (#5883)
  • a7d5c1a Add an option to configure the exporter buffer of the BatchProcessor (#5877)
  • eb9279b fix(deps): update golang.org/x/exp digest to f66d83c (#5880)
  • 6441653 Performance improvements for the trace SDK in Span. (#5874)
  • 8e9baf2 chore(deps): update lycheeverse/lychee-action action to v2 (#5878)
  • 8fbaa97 Reduce newEvictedQueueLink and newEvictedQueueEvent memory allocations (#...
  • 4a911f9 chore(deps): update googleapis to 5fefd90 (#5876)
  • 98cbdcb fix(deps): update module google.golang.org/protobuf to v1.35.1 (#5875)
  • 3cbd967 Performance improvements for recordingSpan SetAttributes and `addOverCapA...
  • 9e791a6 fix(deps): update golang.org/x (#5872)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel&package-manager=go_modules&previous-version=1.30.0&new-version=1.31.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 324a3d0fd0..10c13f706d 100644 --- a/go.mod +++ b/go.mod @@ -33,11 +33,11 @@ require ( github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 - go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 go.opentelemetry.io/otel/sdk v1.30.0 - go.opentelemetry.io/otel/trace v1.30.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 @@ -184,7 +184,7 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect diff --git a/go.sum b/go.sum index ed9f554e87..77ce53f7f3 100644 --- a/go.sum +++ b/go.sum @@ -546,8 +546,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod 
h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= @@ -556,14 +556,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 
h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= From 95770d55ca8b36bb049a7e9c53654a15fe83a870 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 07:16:26 +0000 Subject: [PATCH 49/89] build(deps): Bump go.opentelemetry.io/otel/exporters/stdout/stdouttrace from 1.30.0 to 1.31.0 (#3337) Bumps [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](https://github.com/open-telemetry/opentelemetry-go) from 1.30.0 to 1.31.0.
Changelog

Sourced from go.opentelemetry.io/otel/exporters/stdout/stdouttrace's changelog.

[1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11

Added

  • Add go.opentelemetry.io/otel/sdk/metric/exemplar package which includes Exemplar, Filter, TraceBasedFilter, AlwaysOnFilter, HistogramReservoir, FixedSizeReservoir, Reservoir, Value and ValueType types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
  • Add WithExportBufferSize option to log batch processor. (#5877)

Changed

  • Enable exemplars by default in go.opentelemetry.io/otel/sdk/metric. Exemplars can be disabled by setting OTEL_METRICS_EXEMPLAR_FILTER=always_off (#5778)
  • Logger.Enabled in go.opentelemetry.io/otel/log now accepts a newly introduced EnabledParameters type instead of Record. (#5791)
  • FilterProcessor.Enabled in go.opentelemetry.io/otel/sdk/log/internal/x now accepts EnabledParameters instead of Record. (#5791)
  • The Record type in go.opentelemetry.io/otel/log is no longer comparable. (#5847)
  • Performance improvements for the trace SDK SetAttributes method in Span. (#5864)
  • Reduce memory allocations for the Event and Link lists in Span. (#5858)
  • Performance improvements for the trace SDK AddEvent, AddLink, RecordError and End methods in Span. (#5874)

Deprecated

Fixed

  • The race condition for multiple FixedSize exemplar reservoirs identified in #5814 is resolved. (#5819)
  • Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
  • Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
  • Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
  • Change the reflect.TypeOf to use a nil pointer to not allocate on the heap unless necessary. (#5827)
Commits
  • bc2fe88 Release v1.31.0/v0.53.0/v0.7.0/v0.0.10 (#5883)
  • a7d5c1a Add an option to configure the exporter buffer of the BatchProcessor (#5877)
  • eb9279b fix(deps): update golang.org/x/exp digest to f66d83c (#5880)
  • 6441653 Performance improvements for the trace SDK in Span. (#5874)
  • 8e9baf2 chore(deps): update lycheeverse/lychee-action action to v2 (#5878)
  • 8fbaa97 Reduce newEvictedQueueLink and newEvictedQueueEvent memory allocations (#...
  • 4a911f9 chore(deps): update googleapis to 5fefd90 (#5876)
  • 98cbdcb fix(deps): update module google.golang.org/protobuf to v1.35.1 (#5875)
  • 3cbd967 Performance improvements for recordingSpan SetAttributes and `addOverCapA...
  • 9e791a6 fix(deps): update golang.org/x (#5872)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel/exporters/stdout/stdouttrace&package-manager=go_modules&previous-version=1.30.0&new-version=1.31.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 10c13f706d..cad2eeb978 100644 --- a/go.mod +++ b/go.mod @@ -35,8 +35,8 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 - go.opentelemetry.io/otel/sdk v1.30.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 diff --git a/go.sum b/go.sum index 77ce53f7f3..67ff6738e8 100644 --- a/go.sum +++ b/go.sum @@ -554,12 +554,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYa go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= -go.opentelemetry.io/otel/sdk v1.30.0/go.mod 
h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= From bc87cf95ea62f922222ea0b362e5bfb9df8c77b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 07:28:52 +0000 Subject: [PATCH 50/89] build(deps): Bump go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp from 0.55.0 to 0.56.0 (#3338) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) from 0.55.0 to 0.56.0.
Release notes

Sourced from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp's releases.

Release v1.31.0/v0.56.0/v0.25.0/v0.11.0/v0.6.0/v0.4.0/v0.3.0

Overview

Added

  • The Severitier and SeverityVar types are added to go.opentelemetry.io/contrib/processors/minsev allowing dynamic configuration of the severity used by the LogProcessor. (#6116)
  • Move examples from go.opentelemetry.io/otel to this repository under examples directory. (#6158)
  • Support yaml/json struct tags for generated code in go.opentelemetry.io/contrib/config. (#5433)
  • Add support for parsing YAML configuration via ParseYAML in go.opentelemetry.io/contrib/config. (#5433)
  • Add support for temporality preference configuration in go.opentelemetry.io/contrib/config. (#5860)

Changed

  • The function signature of NewLogProcessor in go.opentelemetry.io/contrib/processors/minsev has changed to accept the added Severitier interface instead of a log.Severity. (#6116)
  • Updated go.opentelemetry.io/contrib/config to use the v0.3.0 release of schema which includes backwards incompatible changes. (#6126)
  • NewSDK in go.opentelemetry.io/contrib/config now returns a no-op SDK if disabled is set to true. (#6185)
  • The deprecated go.opentelemetry.io/contrib/instrumentation/github.com/labstack/echo/otelecho package has found a Code Owner. The package is no longer deprecated. (#6207)

Fixed

  • Possible nil dereference panic in go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace. (#5965)
  • logrus.Level transformed to appropriate log.Severity in go.opentelemetry.io/contrib/bridges/otellogrus. (#6191)

Removed

  • The Minimum field of the LogProcessor in go.opentelemetry.io/contrib/processors/minsev is removed. Use NewLogProcessor to configure this setting. (#6116)
  • The deprecated go.opentelemetry.io/contrib/instrumentation/gopkg.in/macaron.v1/otelmacaron package is removed. (#6186)
  • The deprecated go.opentelemetry.io/contrib/samplers/aws/xray package is removed. (#6187)

What's Changed

... (truncated)

Changelog

Sourced from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp's changelog.

[1.31.0/0.56.0/0.25.0/0.11.0/0.6.0/0.4.0/0.3.0] - 2024-10-14

Added

  • The Severitier and SeverityVar types are added to go.opentelemetry.io/contrib/processors/minsev allowing dynamic configuration of the severity used by the LogProcessor. (#6116)
  • Move examples from go.opentelemetry.io/otel to this repository under examples directory. (#6158)
  • Support yaml/json struct tags for generated code in go.opentelemetry.io/contrib/config. (#5433)
  • Add support for parsing YAML configuration via ParseYAML in go.opentelemetry.io/contrib/config. (#5433)
  • Add support for temporality preference configuration in go.opentelemetry.io/contrib/config. (#5860)

Changed

  • The function signature of NewLogProcessor in go.opentelemetry.io/contrib/processors/minsev has changed to accept the added Severitier interface instead of a log.Severity. (#6116)
  • Updated go.opentelemetry.io/contrib/config to use the v0.3.0 release of schema which includes backwards incompatible changes. (#6126)
  • NewSDK in go.opentelemetry.io/contrib/config now returns a no-op SDK if disabled is set to true. (#6185)
  • The deprecated go.opentelemetry.io/contrib/instrumentation/github.com/labstack/echo/otelecho package has found a Code Owner. The package is no longer deprecated. (#6207)

Fixed

  • Possible nil dereference panic in go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace. (#5965)
  • logrus.Level transformed to appropriate log.Severity in go.opentelemetry.io/contrib/bridges/otellogrus. (#6191)

Removed

  • The Minimum field of the LogProcessor in go.opentelemetry.io/contrib/processors/minsev is removed. Use NewLogProcessor to configure this setting. (#6116)
  • The deprecated go.opentelemetry.io/contrib/instrumentation/gopkg.in/macaron.v1/otelmacaron package is removed. (#6186)
  • The deprecated go.opentelemetry.io/contrib/samplers/aws/xray package is removed. (#6187)
Commits
  • 9cf5701 Release v1.31.0/v0.56.0/v0.25.0/v0.11.0/v0.6.0/v0.4.0/v0.3.0 (#6243)
  • d6305c0 chore(deps): update module github.com/klauspost/compress to v1.17.11 (#6232)
  • 09cbf41 fix(deps): update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.65.3 (...
  • c3c8538 Update otel core to the latest release (#6233)
  • 79bb705 otelecho: Add Code Owner and remove deprecation (#6207)
  • 45ba204 config: support v0.3 of the config schema (#6126)
  • 20e45af Revert "chore(deps): update lycheeverse/lychee-action action to v2" (#6229)
  • 5322670 Remove otelmacaron (#6186)
  • 87d0229 feat(instrumentation/http/otelhttp): move client metrics creation into intern...
  • 900fc4b Run the test compatibility check even if tests failed (#6224)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp&package-manager=go_modules&previous-version=0.55.0&new-version=0.56.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cad2eeb978..57e928a64b 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 diff --git a/go.sum b/go.sum index 67ff6738e8..9221d3dee6 100644 --- a/go.sum +++ b/go.sum @@ -544,8 +544,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= From c0072b68df4cc40f444fd819ea5d5941968851c6 Mon Sep 17 00:00:00 2001 From: David <39963997+No0key@users.noreply.github.com> Date: Tue, 15 Oct 2024 19:10:10 +0300 Subject: 
[PATCH 51/89] *: optimize Dockerfile (#3281) Hello! This PR optimizes the `Dockerfile`. Changes include: - Combined RUN commands to reduce the number of layers - Cached Go dependencies to speed up build times - Cleaned up unnecessary apt files to reduce image size - Consolidated user creation and permission setting commands Reduced image size from 370MB to 302MB category: refactor ticket: none --- Dockerfile | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index bb0396192b..75f0184f95 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,14 +1,18 @@ # Container for building Go binary. FROM golang:1.23.2-bookworm AS builder # Install dependencies -RUN apt-get update && apt-get install -y build-essential git +RUN apt-get update && apt-get install -y --no-install-recommends build-essential git + # Prep and copy source WORKDIR /app/charon + COPY . . + # Populate GO_BUILD_FLAG with a build arg to provide an optional go build flag. ARG GO_BUILD_FLAG ENV GO_BUILD_FLAG=${GO_BUILD_FLAG} RUN echo "Building with GO_BUILD_FLAG='${GO_BUILD_FLAG}'" + # Build with Go module and Go build caches. RUN \ --mount=type=cache,target=/go/pkg \ @@ -18,30 +22,35 @@ RUN echo "Built charon version=$(./charon version)" # Copy final binary into light stage. 
FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y ca-certificates wget fio +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates fio wget \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ARG GITHUB_SHA=local ENV GITHUB_SHA=${GITHUB_SHA} + COPY --from=builder /app/charon/charon /usr/local/bin/ + # Don't run container as root ENV USER=charon ENV UID=1000 ENV GID=1000 -RUN addgroup --gid "$GID" "$USER" -RUN adduser \ +RUN addgroup --gid "$GID" "$USER" \ + && adduser \ --disabled-password \ --gecos "charon" \ --home "/opt/$USER" \ --ingroup "$USER" \ --no-create-home \ --uid "$UID" \ - "$USER" -RUN chown charon /usr/local/bin/charon -RUN chmod u+x /usr/local/bin/charon + "$USER" \ + && chown "$USER" /usr/local/bin/charon \ + && chmod u+x /usr/local/bin/charon + WORKDIR "/opt/$USER" -RUN chown charon "/opt/$USER" USER charon + ENTRYPOINT ["/usr/local/bin/charon"] CMD ["run"] + # Used by GitHub to associate container with repo. LABEL org.opencontainers.image.source="https://github.com/obolnetwork/charon" LABEL org.opencontainers.image.title="charon" From c9bdd6280fe70e12ed4e3ffee21dd9000f46b63e Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Tue, 15 Oct 2024 18:27:36 +0200 Subject: [PATCH 52/89] github: use minor go versions in pipelines (#3321) Instead of having to bump golang version on each patch release, use latest patch. That's what we already do in `go.mod`. 
category: misc ticket: none --- .github/actions/setup-go/action.yml | 2 +- .golangci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/setup-go/action.yml b/.github/actions/setup-go/action.yml index 9509680ffc..9cc2fdd9e2 100644 --- a/.github/actions/setup-go/action.yml +++ b/.github/actions/setup-go/action.yml @@ -6,4 +6,4 @@ runs: - name: Setup go uses: actions/setup-go@v4 with: - go-version: '1.23.1' + go-version: '1.23' diff --git a/.golangci.yml b/.golangci.yml index 8bb3fa8413..20a6b434ae 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,6 @@ run: timeout: 5m - go: "1.23.1" + go: "1.23" linters-settings: cyclop: max-complexity: 15 From 38d7a6a16c46d0de7eff03a205dcc0c814a5f304 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Tue, 15 Oct 2024 18:33:20 +0200 Subject: [PATCH 53/89] exit: add custom testnet flags for exits (#3317) Add testnet flags for exits. We have testnet for other CLI commands, but not for the exits. Required so we can easily test with Kurtosis. Fix a test in `cmd/createcluster_internal_test.go` as well. Add custom test dir for it. 
category: misc ticket: none --- cmd/exit.go | 27 +++++++++++++++++++++++++++ cmd/exit_broadcast.go | 13 +++++++++++++ cmd/exit_fetch.go | 12 ++++++++++++ cmd/exit_list.go | 12 ++++++++++++ cmd/exit_sign.go | 12 ++++++++++++ 5 files changed, 76 insertions(+) diff --git a/cmd/exit.go b/cmd/exit.go index 59d8155f90..2f933aa4c7 100644 --- a/cmd/exit.go +++ b/cmd/exit.go @@ -13,6 +13,7 @@ import ( "github.com/obolnetwork/charon/app/errors" "github.com/obolnetwork/charon/app/eth2wrap" "github.com/obolnetwork/charon/app/log" + "github.com/obolnetwork/charon/eth2util" "github.com/obolnetwork/charon/eth2util/signing" "github.com/obolnetwork/charon/tbls" ) @@ -36,6 +37,7 @@ type exitConfig struct { ExitFromFileDir string Log log.Config All bool + testnetConfig eth2util.Network } func newExitCmd(cmds ...*cobra.Command) *cobra.Command { @@ -67,6 +69,11 @@ const ( publishTimeout validatorIndex all + testnetName + testnetForkVersion + testnetChainID + testnetGenesisTimestamp + testnetCapellaHardFork ) func (ef exitFlag) String() string { @@ -99,6 +106,16 @@ func (ef exitFlag) String() string { return "validator-index" case all: return "all" + case testnetName: + return "testnet-name" + case testnetForkVersion: + return "testnet-fork-version" + case testnetChainID: + return "testnet-chain-id" + case testnetGenesisTimestamp: + return "testnet-genesis-timestamp" + case testnetCapellaHardFork: + return "testnet-capella-hard-fork" default: return "unknown" } @@ -150,6 +167,16 @@ func bindExitFlags(cmd *cobra.Command, config *exitConfig, flags []exitCLIFlag) cmd.Flags().Uint64Var(&config.ValidatorIndex, validatorIndex.String(), 0, "Validator index of the validator to exit, the associated public key must be present in the cluster lock manifest. 
If --validator-public-key is also provided, validator existence won't be checked on the beacon chain.") case all: cmd.Flags().BoolVar(&config.All, all.String(), false, "Exit all currently active validators in the cluster.") + case testnetName: + cmd.Flags().StringVar(&config.testnetConfig.Name, testnetName.String(), "", "Name of the custom test network.") + case testnetForkVersion: + cmd.Flags().StringVar(&config.testnetConfig.GenesisForkVersionHex, testnetForkVersion.String(), "", "Genesis fork version of the custom test network (in hex).") + case testnetChainID: + cmd.Flags().Uint64Var(&config.testnetConfig.ChainID, "testnet-chain-id", 0, "Chain ID of the custom test network.") + case testnetGenesisTimestamp: + cmd.Flags().Int64Var(&config.testnetConfig.GenesisTimestamp, "testnet-genesis-timestamp", 0, "Genesis timestamp of the custom test network.") + case testnetCapellaHardFork: + cmd.Flags().StringVar(&config.testnetConfig.CapellaHardFork, "testnet-capella-hard-fork", "", "Capella hard fork version of the custom test network.") } if f.required { diff --git a/cmd/exit_broadcast.go b/cmd/exit_broadcast.go index 6f28ac01ad..b32079c00e 100644 --- a/cmd/exit_broadcast.go +++ b/cmd/exit_broadcast.go @@ -25,6 +25,7 @@ import ( "github.com/obolnetwork/charon/app/z" manifestpb "github.com/obolnetwork/charon/cluster/manifestpb/v1" "github.com/obolnetwork/charon/core" + "github.com/obolnetwork/charon/eth2util" "github.com/obolnetwork/charon/eth2util/keystore" "github.com/obolnetwork/charon/tbls" "github.com/obolnetwork/charon/tbls/tblsconv" @@ -62,6 +63,12 @@ func newBcastFullExitCmd(runFunc func(context.Context, exitConfig) error) *cobra {exitFromDir, false}, {beaconNodeTimeout, false}, {publishTimeout, false}, + {all, false}, + {testnetName, false}, + {testnetForkVersion, false}, + {testnetChainID, false}, + {testnetGenesisTimestamp, false}, + {testnetCapellaHardFork, false}, }) bindLogFlags(cmd.Flags(), &config.Log) @@ -98,6 +105,12 @@ func newBcastFullExitCmd(runFunc 
func(context.Context, exitConfig) error) *cobra } func runBcastFullExit(ctx context.Context, config exitConfig) error { + // Check if custom testnet configuration is provided. + if config.testnetConfig.IsNonZero() { + // Add testnet config to supported networks. + eth2util.AddTestNetwork(config.testnetConfig) + } + identityKey, err := k1util.Load(config.PrivateKeyPath) if err != nil { return errors.Wrap(err, "could not load identity key") diff --git a/cmd/exit_fetch.go b/cmd/exit_fetch.go index 0a3e251199..7dd7296ec5 100644 --- a/cmd/exit_fetch.go +++ b/cmd/exit_fetch.go @@ -18,6 +18,7 @@ import ( "github.com/obolnetwork/charon/app/obolapi" "github.com/obolnetwork/charon/app/z" "github.com/obolnetwork/charon/core" + "github.com/obolnetwork/charon/eth2util" "github.com/obolnetwork/charon/eth2util/keystore" ) @@ -49,6 +50,11 @@ func newFetchExitCmd(runFunc func(context.Context, exitConfig) error) *cobra.Com {all, false}, {fetchedExitPath, false}, {publishTimeout, false}, + {testnetName, false}, + {testnetForkVersion, false}, + {testnetChainID, false}, + {testnetGenesisTimestamp, false}, + {testnetCapellaHardFork, false}, }) bindLogFlags(cmd.Flags(), &config.Log) @@ -73,6 +79,12 @@ func newFetchExitCmd(runFunc func(context.Context, exitConfig) error) *cobra.Com } func runFetchExit(ctx context.Context, config exitConfig) error { + // Check if custom testnet configuration is provided. + if config.testnetConfig.IsNonZero() { + // Add testnet config to supported networks. 
+ eth2util.AddTestNetwork(config.testnetConfig) + } + if _, err := os.Stat(config.FetchedExitPath); err != nil { return errors.Wrap(err, "store exit path") } diff --git a/cmd/exit_list.go b/cmd/exit_list.go index 79c753df43..ef263f7ec1 100644 --- a/cmd/exit_list.go +++ b/cmd/exit_list.go @@ -15,6 +15,7 @@ import ( "github.com/obolnetwork/charon/app/errors" "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/z" + "github.com/obolnetwork/charon/eth2util" ) func newListActiveValidatorsCmd(runFunc func(context.Context, exitConfig) error) *cobra.Command { @@ -43,6 +44,11 @@ func newListActiveValidatorsCmd(runFunc func(context.Context, exitConfig) error) {lockFilePath, false}, {beaconNodeEndpoints, true}, {beaconNodeTimeout, false}, + {testnetName, false}, + {testnetForkVersion, false}, + {testnetChainID, false}, + {testnetGenesisTimestamp, false}, + {testnetCapellaHardFork, false}, }) bindLogFlags(cmd.Flags(), &config.Log) @@ -51,6 +57,12 @@ func newListActiveValidatorsCmd(runFunc func(context.Context, exitConfig) error) } func runListActiveValidatorsCmd(ctx context.Context, config exitConfig) error { + // Check if custom testnet configuration is provided. + if config.testnetConfig.IsNonZero() { + // Add testnet config to supported networks. 
+ eth2util.AddTestNetwork(config.testnetConfig) + } + valList, err := listActiveVals(ctx, config) if err != nil { return err diff --git a/cmd/exit_sign.go b/cmd/exit_sign.go index 3b5f3b7e88..9aac498f57 100644 --- a/cmd/exit_sign.go +++ b/cmd/exit_sign.go @@ -19,6 +19,7 @@ import ( "github.com/obolnetwork/charon/app/obolapi" "github.com/obolnetwork/charon/app/z" "github.com/obolnetwork/charon/core" + "github.com/obolnetwork/charon/eth2util" "github.com/obolnetwork/charon/eth2util/keystore" ) @@ -54,6 +55,11 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c {beaconNodeTimeout, false}, {publishTimeout, false}, {all, false}, + {testnetName, false}, + {testnetForkVersion, false}, + {testnetChainID, false}, + {testnetGenesisTimestamp, false}, + {testnetCapellaHardFork, false}, }) bindLogFlags(cmd.Flags(), &config.Log) @@ -82,6 +88,12 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c } func runSignPartialExit(ctx context.Context, config exitConfig) error { + // Check if custom testnet configuration is provided. + if config.testnetConfig.IsNonZero() { + // Add testnet config to supported networks. + eth2util.AddTestNetwork(config.testnetConfig) + } + identityKey, err := k1util.Load(config.PrivateKeyPath) if err != nil { return errors.Wrap(err, "could not load identity key") From 9ff33f3e2fa5a35b70048921852dc4e25cc31611 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Oct 2024 05:43:08 +0000 Subject: [PATCH 54/89] build(deps): Bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 (#3340) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.20.4 to 1.20.5.
Release notes

Sourced from github.com/prometheus/client_golang's releases.

v1.20.5 / 2024-10-15

We decided to revert the testutil change that made our util functions less error-prone, but created a lot of work for our downstream users. Apologies for the pain! This revert should not cause any major breaking change, even if you already did the work — unless you depend on the exact error message.

Going forward, we plan to reinforce our release testing strategy [1], [2] and deliver an enhanced testutil package/module with more flexible and safer APIs.

Thanks to @​dashpole @​dgrisonnet @​kakkoyun @​ArthurSens @​vesari @​logicalhan @​krajorama @​bwplotka who helped in this patch release! 🤗

Changelog

[BUGFIX] testutil: Reverted #1424; functions using compareMetricFamilies are (again) only failing if filtered metricNames are in the expected input. #1645

Changelog

Sourced from github.com/prometheus/client_golang's changelog.

1.20.5 / 2024-10-15

  • [BUGFIX] testutil: Reverted #1424; functions using compareMetricFamilies are (again) only failing if filtered metricNames are in the expected input.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/prometheus/client_golang&package-manager=go_modules&previous-version=1.20.4&new-version=1.20.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 57e928a64b..7eaf9a7c80 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.13.0 github.com/pelletier/go-toml/v2 v2.2.3 - github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 github.com/protolambda/eth2-shuffle v1.1.0 github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e diff --git a/go.sum b/go.sum index 9221d3dee6..dc865fbd20 100644 --- a/go.sum +++ b/go.sum @@ -422,8 +422,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= From 48acae8a62eec3c087fa714a29aef3832d2aefc1 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Wed, 23 Oct 2024 14:54:11 +0200 Subject: [PATCH 55/89] *: bump golangci-lint to v1.61.0 (#3348) Bump linter category: misc 
ticket: none --- .github/workflows/golangci-lint.yml | 2 +- .pre-commit/run_linter.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 73051431d9..2552b6411b 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.60.3 + version: v1.61.0 - name: notify failure if: failure() && github.ref == 'refs/heads/main' env: diff --git a/.pre-commit/run_linter.sh b/.pre-commit/run_linter.sh index 86d167724b..b9996f10fc 100755 --- a/.pre-commit/run_linter.sh +++ b/.pre-commit/run_linter.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -VERSION="1.60.3" +VERSION="1.61.0" if ! command -v golangci-lint &> /dev/null then From c4c35575fa89bd2bbc765d4c19d85dc8a9dd780f Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Mon, 28 Oct 2024 14:39:58 +0100 Subject: [PATCH 56/89] *: improve logging and error handling for exits (#3347) Add more logging and more detailed errors for exits. Given that the exits are fire and forget operation and not a long running process, we can afford more details without the issue of polluting with too much. 
category: feature ticket: #3136 --- app/obolapi/api.go | 51 +++++++-- app/obolapi/api_internal_test.go | 155 +++++++++++++++++++++++++++ app/obolapi/exit.go | 51 ++------- cmd/cmd.go | 2 +- cmd/exit.go | 2 +- cmd/exit_broadcast.go | 48 +++++---- cmd/exit_broadcast_internal_test.go | 97 ++++++++++++++++- cmd/exit_fetch.go | 20 ++-- cmd/exit_fetch_internal_test.go | 77 +++++++++++++ cmd/exit_list.go | 8 +- cmd/exit_list_internal_test.go | 38 +++++++ cmd/exit_sign.go | 62 +++++------ cmd/exit_sign_internal_test.go | 124 +++++++++++++++++++-- testutil/obolapimock/obolapi_exit.go | 2 +- 14 files changed, 601 insertions(+), 136 deletions(-) diff --git a/app/obolapi/api.go b/app/obolapi/api.go index 4ce60a1a88..3224a3e9ab 100644 --- a/app/obolapi/api.go +++ b/app/obolapi/api.go @@ -28,7 +28,7 @@ const ( func New(urlStr string, options ...func(*Client)) (Client, error) { _, err := url.ParseRequestURI(urlStr) // check that urlStr is valid if err != nil { - return Client{}, errors.Wrap(err, "could not parse Obol API URL") + return Client{}, errors.Wrap(err, "parse Obol API URL") } // always set a default timeout, even if no options are provided @@ -63,7 +63,7 @@ func WithTimeout(timeout time.Duration) func(*Client) { func (c Client) url() *url.URL { baseURL, err := url.ParseRequestURI(c.baseURL) if err != nil { - panic(errors.Wrap(err, "could not parse Obol API URL, this should never happen")) + panic(errors.Wrap(err, "parse Obol API URL, this should never happen")) } return baseURL @@ -83,7 +83,7 @@ func (c Client) PublishLock(ctx context.Context, lock cluster.Lock) error { ctx, cancel := context.WithTimeout(ctx, c.reqTimeout) defer cancel() - err = httpPost(ctx, addr, b) + err = httpPost(ctx, addr, b, nil) if err != nil { return err } @@ -105,27 +105,58 @@ func launchpadURLPath(lock cluster.Lock) string { return fmt.Sprintf(launchpadReturnPathFmt, lock.LockHash) } -func httpPost(ctx context.Context, url *url.URL, b []byte) error { - req, err := 
http.NewRequestWithContext(ctx, http.MethodPost, url.String(), bytes.NewReader(b)) +func httpPost(ctx context.Context, url *url.URL, body []byte, headers map[string]string) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url.String(), bytes.NewReader(body)) if err != nil { return errors.Wrap(err, "new POST request with ctx") } req.Header.Add("Content-Type", "application/json") + for key, val := range headers { + req.Header.Set(key, val) + } res, err := new(http.Client).Do(req) if err != nil { - return errors.Wrap(err, "failed to call POST endpoint") + return errors.Wrap(err, "call POST endpoint") } defer res.Body.Close() - data, err := io.ReadAll(res.Body) + if res.StatusCode/100 != 2 { + data, err := io.ReadAll(res.Body) + if err != nil { + return errors.Wrap(err, "read POST response", z.Int("status", res.StatusCode)) + } + + return errors.New("http POST failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) + } + + return nil +} + +func httpGet(ctx context.Context, url *url.URL, headers map[string]string) (io.ReadCloser, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) if err != nil { - return errors.Wrap(err, "failed to read POST response") + return nil, errors.Wrap(err, "new GET request with ctx") + } + req.Header.Add("Content-Type", "application/json") + + for key, val := range headers { + req.Header.Set(key, val) + } + + res, err := new(http.Client).Do(req) + if err != nil { + return nil, errors.Wrap(err, "call GET endpoint") } if res.StatusCode/100 != 2 { - return errors.New("post failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) + data, err := io.ReadAll(res.Body) + if err != nil { + return nil, errors.Wrap(err, "read POST response", z.Int("status", res.StatusCode)) + } + + return nil, errors.New("http GET failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) } - return nil + return res.Body, nil } diff --git 
a/app/obolapi/api_internal_test.go b/app/obolapi/api_internal_test.go index 32bf7800a3..babcc9a848 100644 --- a/app/obolapi/api_internal_test.go +++ b/app/obolapi/api_internal_test.go @@ -3,6 +3,11 @@ package obolapi import ( + "context" + "io" + "net/http" + "net/http/httptest" + "net/url" "testing" "time" @@ -21,3 +26,153 @@ func TestWithTimeout(t *testing.T) { require.NoError(t, err) require.Equal(t, timeout, oapi.reqTimeout) } + +func TestHttpPost(t *testing.T) { + tests := []struct { + name string + body []byte + headers map[string]string + server *httptest.Server + endpoint string + expectedError string + }{ + { + name: "default scenario", + body: nil, + headers: nil, + endpoint: "/post-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/post-request") + require.Equal(t, r.Method, http.MethodPost) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + w.WriteHeader(http.StatusOK) + })), + expectedError: "", + }, + { + name: "default scenario with body and headers", + body: []byte(`{"test_body_key": "test_body_value"}`), + headers: map[string]string{"test_header_key": "test_header_value"}, + endpoint: "/post-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/post-request") + require.Equal(t, r.Method, http.MethodPost) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + require.Equal(t, r.Header.Get("test_header_key"), "test_header_value") //nolint:canonicalheader + + data, err := io.ReadAll(r.Body) + require.NoError(t, err) + defer r.Body.Close() + require.Equal(t, string(data), `{"test_body_key": "test_body_value"}`) + + w.WriteHeader(http.StatusOK) + _, err = w.Write([]byte(`"OK"`)) + require.NoError(t, err) + })), + expectedError: "", + }, + { + name: "status code not 2XX", + body: nil, + headers: nil, + endpoint: "/post-request", + server: 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/post-request") + require.Equal(t, r.Method, http.MethodPost) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + + w.WriteHeader(http.StatusBadRequest) + _, err := w.Write([]byte(`"Bad Request response"`)) + require.NoError(t, err) + })), + expectedError: "POST failed", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testServerURL, err := url.ParseRequestURI(test.server.URL) + require.NoError(t, err) + err = httpPost(context.Background(), testServerURL.JoinPath(test.endpoint), test.body, test.headers) + if test.expectedError != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestHttpGet(t *testing.T) { + tests := []struct { + name string + headers map[string]string + server *httptest.Server + endpoint string + expectedResp []byte + expectedError string + }{ + { + name: "default scenario", + headers: nil, + endpoint: "/get-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/get-request") + require.Equal(t, r.Method, http.MethodGet) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + w.WriteHeader(http.StatusOK) + })), + expectedError: "", + }, + { + name: "default scenario with headers", + headers: map[string]string{"test_header_key": "test_header_value"}, + endpoint: "/get-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/get-request") + require.Equal(t, r.Method, http.MethodGet) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + require.Equal(t, r.Header.Get("test_header_key"), "test_header_value") //nolint:canonicalheader + + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`"OK"`)) + 
require.NoError(t, err) + })), + expectedResp: []byte(`"OK"`), + expectedError: "", + }, + { + name: "status code not 2XX", + headers: nil, + endpoint: "/get-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/get-request") + require.Equal(t, r.Method, http.MethodGet) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + + w.WriteHeader(http.StatusBadRequest) + _, err := w.Write([]byte(`"Bad Request response"`)) + require.NoError(t, err) + })), + expectedResp: []byte(`"Bad Request response"`), + expectedError: "GET failed", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testServerURL, err := url.ParseRequestURI(test.server.URL) + require.NoError(t, err) + respBody, err := httpGet(context.Background(), testServerURL.JoinPath(test.endpoint), test.headers) + if test.expectedError != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedError) + } else { + require.NoError(t, err) + defer respBody.Close() + resp, err := io.ReadAll(respBody) + require.NoError(t, err) + require.Equal(t, string(resp), string(test.expectedResp)) + } + }) + } +} diff --git a/app/obolapi/exit.go b/app/obolapi/exit.go index 6a41aa60d8..dae3653d9f 100644 --- a/app/obolapi/exit.go +++ b/app/obolapi/exit.go @@ -3,12 +3,10 @@ package obolapi import ( - "bytes" "context" "encoding/hex" "encoding/json" "fmt" - "net/http" "net/url" "sort" "strconv" @@ -71,7 +69,7 @@ func (c Client) PostPartialExits(ctx context.Context, lockHash []byte, shareInde u, err := url.ParseRequestURI(c.baseURL) if err != nil { - return errors.Wrap(err, "bad obol api url") + return errors.Wrap(err, "bad Obol API url") } u.Path = path @@ -107,24 +105,9 @@ func (c Client) PostPartialExits(ctx context.Context, lockHash []byte, shareInde ctx, cancel := context.WithTimeout(ctx, c.reqTimeout) defer cancel() - req, err := http.NewRequestWithContext(ctx, http.MethodPost, 
u.String(), bytes.NewReader(data)) + err = httpPost(ctx, u, data, nil) if err != nil { - return errors.Wrap(err, "http new post request") - } - - req.Header.Set("Content-Type", "application/json") - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return errors.Wrap(err, "http post error") - } - - defer func() { - _ = resp.Body.Close() - }() - - if resp.StatusCode != http.StatusCreated { - return errors.New("http error", z.Int("status_code", resp.StatusCode)) + return errors.Wrap(err, "http Obol API POST request") } return nil @@ -142,7 +125,7 @@ func (c Client) GetFullExit(ctx context.Context, valPubkey string, lockHash []by u, err := url.ParseRequestURI(c.baseURL) if err != nil { - return ExitBlob{}, errors.Wrap(err, "bad obol api url") + return ExitBlob{}, errors.Wrap(err, "bad Obol API url") } u.Path = path @@ -150,11 +133,6 @@ func (c Client) GetFullExit(ctx context.Context, valPubkey string, lockHash []by ctx, cancel := context.WithTimeout(ctx, c.reqTimeout) defer cancel() - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - if err != nil { - return ExitBlob{}, errors.Wrap(err, "http new get request") - } - exitAuthData := FullExitAuthBlob{ LockHash: lockHash, ValidatorPubkey: valPubkeyBytes, @@ -172,28 +150,15 @@ func (c Client) GetFullExit(ctx context.Context, valPubkey string, lockHash []by return ExitBlob{}, errors.Wrap(err, "k1 sign") } - req.Header.Set("Authorization", bearerString(lockHashSignature)) - req.Header.Set("Content-Type", "application/json") - - resp, err := http.DefaultClient.Do(req) + respBody, err := httpGet(ctx, u, map[string]string{"Authorization": bearerString(lockHashSignature)}) if err != nil { - return ExitBlob{}, errors.Wrap(err, "http get error") - } - - if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusNotFound { - return ExitBlob{}, ErrNoExit - } - - return ExitBlob{}, errors.New("http error", z.Int("status_code", resp.StatusCode)) + return ExitBlob{}, 
errors.Wrap(err, "http Obol API GET request") } - defer func() { - _ = resp.Body.Close() - }() + defer respBody.Close() var er FullExitResponse - if err := json.NewDecoder(resp.Body).Decode(&er); err != nil { + if err := json.NewDecoder(respBody).Decode(&er); err != nil { return ExitBlob{}, errors.Wrap(err, "json unmarshal error") } diff --git a/cmd/cmd.go b/cmd/cmd.go index 2b2d15ab54..1eda3c9cdb 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -59,7 +59,7 @@ func New() *cobra.Command { ), newExitCmd( newListActiveValidatorsCmd(runListActiveValidatorsCmd), - newSubmitPartialExitCmd(runSignPartialExit), + newSignPartialExitCmd(runSignPartialExit), newBcastFullExitCmd(runBcastFullExit), newFetchExitCmd(runFetchExit), ), diff --git a/cmd/exit.go b/cmd/exit.go index 2f933aa4c7..d14d19d4c5 100644 --- a/cmd/exit.go +++ b/cmd/exit.go @@ -192,7 +192,7 @@ func eth2Client(ctx context.Context, u []string, timeout time.Duration, forkVers } if _, err = cl.NodeVersion(ctx, ð2api.NodeVersionOpts{}); err != nil { - return nil, errors.Wrap(err, "can't connect to beacon node") + return nil, errors.Wrap(err, "connect to beacon node") } return cl, nil diff --git a/cmd/exit_broadcast.go b/cmd/exit_broadcast.go index b32079c00e..101a81a5be 100644 --- a/cmd/exit_broadcast.go +++ b/cmd/exit_broadcast.go @@ -113,17 +113,17 @@ func runBcastFullExit(ctx context.Context, config exitConfig) error { identityKey, err := k1util.Load(config.PrivateKeyPath) if err != nil { - return errors.Wrap(err, "could not load identity key") + return errors.Wrap(err, "load identity key") } cl, err := loadClusterManifest("", config.LockFilePath) if err != nil { - return errors.Wrap(err, "could not load cluster-lock.json") + return errors.Wrap(err, "load cluster lock", z.Str("lock_file_path", config.LockFilePath)) } eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte(cl.GetForkVersion())) if err != nil { - return errors.Wrap(err, "cannot create eth2 client for specified beacon 
node") + return errors.Wrap(err, "create eth2 client for specified beacon node(s)", z.Any("beacon_nodes_endpoints", config.BeaconNodeEndpoints)) } fullExits := make(map[core.PubKey]eth2p0.SignedVoluntaryExit) @@ -131,15 +131,16 @@ func runBcastFullExit(ctx context.Context, config exitConfig) error { if config.ExitFromFileDir != "" { entries, err := os.ReadDir(config.ExitFromFileDir) if err != nil { - return errors.Wrap(err, "could not read exits directory") + return errors.Wrap(err, "read exits directory", z.Str("exit_file_dir", config.ExitFromFileDir)) } for _, entry := range entries { if !strings.HasPrefix(entry.Name(), "exit-") { continue } - exit, err := fetchFullExit(ctx, filepath.Join(config.ExitFromFileDir, entry.Name()), config, cl, identityKey, "") + valCtx := log.WithCtx(ctx, z.Str("validator_exit_file", entry.Name())) + exit, err := fetchFullExit(valCtx, filepath.Join(config.ExitFromFileDir, entry.Name()), config, cl, identityKey, "") if err != nil { - return errors.Wrap(err, "fetch full exit for all from dir") + return err } validatorPubKey, err := validatorPubKeyFromFileName(entry.Name()) @@ -153,11 +154,10 @@ func runBcastFullExit(ctx context.Context, config exitConfig) error { for _, validator := range cl.GetValidators() { validatorPubKeyHex := fmt.Sprintf("0x%x", validator.GetPublicKey()) - valCtx := log.WithCtx(ctx, z.Str("validator", validatorPubKeyHex)) - + valCtx := log.WithCtx(ctx, z.Str("validator_public_key", validatorPubKeyHex)) exit, err := fetchFullExit(valCtx, "", config, cl, identityKey, validatorPubKeyHex) if err != nil { - return errors.Wrap(err, "fetch full exit for all from public key") + return errors.Wrap(err, "fetch full exit for all validators from public key") } validatorPubKey, err := core.PubKeyFromBytes(validator.GetPublicKey()) if err != nil { @@ -167,9 +167,10 @@ func runBcastFullExit(ctx context.Context, config exitConfig) error { } } } else { - exit, err := fetchFullExit(ctx, strings.TrimSpace(config.ExitFromFilePath), 
config, cl, identityKey, config.ValidatorPubkey) + valCtx := log.WithCtx(ctx, z.Str("validator_public_key", config.ValidatorPubkey), z.Str("validator_exit_file", config.ExitFromFilePath)) + exit, err := fetchFullExit(valCtx, strings.TrimSpace(config.ExitFromFilePath), config, cl, identityKey, config.ValidatorPubkey) if err != nil { - return errors.Wrap(err, "fetch full exit for public key") + return errors.Wrap(err, "fetch full exit for validator", z.Str("validator_public_key", config.ValidatorPubkey), z.Str("validator_exit_file", config.ExitFromFilePath)) } var validatorPubKey core.PubKey if len(strings.TrimSpace(config.ExitFromFilePath)) != 0 { @@ -192,11 +193,11 @@ func validatorPubKeyFromFileName(fileName string) (core.PubKey, error) { validatorPubKeyHex := strings.TrimPrefix(strings.TrimSuffix(fileNameChecked, fileExtension), "exit-0x") validatorPubKeyBytes, err := hex.DecodeString(validatorPubKeyHex) if err != nil { - return "", errors.Wrap(err, "cannot decode public key hex from file name") + return "", errors.Wrap(err, "decode public key hex from file name", z.Str("public_key", validatorPubKeyHex)) } validatorPubKey, err := core.PubKeyFromBytes(validatorPubKeyBytes) if err != nil { - return "", errors.Wrap(err, "cannot decode core public key from hex") + return "", errors.Wrap(err, "decode core public key from hex") } return validatorPubKey, nil @@ -207,7 +208,7 @@ func fetchFullExit(ctx context.Context, exitFilePath string, config exitConfig, var err error if len(exitFilePath) != 0 { - log.Info(ctx, "Retrieving full exit message from path", z.Str("path", exitFilePath)) + log.Info(ctx, "Retrieving full exit message from path") fullExit, err = exitFromPath(exitFilePath) } else { log.Info(ctx, "Retrieving full exit message from publish address") @@ -223,18 +224,18 @@ func broadcastExitsToBeacon(ctx context.Context, eth2Cl eth2wrap.Client, exits m rawPkBytes, err := validator.Bytes() if err != nil { - return errors.Wrap(err, "could not serialize validator key 
bytes") + return errors.Wrap(err, "serialize validator key bytes", z.Str("validator", validator.String())) } pubkey, err := tblsconv.PubkeyFromBytes(rawPkBytes) if err != nil { - return errors.Wrap(err, "could not convert validator key bytes to BLS public key") + return errors.Wrap(err, "convert validator key bytes to BLS public key") } // parse signature signature, err := tblsconv.SignatureFromBytes(fullExit.Signature[:]) if err != nil { - return errors.Wrap(err, "could not parse BLS signature from bytes") + return errors.Wrap(err, "parse BLS signature from bytes", z.Str("exit_signature", fullExit.Signature.String())) } exitRoot, err := sigDataForExit( @@ -244,7 +245,7 @@ func broadcastExitsToBeacon(ctx context.Context, eth2Cl eth2wrap.Client, exits m fullExit.Message.Epoch, ) if err != nil { - return errors.Wrap(err, "cannot calculate hash tree root for exit message for verification") + return errors.Wrap(err, "calculate hash tree root for exit message for verification") } if err := tbls.Verify(pubkey, exitRoot[:], signature); err != nil { @@ -255,8 +256,9 @@ func broadcastExitsToBeacon(ctx context.Context, eth2Cl eth2wrap.Client, exits m for validator, fullExit := range exits { valCtx := log.WithCtx(ctx, z.Str("validator", validator.String())) if err := eth2Cl.SubmitVoluntaryExit(valCtx, &fullExit); err != nil { - return errors.Wrap(err, "could not submit voluntary exit") + return errors.Wrap(err, "submit voluntary exit") } + log.Info(valCtx, "Successfully submitted voluntary exit for validator") } return nil @@ -266,17 +268,17 @@ func broadcastExitsToBeacon(ctx context.Context, eth2Cl eth2wrap.Client, exits m func exitFromObolAPI(ctx context.Context, validatorPubkey, publishAddr string, publishTimeout time.Duration, cl *manifestpb.Cluster, identityKey *k1.PrivateKey) (eth2p0.SignedVoluntaryExit, error) { oAPI, err := obolapi.New(publishAddr, obolapi.WithTimeout(publishTimeout)) if err != nil { - return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "could not 
create obol api client") + return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "create Obol API client", z.Str("publish_address", publishAddr)) } shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) if err != nil { - return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "could not determine operator index from cluster lock for supplied identity key") + return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "determine operator index from cluster lock for supplied identity key") } fullExit, err := oAPI.GetFullExit(ctx, validatorPubkey, cl.GetInitialMutationHash(), shareIdx, identityKey) if err != nil { - return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "could not load full exit data from Obol API") + return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "load full exit data from Obol API", z.Str("publish_address", publishAddr)) } return fullExit.SignedExitMessage, nil @@ -286,7 +288,7 @@ func exitFromObolAPI(ctx context.Context, validatorPubkey, publishAddr string, p func exitFromPath(path string) (eth2p0.SignedVoluntaryExit, error) { f, err := os.Open(path) if err != nil { - return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "can't open signed exit message from path") + return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "open signed exit message from path") } var exit eth2p0.SignedVoluntaryExit diff --git a/cmd/exit_broadcast_internal_test.go b/cmd/exit_broadcast_internal_test.go index 88bf2ba5b6..83990d5d20 100644 --- a/cmd/exit_broadcast_internal_test.go +++ b/cmd/exit_broadcast_internal_test.go @@ -209,22 +209,22 @@ func Test_runBcastFullExitCmd_Config(t *testing.T) { { name: "No identity key", noIdentity: true, - errData: "could not load identity key", + errData: "load identity key", }, { name: "No lock", noLock: true, - errData: "could not load cluster-lock.json", + errData: "load cluster lock", }, { name: "Bad Obol API URL", badOAPIURL: true, - errData: "could not create obol api client", + errData: "create Obol API client", }, { 
name: "Bad beacon node URLs", badBeaconNodeEndpoints: true, - errData: "cannot create eth2 client for specified beacon node", + errData: "create eth2 client for specified beacon node", }, { name: "Bad validator address", @@ -337,3 +337,92 @@ func Test_runBcastFullExitCmd_Config(t *testing.T) { }) } } + +func TestExitBroadcastCLI(t *testing.T) { + tests := []struct { + name string + expectedErr string + + flags []string + }{ + { + name: "check flags", + expectedErr: "load identity key: read private key from disk: open test: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--validator-public-key=test", // single exit + "--beacon-node-endpoints=test1,test2", + "--exit-from-file=test", // single exit + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=false", // single exit + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "check flags all", + expectedErr: "load identity key: read private key from disk: open test: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", // exit all + "--exit-epoch=1", + "--beacon-node-endpoints=test1,test2", + "--exit-from-dir=test", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all", // exit all + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "check flags all", + expectedErr: "load identity key: read private key from disk: open test: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", // exit all + "--exit-epoch=1", + 
"--beacon-node-endpoints=test1,test2", + "--exit-from-dir=test", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all", // exit all + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newExitCmd(newBcastFullExitCmd(runBcastFullExit)) + cmd.SetArgs(append([]string{"broadcast"}, test.flags...)) + + err := cmd.Execute() + if test.expectedErr != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/cmd/exit_fetch.go b/cmd/exit_fetch.go index 7dd7296ec5..9a27a39744 100644 --- a/cmd/exit_fetch.go +++ b/cmd/exit_fetch.go @@ -86,36 +86,36 @@ func runFetchExit(ctx context.Context, config exitConfig) error { } if _, err := os.Stat(config.FetchedExitPath); err != nil { - return errors.Wrap(err, "store exit path") + return errors.Wrap(err, "store exit path", z.Str("fetched_exit_path", config.FetchedExitPath)) } writeTestFile := filepath.Join(config.FetchedExitPath, ".write-test") if err := os.WriteFile(writeTestFile, []byte{}, 0o755); err != nil { //nolint:gosec // write test file - return errors.Wrap(err, "can't write to destination directory") + return errors.Wrap(err, "write to destination directory", z.Str("fetched_exit_path", config.FetchedExitPath)) } if err := os.Remove(writeTestFile); err != nil { - return errors.Wrap(err, "can't delete write test file") + return errors.Wrap(err, "delete write test file", z.Str("test_file_path", writeTestFile)) } identityKey, err := k1util.Load(config.PrivateKeyPath) if err != nil { - return errors.Wrap(err, "could not load identity key") + return errors.Wrap(err, "load identity key", z.Str("private_key_path", config.PrivateKeyPath)) } cl, err := loadClusterManifest("", config.LockFilePath) if err != nil { - return errors.Wrap(err, 
"could not load cluster-lock.json") + return errors.Wrap(err, "load cluster lock", z.Str("lock_file_path", config.LockFilePath)) } oAPI, err := obolapi.New(config.PublishAddress, obolapi.WithTimeout(config.PublishTimeout)) if err != nil { - return errors.Wrap(err, "could not create obol api client") + return errors.Wrap(err, "create Obol API client", z.Str("publish_address", config.PublishAddress)) } shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) if err != nil { - return errors.Wrap(err, "could not determine operator index from cluster lock for supplied identity key") + return errors.Wrap(err, "determine operator index from cluster lock for supplied identity key") } if config.All { @@ -128,7 +128,7 @@ func runFetchExit(ctx context.Context, config exitConfig) error { fullExit, err := oAPI.GetFullExit(valCtx, validatorPubKeyHex, cl.GetInitialMutationHash(), shareIdx, identityKey) if err != nil { - return errors.Wrap(err, "could not load full exit data from Obol API") + return errors.Wrap(err, "load full exit data from Obol API", z.Str("validator_public_key", validatorPubKeyHex)) } err = writeExitToFile(valCtx, validatorPubKeyHex, config.FetchedExitPath, fullExit) @@ -139,7 +139,7 @@ func runFetchExit(ctx context.Context, config exitConfig) error { } else { validator := core.PubKey(config.ValidatorPubkey) if _, err := validator.Bytes(); err != nil { - return errors.Wrap(err, "cannot convert validator pubkey to bytes") + return errors.Wrap(err, "convert validator pubkey to bytes", z.Str("validator_public_key", config.ValidatorPubkey)) } ctx = log.WithCtx(ctx, z.Str("validator", validator.String())) @@ -148,7 +148,7 @@ func runFetchExit(ctx context.Context, config exitConfig) error { fullExit, err := oAPI.GetFullExit(ctx, config.ValidatorPubkey, cl.GetInitialMutationHash(), shareIdx, identityKey) if err != nil { - return errors.Wrap(err, "could not load full exit data from Obol API") + return errors.Wrap(err, "load full exit data from Obol API", 
z.Str("validator_public_key", config.ValidatorPubkey)) } err = writeExitToFile(ctx, config.ValidatorPubkey, config.FetchedExitPath, fullExit) diff --git a/cmd/exit_fetch_internal_test.go b/cmd/exit_fetch_internal_test.go index 8c690adea2..0496eb1da8 100644 --- a/cmd/exit_fetch_internal_test.go +++ b/cmd/exit_fetch_internal_test.go @@ -169,3 +169,80 @@ func Test_runFetchExitBadOutDir(t *testing.T) { require.ErrorContains(t, runFetchExit(context.Background(), config), "permission denied") } + +func TestExitFetchCLI(t *testing.T) { + tests := []struct { + name string + expectedErr string + flags []string + }{ + { + name: "check flags", + expectedErr: "store exit path: stat 1: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-public-key=test", + "--fetched-exit-path=1", + "--publish-timeout=1ms", + "--all=false", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "no validator public key and not all", + expectedErr: "validator-public-key must be specified when exiting single validator.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--fetched-exit-path=1", + "--publish-timeout=1ms", + "--all=false", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "validator public key and all", + expectedErr: "validator-public-key should not be specified when all is, as it is obsolete and misleading.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-public-key=test", + "--fetched-exit-path=1", + "--publish-timeout=1ms", + "--all=true", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + 
"--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newExitCmd(newFetchExitCmd(runFetchExit)) + cmd.SetArgs(append([]string{"fetch"}, test.flags...)) + + err := cmd.Execute() + if test.expectedErr != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/cmd/exit_list.go b/cmd/exit_list.go index ef263f7ec1..e60959b0d6 100644 --- a/cmd/exit_list.go +++ b/cmd/exit_list.go @@ -75,7 +75,7 @@ func runListActiveValidatorsCmd(ctx context.Context, config exitConfig) error { continue } - log.Info(ctx, "Validator", z.Str("pubkey", validator)) + log.Info(ctx, "Validator", z.Str("validator_public_key", validator)) } return nil @@ -84,12 +84,12 @@ func runListActiveValidatorsCmd(ctx context.Context, config exitConfig) error { func listActiveVals(ctx context.Context, config exitConfig) ([]string, error) { cl, err := loadClusterManifest("", config.LockFilePath) if err != nil { - return nil, errors.Wrap(err, "could not load cluster-lock.json") + return nil, errors.Wrap(err, "load cluster lock", z.Str("lock_file_path", config.LockFilePath)) } eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte{}) // fine to avoid initializing a fork version, we're just querying the BN if err != nil { - return nil, errors.Wrap(err, "cannot create eth2 client for specified beacon node") + return nil, errors.Wrap(err, "create eth2 client for specified beacon node(s)", z.Any("beacon_nodes_endpoints", config.BeaconNodeEndpoints)) } var allVals []eth2p0.BLSPubKey @@ -103,7 +103,7 @@ func listActiveVals(ctx context.Context, config exitConfig) ([]string, error) { State: "head", }) if err != nil { - return nil, errors.Wrap(err, "cannot fetch validator list") + return nil, errors.Wrap(err, "fetch validator list from beacon", z.Str("beacon_address", eth2Cl.Address()), 
z.Any("validators", allVals)) } var ret []string diff --git a/cmd/exit_list_internal_test.go b/cmd/exit_list_internal_test.go index 05450b9b6f..974e1f2c21 100644 --- a/cmd/exit_list_internal_test.go +++ b/cmd/exit_list_internal_test.go @@ -197,3 +197,41 @@ func Test_listActiveVals(t *testing.T) { require.Len(t, vals, len(lock.Validators)/2) }) } + +func TestExitListCLI(t *testing.T) { + tests := []struct { + name string + expectedErr string + flags []string + }{ + { + name: "check flags", + expectedErr: "load cluster lock: load cluster manifest from disk: load dag from disk: no file found", + flags: []string{ + "--lock-file=test", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newExitCmd(newListActiveValidatorsCmd(runListActiveValidatorsCmd)) + cmd.SetArgs(append([]string{"active-validator-list"}, test.flags...)) + + err := cmd.Execute() + if test.expectedErr != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/cmd/exit_sign.go b/cmd/exit_sign.go index 9aac498f57..342d6d7cc6 100644 --- a/cmd/exit_sign.go +++ b/cmd/exit_sign.go @@ -23,7 +23,7 @@ import ( "github.com/obolnetwork/charon/eth2util/keystore" ) -func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *cobra.Command { +func newSignPartialExitCmd(runFunc func(context.Context, exitConfig) error) *cobra.Command { var config exitConfig cmd := &cobra.Command{ @@ -70,7 +70,7 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c if !valPubkPresent && !valIdxPresent && !config.All { //nolint:revive // we use our own version of the errors package. 
- return errors.New(fmt.Sprintf("either %s or %s must be specified at least.", validatorIndex.String(), validatorPubkey.String())) + return errors.New(fmt.Sprintf("either %s or %s must be specified at least when exiting single validator.", validatorIndex.String(), validatorPubkey.String())) } if config.All && (valIdxPresent || valPubkPresent) { @@ -96,42 +96,42 @@ func runSignPartialExit(ctx context.Context, config exitConfig) error { identityKey, err := k1util.Load(config.PrivateKeyPath) if err != nil { - return errors.Wrap(err, "could not load identity key") + return errors.Wrap(err, "load identity key", z.Str("private_key_path", config.PrivateKeyPath)) } cl, err := loadClusterManifest("", config.LockFilePath) if err != nil { - return errors.Wrap(err, "could not load cluster-lock.json") + return errors.Wrap(err, "load cluster lock", z.Str("lock_file_path", config.LockFilePath)) } rawValKeys, err := keystore.LoadFilesUnordered(config.ValidatorKeysDir) if err != nil { - return errors.Wrap(err, "could not load keystore, check if path exists", z.Str("path", config.ValidatorKeysDir)) + return errors.Wrap(err, "load keystore, check if path exists", z.Str("validator_keys_dir", config.ValidatorKeysDir)) } valKeys, err := rawValKeys.SequencedKeys() if err != nil { - return errors.Wrap(err, "could not load keystore") + return errors.Wrap(err, "load keystore") } shares, err := keystore.KeysharesToValidatorPubkey(cl, valKeys) if err != nil { - return errors.Wrap(err, "could not match local validator key shares with their counterparty in cluster lock") + return errors.Wrap(err, "match local validator key shares with their counterparty in cluster lock") } shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) if err != nil { - return errors.Wrap(err, "could not determine operator index from cluster lock for supplied identity key") + return errors.Wrap(err, "determine operator index from cluster lock for supplied identity key") } oAPI, err := 
obolapi.New(config.PublishAddress, obolapi.WithTimeout(config.PublishTimeout)) if err != nil { - return errors.Wrap(err, "could not create obol api client") + return errors.Wrap(err, "create Obol API client", z.Str("publish_address", config.PublishAddress)) } eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte(cl.GetForkVersion())) if err != nil { - return errors.Wrap(err, "cannot create eth2 client for specified beacon node") + return errors.Wrap(err, "create eth2 client for specified beacon node(s)", z.Any("beacon_nodes_endpoints", config.BeaconNodeEndpoints)) } if config.ValidatorIndexPresent { @@ -149,17 +149,17 @@ func runSignPartialExit(ctx context.Context, config exitConfig) error { if config.All { exitBlobs, err = signAllValidatorsExits(ctx, config, eth2Cl, shares) if err != nil { - return errors.Wrap(err, "could not sign exits for all validators") + return errors.Wrap(err, "sign exits for all validators") } } else { exitBlobs, err = signSingleValidatorExit(ctx, config, eth2Cl, shares) if err != nil { - return errors.Wrap(err, "could not sign exit for validator") + return errors.Wrap(err, "sign exit for validator") } } if err := oAPI.PostPartialExits(ctx, cl.GetInitialMutationHash(), shareIdx, identityKey, exitBlobs...); err != nil { - return errors.Wrap(err, "could not POST partial exit message to Obol API") + return errors.Wrap(err, "http POST partial exit message to Obol API") } return nil @@ -168,7 +168,7 @@ func runSignPartialExit(ctx context.Context, config exitConfig) error { func signSingleValidatorExit(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client, shares keystore.ValidatorShares) ([]obolapi.ExitBlob, error) { valEth2, err := fetchValidatorBLSPubKey(ctx, config, eth2Cl) if err != nil { - return nil, errors.Wrap(err, "cannot fetch validator public key") + return nil, errors.Wrap(err, "fetch validator public key") } validator := core.PubKeyFrom48Bytes(valEth2) @@ -180,14 +180,14 @@ func 
signSingleValidatorExit(ctx context.Context, config exitConfig, eth2Cl eth2 valIndex, err := fetchValidatorIndex(ctx, config, eth2Cl) if err != nil { - return nil, errors.Wrap(err, "cannot fetch validator index") + return nil, errors.Wrap(err, "fetch validator index") } - log.Info(ctx, "Signing exit message for validator") + log.Info(ctx, "Signing partial exit message for validator", z.Str("validator_public_key", valEth2.String()), z.U64("validator_index", uint64(valIndex))) exitMsg, err := signExit(ctx, eth2Cl, valIndex, ourShare.Share, eth2p0.Epoch(config.ExitEpoch)) if err != nil { - return nil, errors.Wrap(err, "cannot sign partial exit message") + return nil, errors.Wrap(err, "sign partial exit message", z.Str("validator_public_key", valEth2.String()), z.U64("validator_index", uint64(valIndex)), z.Int("exit_epoch", int(config.ExitEpoch))) } return []obolapi.ExitBlob{ @@ -203,43 +203,43 @@ func signAllValidatorsExits(ctx context.Context, config exitConfig, eth2Cl eth2w for pk := range shares { eth2PK, err := pk.ToETH2() if err != nil { - return nil, errors.Wrap(err, "cannot convert core pubkey to eth2 pubkey") + return nil, errors.Wrap(err, "convert core pubkey to eth2 pubkey", z.Str("pub_key", eth2PK.String())) } valsEth2 = append(valsEth2, eth2PK) } rawValData, err := queryBeaconForValidator(ctx, eth2Cl, valsEth2, nil) if err != nil { - return nil, errors.Wrap(err, "fetch validator indices from beacon") + return nil, errors.Wrap(err, "fetch all validators indices from beacon") } for _, val := range rawValData.Data { share, ok := shares[core.PubKeyFrom48Bytes(val.Validator.PublicKey)] if !ok { - //nolint:revive // we use our own version of the errors package. 
- return nil, errors.New(fmt.Sprintf("validator public key %s not found in cluster lock", val.Validator.PublicKey)) + return nil, errors.New("validator public key not found in cluster lock", z.Str("validator_public_key", val.Validator.PublicKey.String())) } share.Index = int(val.Index) shares[core.PubKeyFrom48Bytes(val.Validator.PublicKey)] = share } - log.Info(ctx, "Signing exit message for all validators") + log.Info(ctx, "Signing partial exit message for all active validators") var exitBlobs []obolapi.ExitBlob for pk, share := range shares { exitMsg, err := signExit(ctx, eth2Cl, eth2p0.ValidatorIndex(share.Index), share.Share, eth2p0.Epoch(config.ExitEpoch)) if err != nil { - return nil, errors.Wrap(err, "cannot sign partial exit message") + return nil, errors.Wrap(err, "sign partial exit message", z.Str("validator_public_key", pk.String()), z.Int("validator_index", share.Index), z.Int("exit_epoch", int(config.ExitEpoch))) } eth2PK, err := pk.ToETH2() if err != nil { - return nil, errors.Wrap(err, "cannot convert core pubkey to eth2 pubkey") + return nil, errors.Wrap(err, "convert core pubkey to eth2 pubkey", z.Str("core_pubkey", pk.String())) } exitBlob := obolapi.ExitBlob{ PublicKey: eth2PK.String(), SignedExitMessage: exitMsg, } exitBlobs = append(exitBlobs, exitBlob) + log.Info(ctx, "Successfully signed exit message", z.Str("validator_public_key", pk.String()), z.Int("validator_index", share.Index)) } return exitBlobs, nil @@ -249,7 +249,7 @@ func fetchValidatorBLSPubKey(ctx context.Context, config exitConfig, eth2Cl eth2 if config.ValidatorPubkey != "" { valEth2, err := core.PubKey(config.ValidatorPubkey).ToETH2() if err != nil { - return eth2p0.BLSPubKey{}, errors.Wrap(err, "cannot convert core pubkey to eth2 pubkey") + return eth2p0.BLSPubKey{}, errors.Wrap(err, "convert core pubkey to eth2 pubkey", z.Str("core_pubkey", config.ValidatorPubkey)) } return valEth2, nil @@ -257,7 +257,7 @@ func fetchValidatorBLSPubKey(ctx context.Context, config exitConfig, 
eth2Cl eth2 rawValData, err := queryBeaconForValidator(ctx, eth2Cl, nil, []eth2p0.ValidatorIndex{eth2p0.ValidatorIndex(config.ValidatorIndex)}) if err != nil { - return eth2p0.BLSPubKey{}, errors.Wrap(err, "fetch validator pubkey from beacon") + return eth2p0.BLSPubKey{}, errors.Wrap(err, "fetch validator pubkey from beacon", z.Str("beacon_address", eth2Cl.Address()), z.U64("validator_index", config.ValidatorIndex)) } for _, val := range rawValData.Data { @@ -266,7 +266,7 @@ func fetchValidatorBLSPubKey(ctx context.Context, config exitConfig, eth2Cl eth2 } } - return eth2p0.BLSPubKey{}, errors.New("validator index not found in beacon node response") + return eth2p0.BLSPubKey{}, errors.New("validator index not found in beacon node response", z.Str("beacon_address", eth2Cl.Address()), z.U64("validator_index", config.ValidatorIndex), z.Any("raw_response", rawValData)) } func fetchValidatorIndex(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client) (eth2p0.ValidatorIndex, error) { @@ -276,12 +276,12 @@ func fetchValidatorIndex(ctx context.Context, config exitConfig, eth2Cl eth2wrap valEth2, err := core.PubKey(config.ValidatorPubkey).ToETH2() if err != nil { - return 0, errors.Wrap(err, "cannot convert core pubkey to eth2 pubkey") + return 0, errors.Wrap(err, "convert core pubkey to eth2 pubkey", z.Str("core_pubkey", config.ValidatorPubkey)) } rawValData, err := queryBeaconForValidator(ctx, eth2Cl, []eth2p0.BLSPubKey{valEth2}, nil) if err != nil { - return 0, errors.Wrap(err, "cannot fetch validator index from beacon") + return 0, errors.Wrap(err, "fetch validator index from beacon", z.Str("beacon_address", eth2Cl.Address()), z.Str("validator_pubkey", valEth2.String())) } for _, val := range rawValData.Data { @@ -290,7 +290,7 @@ func fetchValidatorIndex(ctx context.Context, config exitConfig, eth2Cl eth2wrap } } - return 0, errors.New("validator public key not found in beacon node response") + return 0, errors.New("validator public key not found in beacon node 
response", z.Str("beacon_address", eth2Cl.Address()), z.Str("validator_pubkey", valEth2.String()), z.Any("raw_response", rawValData)) } func queryBeaconForValidator(ctx context.Context, eth2Cl eth2wrap.Client, pubKeys []eth2p0.BLSPubKey, indices []eth2p0.ValidatorIndex) (*eth2api.Response[map[eth2p0.ValidatorIndex]*eth2v1.Validator], error) { @@ -302,7 +302,7 @@ func queryBeaconForValidator(ctx context.Context, eth2Cl eth2wrap.Client, pubKey rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) if err != nil { - return nil, errors.Wrap(err, "fetch validators from beacon") + return nil, errors.Wrap(err, "fetch validators from beacon", z.Str("beacon_address", eth2Cl.Address()), z.Any("options", valAPICallOpts)) } return rawValData, nil diff --git a/cmd/exit_sign_internal_test.go b/cmd/exit_sign_internal_test.go index 7adf39f81f..7ec08ad950 100644 --- a/cmd/exit_sign_internal_test.go +++ b/cmd/exit_sign_internal_test.go @@ -66,7 +66,7 @@ func Test_runSubmitPartialExit(t *testing.T) { false, "test", 0, - "cannot convert core pubkey to eth2 pubkey", + "convert core pubkey to eth2 pubkey", false, ) }) @@ -102,7 +102,7 @@ func Test_runSubmitPartialExit(t *testing.T) { true, "test", 9999, - "cannot convert core pubkey to eth2 pubkey", + "convert core pubkey to eth2 pubkey", false, ) }) @@ -263,32 +263,32 @@ func Test_runSubmitPartialExit_Config(t *testing.T) { { name: "No identity key", noIdentity: true, - errData: "could not load identity key", + errData: "load identity key", }, { name: "No cluster lock", noLock: true, - errData: "could not load cluster-lock.json", + errData: "load cluster lock", }, { name: "No keystore", noKeystore: true, - errData: "could not load keystore", + errData: "load keystore", }, { name: "Bad Obol API URL", badOAPIURL: true, - errData: "could not create obol api client", + errData: "create Obol API client", }, { name: "Bad beacon node URL", badBeaconNodeEndpoints: true, - errData: "cannot create eth2 client for specified beacon node", + 
errData: "create eth2 client for specified beacon node", }, { name: "Bad validator address", badValidatorAddr: true, - errData: "cannot convert core pubkey to eth2 pubkey", + errData: "convert core pubkey to eth2 pubkey", }, } @@ -386,3 +386,111 @@ func Test_runSubmitPartialExit_Config(t *testing.T) { }) } } + +func TestExitSignCLI(t *testing.T) { + tests := []struct { + name string + expectedErr string + flags []string + }{ + { + name: "check flags", + expectedErr: "load identity key: read private key from disk: open test: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--validator-public-key=test", + "--validator-index=1", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=false", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "no pubkey, no index, single validator", + expectedErr: "either validator-index or validator-public-key must be specified at least when exiting single validator.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=false", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "pubkey present, all validators", + expectedErr: "validator-index or validator-public-key should not be specified when all is, as they are obsolete and misleading.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + 
"--validator-public-key=test", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=true", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "index present, all validators", + expectedErr: "validator-index or validator-public-key should not be specified when all is, as they are obsolete and misleading.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--validator-index=1", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=true", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newExitCmd(newSignPartialExitCmd(runSignPartialExit)) + cmd.SetArgs(append([]string{"sign"}, test.flags...)) + + err := cmd.Execute() + if test.expectedErr != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/testutil/obolapimock/obolapi_exit.go b/testutil/obolapimock/obolapi_exit.go index 4d321c03c8..86f2de1567 100644 --- a/testutil/obolapimock/obolapi_exit.go +++ b/testutil/obolapimock/obolapi_exit.go @@ -314,7 +314,7 @@ func cleanTmpl(tmpl string) string { "").Replace(tmpl) } -// MockServer returns a obol API mock test server. +// MockServer returns a Obol API mock test server. // It returns a http.Handler to be served over HTTP, and a function to add cluster lock files to its database. 
func MockServer(dropOnePsig bool, beacon eth2wrap.Client) (http.Handler, func(lock cluster.Lock)) { ts := testServer{ From fa29651a58aeb67adc2b652f857d9e1bad53cda2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 07:38:02 +0000 Subject: [PATCH 57/89] build(deps): Bump github.com/multiformats/go-multiaddr from 0.13.0 to 0.14.0 (#3356) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/multiformats/go-multiaddr](https://github.com/multiformats/go-multiaddr) from 0.13.0 to 0.14.0.
Release notes

Sourced from github.com/multiformats/go-multiaddr's releases.

v0.14.0

What's Changed

New Contributors

Full Changelog: https://github.com/multiformats/go-multiaddr/compare/v0.13.0...v0.14.0

Commits
  • 37363a0 Merge pull request #258 from multiformats/marco/release-v0.14
  • 04bcb19 Release v0.14.0
  • 04c33d5 Merge pull request #256 from pyropy/feat/memory
  • 94c19d5 Add memory validation function
  • 2159c37 Implement memory multiaddrs
  • 414c602 Merge pull request #253 from multiformats/uci/update-go
  • 111b9ec chore: bump go.mod to Go 1.22 and run go fix
  • f63b0ed Merge pull request #247 from multiformats/marco/validate-ipcidr
  • bbdd1a5 check for nil interfaces (#251)
  • 94628cf Make it safe to roundtrip SplitXXX and Join (#250)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/multiformats/go-multiaddr&package-manager=go_modules&previous-version=0.13.0&new-version=0.14.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7eaf9a7c80..2b04435cf4 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/jsternberg/zap-logfmt v1.3.0 github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-msgio v0.3.0 - github.com/multiformats/go-multiaddr v0.13.0 + github.com/multiformats/go-multiaddr v0.14.0 github.com/pelletier/go-toml/v2 v2.2.3 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 diff --git a/go.sum b/go.sum index dc865fbd20..0cfb8a209b 100644 --- a/go.sum +++ b/go.sum @@ -368,8 +368,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= -github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= +github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= +github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= From c70a5be53fea26bd481c20920bbddc98bb4fd5ac Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:08:24 +0100 Subject: [PATCH 58/89] cmd: increase Obol API timeout for exits (#3353) In our docs we promote publish timeout set to 5 
minutes. We might as well just update it in the CLI 5 min to be the default... category: misc ticket: none --- cmd/exit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/exit.go b/cmd/exit.go index d14d19d4c5..702bb0037d 100644 --- a/cmd/exit.go +++ b/cmd/exit.go @@ -162,7 +162,7 @@ func bindExitFlags(cmd *cobra.Command, config *exitConfig, flags []exitCLIFlag) case fetchedExitPath: cmd.Flags().StringVar(&config.FetchedExitPath, fetchedExitPath.String(), "./", maybeRequired("Path to store fetched signed exit messages.")) case publishTimeout: - cmd.Flags().DurationVar(&config.PublishTimeout, publishTimeout.String(), 30*time.Second, "Timeout for publishing a signed exit to the publish-address API.") + cmd.Flags().DurationVar(&config.PublishTimeout, publishTimeout.String(), 5*time.Minute, "Timeout for publishing a signed exit to the publish-address API.") case validatorIndex: cmd.Flags().Uint64Var(&config.ValidatorIndex, validatorIndex.String(), 0, "Validator index of the validator to exit, the associated public key must be present in the cluster lock manifest. 
If --validator-public-key is also provided, validator existence won't be checked on the beacon chain.") case all: From 69efcbb114e1ca8e56a1d342e9788733367e4b9f Mon Sep 17 00:00:00 2001 From: Anthony PHAM Date: Mon, 4 Nov 2024 14:13:04 +0100 Subject: [PATCH 59/89] *: add needs to trigger (#3351) Add needs to trigger category: feature ticket: none --- .github/workflows/release.yml | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7d2e6dae93..712856d654 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,6 +33,7 @@ jobs: token: ${{ secrets.RELEASE_SECRET }} trigger-dispatch: + needs: release runs-on: ubuntu-latest steps: - name: Extract tag name @@ -46,14 +47,6 @@ jobs: event-type: update-version client-payload: '{"tag": "${{ env.TAG_NAME }}"}' - - name: Trigger dispatch for obol-infrastructure - uses: peter-evans/repository-dispatch@v3 - with: - token: ${{ secrets.OBOL_PLATFORM_PAT }} - repository: ObolNetwork/obol-infrastructure - event-type: update-version - client-payload: '{"tag": "${{ env.TAG_NAME }}"}' - - name: Trigger dispatch for helm-charts uses: peter-evans/repository-dispatch@v3 with: From 2815409f02941046ee1d23583cadda6e4cd71e23 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Thu, 7 Nov 2024 22:20:09 +0100 Subject: [PATCH 60/89] github: bump only patch versions for bls library (#3352) Bump only patch versions for `bls-eth-go-binary`. Hope this works as intended...
category: misc ticket: none --- .github/dependabot.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8206c430c3..5dbb112d61 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,6 +4,9 @@ updates: directory: "/" schedule: interval: "daily" + ignore: + - dependency-name: "github.com/herumi/bls-eth-go-binary" + update-types: ["version-update:semver-major","version-update:semver-minor"] - package-ecosystem: "docker" directories: - "/" From 603e263a48080f9015211a6ac82b6c6d71f46413 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 09:09:51 +0000 Subject: [PATCH 61/89] build(deps): Bump golang from 1.23.2-bookworm to 1.23.3-bookworm (#3359) Bumps golang from 1.23.2-bookworm to 1.23.3-bookworm. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang&package-manager=docker&previous-version=1.23.2-bookworm&new-version=1.23.3-bookworm)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 75f0184f95..2fd16ed50f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Container for building Go binary. -FROM golang:1.23.2-bookworm AS builder +FROM golang:1.23.3-bookworm AS builder # Install dependencies RUN apt-get update && apt-get install -y --no-install-recommends build-essential git From ccd8a7fb86a8b78825a82e85344433cf91138aa3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 09:10:28 +0000 Subject: [PATCH 62/89] build(deps): Bump golang from 1.23.2-alpine to 1.23.3-alpine in /testutil/promrated (#3360) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [//]: # (dependabot-start) ⚠️ **Dependabot is rebasing this PR** ⚠️ Rebasing might not happen immediately, so don't worry if this takes some time. Note: if you make any changes to this PR yourself, they will take precedence over the rebase. --- [//]: # (dependabot-end) Bumps golang from 1.23.2-alpine to 1.23.3-alpine. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang&package-manager=docker&previous-version=1.23.2-alpine&new-version=1.23.3-alpine)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- testutil/promrated/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testutil/promrated/Dockerfile b/testutil/promrated/Dockerfile index c6a73e9c20..4e490c82d9 100644 --- a/testutil/promrated/Dockerfile +++ b/testutil/promrated/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23.2-alpine AS builder +FROM golang:1.23.3-alpine AS builder # Install dependencies RUN apk add --no-cache build-base git From 3943a41e5d5db0887694062ec87b51398b41d887 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 09:10:42 +0000 Subject: [PATCH 63/89] build(deps): Bump golang.org/x/sync from 0.8.0 to 0.9.0 (#3362) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.8.0 to 0.9.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/sync&package-manager=go_modules&previous-version=0.8.0&new-version=0.9.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2b04435cf4..1ea23822d7 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/crypto v0.28.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.9.0 golang.org/x/term v0.25.0 golang.org/x/time v0.7.0 golang.org/x/tools v0.26.0 diff --git a/go.sum b/go.sum index 0cfb8a209b..b5e105c437 100644 --- a/go.sum +++ b/go.sum @@ -644,8 +644,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 569bb82f6f0425e6b55c40e5e30841fadb7640a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 10:43:26 +0000 Subject: [PATCH 64/89] build(deps): Bump golang.org/x/time from 0.7.0 to 0.8.0 (#3363) Bumps [golang.org/x/time](https://github.com/golang/time) from 0.7.0 to 0.8.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/time&package-manager=go_modules&previous-version=0.7.0&new-version=0.8.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1ea23822d7..697da78c7c 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/sync v0.9.0 golang.org/x/term v0.25.0 - golang.org/x/time v0.7.0 + golang.org/x/time v0.8.0 golang.org/x/tools v0.26.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 diff --git a/go.sum b/go.sum index b5e105c437..abd3c2a9c2 100644 --- a/go.sum +++ b/go.sum @@ -686,8 +686,8 @@ golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 3402ff7e31c916be34759051d53b13815f59a5c2 Mon Sep 17 00:00:00 2001 From: Cypher Pepe <125112044+cypherpepe@users.noreply.github.com> Date: Fri, 8 Nov 2024 18:07:06 +0300 Subject: [PATCH 65/89] docs: fix typos in documentation files (#3364) This pull request fixes several typographical errors found in the documentation files, specifically in the following: - `dkg.md` - `goguidelines.md` - `structure.md` category: 
docs ticket: none --- docs/dkg.md | 2 +- docs/goguidelines.md | 2 +- docs/structure.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/dkg.md b/docs/dkg.md index 876df49675..60f0fbdead 100644 --- a/docs/dkg.md +++ b/docs/dkg.md @@ -36,7 +36,7 @@ This cluster-definition file is created with the help of the [Distributed Valida - The list of participants in the cluster specified by Ethereum address(/ENS) - The threshold of fault tolerance required (if not choosing the safe default) - The network (fork_version/chainId) that this cluster will validate on -- These key pieces of information form the basis of the cluster configuration. These fields (and some technical fields like DKG algorithm to use) are serialised and merklised to produce the manifests `cluster_definition_hash`. This merkle root will be used to confirm that their is no ambiguity or deviation between manifests when they are provided to charon nodes. +- These key pieces of information form the basis of the cluster configuration. These fields (and some technical fields like DKG algorithm to use) are serialised and merklised to produce the manifests `cluster_definition_hash`. This merkle root will be used to confirm that there is no ambiguity or deviation between manifests when they are provided to charon nodes. - Once the leader is satisfied with the configuration they publish it to the launchpad's data availability layer for the other participants to access. (For early development the launchpad will use a centralised backend db to store the cluster configuration. Near production, solutions like IPFS or arweave may be more suitable for the long term decentralisation of the launchpad.) - The leader will then share the URL to this ceremony with their intended participants. - Anyone that clicks the ceremony url, or inputs the `config_hash` when prompted on the landing page will be brought to the ceremony status page. 
(After completing all disclaimers and advisories) diff --git a/docs/goguidelines.md b/docs/goguidelines.md index 5d1fe4602a..574233ad03 100644 --- a/docs/goguidelines.md +++ b/docs/goguidelines.md @@ -1,6 +1,6 @@ # Charon Go Guidelines -This page contains guidelines, principals and best practices relating to how we write go code. +This page contains guidelines, principles and best practices relating to how we write go code. As an open source project, we need to aim for high code quality, consistency and canonical go. ## Required Knowledge diff --git a/docs/structure.md b/docs/structure.md index c7ab166d2b..9d99bddfb0 100644 --- a/docs/structure.md +++ b/docs/structure.md @@ -78,7 +78,7 @@ charon/ # project root - `version`: Print charon version - Defines and parses [viper](https://github.com/spf13/viper) configuration parameters for required by each command. - `cluster/`: Cluster config definition and files formats - - `cluster-definition.json` defines the intended cluster including confutation including operators. + - `cluster-definition.json` defines the intended cluster including configuration including operators. - `cluster-lock.json` extends cluster definition adding distributed validator public keys and public shares. - `dkg/`: Distributed Key Generation command - Runs the dkg command that takes a cluster definition as input and generates a cluster lock file and private shares as output. From f56de9a7c3f215ed87a4526ff4c59e9396942f29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 9 Nov 2024 17:14:06 +0000 Subject: [PATCH 66/89] build(deps): Bump golang.org/x/crypto from 0.28.0 to 0.29.0 (#3366) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.28.0 to 0.29.0.
Commits
  • 6018723 go.mod: update golang.org/x dependencies
  • 71ed71b README: don't recommend go get
  • 750a45f sha3: add MarshalBinary, AppendBinary, and UnmarshalBinary
  • 36b1725 sha3: avoid trailing permutation
  • 80ea76e sha3: fix padding for long cSHAKE parameters
  • c17aa50 sha3: avoid buffer copy
  • 7cfb916 ssh: return unexpected msg error when server fails keyboard-interactive auth ...
  • b61b08d chacha20: extend ppc64le support to ppc64
  • 6c21748 internal/poly1305: extend ppc64le support to ppc64
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/crypto&package-manager=go_modules&previous-version=0.28.0&new-version=0.29.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 697da78c7c..0e3814bcf6 100644 --- a/go.mod +++ b/go.mod @@ -41,10 +41,10 @@ require ( go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.28.0 + golang.org/x/crypto v0.29.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/sync v0.9.0 - golang.org/x/term v0.25.0 + golang.org/x/term v0.26.0 golang.org/x/time v0.8.0 golang.org/x/tools v0.26.0 google.golang.org/protobuf v1.34.2 @@ -192,8 +192,8 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect diff --git a/go.sum b/go.sum index abd3c2a9c2..e2e9a4d82d 100644 --- a/go.sum +++ b/go.sum @@ -596,8 +596,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= @@ -673,17 +673,17 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod 
h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= From dbfaf1a339e6d17f7a9100d524033c06f471a134 Mon Sep 17 00:00:00 2001 From: futreall <86553580+futreall@users.noreply.github.com> Date: Mon, 11 Nov 2024 12:28:39 +0200 Subject: [PATCH 67/89] docs: fix typos and improve clarity in documentation files (#3367) This PR addresses minor typos, formatting issues, and enhances clarity in the following documentation files: - **branching.md**: Fixed typographical errors and improved clarity in descriptions of branching and release models. - **contributing.md**: Corrected link formatting issues, spelling mistakes, and clarified language in contributing guidelines. - **dkg.md**: Fixed typographical errors and improved sentence structure for better readability. - **metrics.md**: Corrected typo in metric label name. These changes improve the overall readability and accuracy of the documentation, ensuring that users and contributors have clearer instructions and information. category: docs ticket: none --- cmd/markdown_internal_test.go | 2 +- docs/branching.md | 8 ++++---- docs/contributing.md | 12 ++++++------ docs/dkg.md | 8 ++++---- docs/metrics.md | 2 +- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/cmd/markdown_internal_test.go b/cmd/markdown_internal_test.go index 98815d0e39..bc8626bf0e 100644 --- a/cmd/markdown_internal_test.go +++ b/cmd/markdown_internal_test.go @@ -84,7 +84,7 @@ This document contains all the prometheus metrics exposed by a charon node. All metrics contain the following labels, so they are omitted from the table below: - 'cluster_hash': The cluster lock hash uniquely identifying the cluster. -- 'clustter_name': The cluster lock name. 
+- 'cluster_name': The cluster lock name. - 'cluster_network': The cluster network name; goerli, mainnet, etc. - 'cluster_peer': The name of this node in the cluster. It is determined from the operator ENR. diff --git a/docs/branching.md b/docs/branching.md index 56a8ce7444..8138026e75 100644 --- a/docs/branching.md +++ b/docs/branching.md @@ -19,11 +19,11 @@ We follow [Trunk Based Development](https://trunkbaseddevelopment.com/) as a bra ## Controlled introduction of change: -- Since a feature cannot be added as a single big merge of a big feature branch, tools and patterns are required that allow gradual controlled introduction of increment changes without breaking. +- Since a feature cannot be added as a single big merge of a big feature branch, tools and patterns are required that allow gradual controlled introduction of incremental changes without breaking. - New code can be added as “dead code”. So, it has not been integrated into the actual program yet. Once it is properly complete, it can be integrated in a single PR. - Some features should however not be enabled straight into prod/mainnet, but should be rolled-out slowly being first tested in `alpha` (internal devnet only), then `beta` (internal and external testnet), and then only `stable` (enabled everywhere). This can be achieved by simple [feature switches](https://trunkbaseddevelopment.com/feature-flags/) (if statements) that enable features based on their `feature_set` status. - Another powerful pattern to gradually introduce change is [branching by abstraction](https://trunkbaseddevelopment.com/branch-by-abstraction/). This basically introduces an abstraction layer at the point where a new feature has to replace an old feature (like an interface). Using dependency injection, the new feature can be integrated during testing/staging while the old feature is still being used in production. 
-- Note that both feature switches and/or abstraction layers used to roll out a feature should be removed once released to prod/main-net. +- Note that both feature switches and/or abstraction layers used to roll out a feature should be removed once released to prod/mainnet. ### Release Process @@ -44,7 +44,7 @@ The process to follow for the next v0.16.0 release is the following: 5. When all relevant changes have been included in main, a new “release branch”. It must be called `main-v0.16`. - Release branches are called `main-v0.X` - Release branches are high-risk branches, and must be treated with the same security mindset as the `main` branch. - - Note that github branch matching doesn’t support OR logic, so we chose a common `main*` prefix to identify all protected branches. + - Note that Github branch matching doesn’t support OR logic, so we chose a common `main*` prefix to identify all protected branches. 6. After the release branch has been created, the `main` branch app/version is manually updated to `v0.17-dev` and add `v0.17` to `version.Supported()` versions. - `v0.X-dev` indicates that the code is in the main branch. - It also indicates this is development only code not an official release. @@ -57,7 +57,7 @@ The process to follow for the next v0.16.0 release is the following: - Note that the `build-push-release` action should dynamically update the app/version to the value of the git tag when building the docker image. 9. Before a `v0.16.X` release is created, a `v0.16.X-rc[1-99]` release candidate needs to be created and thoroughly tested both internally and externally. 10. After a `v0.16.X` release was created, the release notes need to be created. - - The release github action does auto-generate release notes. + - The release Github action does auto-generate release notes. - If they are incorrect, manual release notes can be created via: `go run testutil/genchangelog/main.go --range=v0.15.0..v0.16.0`. 
Note that images are built and tagged for each commit on the main and release branch using the app/version tag, e.g. `v0.X-dev` for `main`, and `v0.X-rc` for release branches. Main branch commits are also tagged with `latest`. diff --git a/docs/contributing.md b/docs/contributing.md index 2ea143c1c2..4b5d6a1774 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -18,12 +18,12 @@ instead of opening a public issue or PR on GitHub. - If you have found a bug... - Check for existing bug reports of the same issue in GitHub. - Do not post about it publicly if it is a suspected vulnerability to protect Obol's users; - instead use `security@obol.tech`. + instead, use `security@obol.tech`. - Maybe send a message in relevant community channels if you are unsure whether you are seeing a technical issue. - Open a GitHub issue if everything else checks out 🤓 - Are you thinking of a small change that just makes sense? Feel free to submit a PR. - If you're envisioning a larger feature or are just looking for a discussion, - let's chat in the [Obol Discord](https://discord.com/invite/n6ebKsX46w)under `#dev-community`. + let's chat in the [Obol Discord](https://discord.com/invite/n6ebKsX46w) under `#dev-community`. - A quick sync before coding avoids conflicting work and makes large PRs much more likely to be accepted. - 👀 The Discord channel is currently _invite-only_ to prevent spam. Please ping a team member to get access. @@ -42,7 +42,7 @@ an associated issue with a design discussed and decided upon. Small bug fixes an improvements don't need issues. New features and bug fixes must have tests. Documentation may need to be updated. If you're -unsure what to update, open the PR, and we'll discuss during review. +unsure what to update, open the PR and we'll discuss during review. Note that PRs updating dependencies and new Go versions are not accepted. Please file an issue instead. @@ -75,7 +75,7 @@ Note: PRs can only be merged by obol-bulldozer bot. 
It is author's responsibilit - The PR title and body are used as the final squash-merged git commit message. - The PR's original git commits are therefore lost (so naming isn't specified) - **PR title format** is defined as: - - Following the [go team's commit format](https://github.com/golang/go/commits/master): `package[/path]: concise overview of change` + - Following the [Go team's commit format](https://github.com/golang/go/commits/master): `package[/path]: concise overview of change` - Prefix identifies the primary package affected by the change. - Prefix can be a single or double hierarchical package name, but not three or more. E.g. `app` , or `app/tracer`. - The rest of the title must be a concise high-level overview in the present tense and starting with lower case. @@ -90,7 +90,7 @@ Note: PRs can only be merged by obol-bulldozer bot. It is author's responsibilit ``` runner/tracer: add jaeger otel exporter -Adds the jaeger exporter to our opentelemetery infra. +Adds the jaeger exporter to our opentelemetry infra. category: feature ticket: #206 @@ -113,7 +113,7 @@ for each PR commit. But it is highly recommended running the githooks locally wh To install githooks: - Follow installation instructions [here](https://pre-commit.com/#installation) to install the `pre-commit` tool. -- Once installed, run `pre-commit install` in the project's root directory. This will setup the hooks. +- Once installed, run `pre-commit install` in the project's root directory. This will set up the hooks. - Note you can skip the hooks by committing with `-n`: `git commit -n -m "look mom no githooks"` To update githooks: diff --git a/docs/dkg.md b/docs/dkg.md index 60f0fbdead..2a2053a8f7 100644 --- a/docs/dkg.md +++ b/docs/dkg.md @@ -20,7 +20,7 @@ The charon client has the responsibility of securely completing a distributed ke A distributed key generation ceremony involves `Operators` and their `Charon clients`. -- An `Operator` is identified by their Ethereum address. 
They will sign with this address's private key to authenticate their charon client ahead of the ceremony. The signature will be of; a hash of the charon clients ENR public key, the `cluster_definition_hash`, and an incrementing `nonce`, allowing for a direct linkage between a user, their charon client, and the cluster this client is intended to service, while retaining the ability to update the charon client by incrementing the nonce value and re-signing like the standard ENR spec. +- An `Operator` is identified by their Ethereum address. They will sign with this address's private key to authenticate their charon client ahead of the ceremony. The signature will be of a hash of the charon client’s ENR public key, the `cluster_definition_hash`, and an incrementing `nonce`, allowing for a direct linkage between a user, their charon client, and the cluster this client is intended to service, while retaining the ability to update the charon client by incrementing the nonce value and re-signing like the standard ENR spec. - A `Charon client` is also identified by a public/private key pair, in this instance, the public key is represented as an [Ethereum Node Record](https://eips.ethereum.org/EIPS/eip-778) (ENR). This is a standard identity format for both EL and CL clients. These ENRs are used by each charon node to identify its cluster peers over the internet, and to communicate with one another in an [end to end encrypted manner](https://github.com/libp2p/go-libp2p-noise). These keys need to be created by each operator before they can participate in a cluster creation. @@ -37,7 +37,7 @@ This cluster-definition file is created with the help of the [Distributed Valida - The threshold of fault tolerance required (if not choosing the safe default) - The network (fork_version/chainId) that this cluster will validate on - These key pieces of information form the basis of the cluster configuration. 
These fields (and some technical fields like DKG algorithm to use) are serialised and merklised to produce the manifests `cluster_definition_hash`. This merkle root will be used to confirm that there is no ambiguity or deviation between manifests when they are provided to charon nodes. -- Once the leader is satisfied with the configuration they publish it to the launchpad's data availability layer for the other participants to access. (For early development the launchpad will use a centralised backend db to store the cluster configuration. Near production, solutions like IPFS or arweave may be more suitable for the long term decentralisation of the launchpad.) +- Once the leader is satisfied with the configuration, they publish it to the launchpad's data availability layer for the other participants to access. (For early development the launchpad will use a centralised backend db to store the cluster configuration. Near production, solutions like IPFS or arweave may be more suitable for the long term decentralisation of the launchpad.) - The leader will then share the URL to this ceremony with their intended participants. - Anyone that clicks the ceremony url, or inputs the `config_hash` when prompted on the landing page will be brought to the ceremony status page. (After completing all disclaimers and advisories) - A "Connect Wallet" button will be visible beneath the ceremony status container, a participant can click on it to connect their wallet to the site @@ -53,7 +53,7 @@ This cluster-definition file is created with the help of the [Distributed Valida ## Carrying out the DKG ceremony -Once participant has their cluster-definition file prepared, they will pass the file to charon's `dkg` command. Charon will read the ENRs in the cluster-definition, confirm that its ENR is present, and then will reach out to bootnodes that are deployed to find the other ENRs on the network. 
(Fresh ENRs just have a public key and an IP address of 0.0.0.0 until they are loaded into a live charon client, which will update the IP address and increment the ENRs nonce and resign with the clients private key. If an ENR with a higher nonce is seen to be a charon client, they will update the IP address of that ENR in their address book.) +Once participants have their cluster-definition file prepared, they will pass the file to charon's `dkg` command. Charon will read the ENRs in the cluster-definition, confirm that its ENR is present, and then will reach out to bootnodes that are deployed to find the other ENRs on the network. (Fresh ENRs just have a public key and an IP address of 0.0.0.0 until they are loaded into a live charon client, which will update the IP address and increment the ENRs nonce and resign with the clients private key. If an ENR with a higher nonce is seen to be a charon client, they will update the IP address of that ENR in their address book.) Once all clients in the cluster can establish a connection with one another and they each complete a handshake (confirm everyone has a matching `cluster_definition_hash`), the ceremony begins. @@ -74,7 +74,7 @@ Once the ceremony is complete, all participants should take a backup of the crea ## Preparing for validator activation -Once the ceremony is complete, and secure backups of key shares have been made by each operator. They must now load these key shares into their validator clients, and run the `charon run` command to turn it into operational mode. +Once the ceremony is complete and secure backups of key shares have been made by each operator. They must now load these key shares into their validator clients, and run the `charon run` command to turn it into operational mode. All operators should confirm that their charon client logs indicate all nodes are online and connected. They should also verify the readiness of their beacon clients and validator clients.
Charon's grafana dashboard is a good way to see the readiness of the full cluster from its perspective. diff --git a/docs/metrics.md b/docs/metrics.md index 4bcfcbe96f..7d95d640bb 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -4,7 +4,7 @@ This document contains all the prometheus metrics exposed by a charon node. All metrics contain the following labels, so they are omitted from the table below: - `cluster_hash`: The cluster lock hash uniquely identifying the cluster. -- `clustter_name`: The cluster lock name. +- `cluster_name`: The cluster lock name. - `cluster_network`: The cluster network name; goerli, mainnet, etc. - `cluster_peer`: The name of this node in the cluster. It is determined from the operator ENR. From 1e0d4ce64c989588e1fed4c60de6a14e66b28df3 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Mon, 11 Nov 2024 12:13:51 +0100 Subject: [PATCH 68/89] cmd: add cluster lock and cluster definition to test peers (#3368) Sometimes users find it easier to test with already created `cluster-definition.json` or `cluster-lock.json` files. It saves copying and pasting ENRs and potentially making a mistake. 
category: feature ticket: none --- cmd/testpeers.go | 145 +++++++++++++++++++++++++++++---- cmd/testpeers_internal_test.go | 2 +- 2 files changed, 128 insertions(+), 19 deletions(-) diff --git a/cmd/testpeers.go b/cmd/testpeers.go index 8a771ef708..28b5960cb3 100644 --- a/cmd/testpeers.go +++ b/cmd/testpeers.go @@ -6,6 +6,7 @@ import ( "context" "crypto/sha256" "encoding/hex" + "encoding/json" "fmt" "io" "math" @@ -13,6 +14,7 @@ import ( "net" "net/http" "net/http/httptrace" + "os" "slices" "strings" "sync" @@ -31,19 +33,22 @@ import ( "github.com/obolnetwork/charon/app/errors" "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/z" + "github.com/obolnetwork/charon/cluster" "github.com/obolnetwork/charon/eth2util/enr" "github.com/obolnetwork/charon/p2p" ) type testPeersConfig struct { testConfig - ENRs []string - P2P p2p.Config - Log log.Config - DataDir string - KeepAlive time.Duration - LoadTestDuration time.Duration - DirectConnectionTimeout time.Duration + ENRs []string + P2P p2p.Config + Log log.Config + DataDir string + KeepAlive time.Duration + LoadTestDuration time.Duration + DirectConnectionTimeout time.Duration + ClusterLockFilePath string + ClusterDefinitionFilePath string } type ( @@ -83,16 +88,42 @@ func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) e bindDataDirFlag(cmd.Flags(), &config.DataDir) bindTestLogFlags(cmd.Flags(), &config.Log) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + const ( + enrs = "enrs" + clusterLockFilePath = "cluster-lock-file-path" + clusterDefinitionFilePath = "cluster-definition-file-path" + ) + enrsValue := cmd.Flags().Lookup(enrs).Value.String() + clusterLockPathValue := cmd.Flags().Lookup(clusterLockFilePath).Value.String() + clusterDefinitionPathValue := cmd.Flags().Lookup(clusterDefinitionFilePath).Value.String() + + if enrsValue == "[]" && clusterLockPathValue == "" && clusterDefinitionPathValue == "" { + //nolint:revive // we use our own version of 
the errors package. + return errors.New(fmt.Sprintf("--%v, --%v or --%v must be specified.", enrs, clusterLockFilePath, clusterDefinitionFilePath)) + } + + if (enrsValue != "[]" && clusterLockPathValue != "") || + (enrsValue != "[]" && clusterDefinitionPathValue != "") || + (clusterLockPathValue != "" && clusterDefinitionPathValue != "") { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("Only one of --%v, --%v or --%v should be specified.", enrs, clusterLockFilePath, clusterDefinitionFilePath)) + } + + return nil + }) + return cmd } func bindTestPeersFlags(cmd *cobra.Command, config *testPeersConfig) { const enrs = "enrs" - cmd.Flags().StringSliceVar(&config.ENRs, enrs, nil, "[REQUIRED] Comma-separated list of each peer ENR address.") + cmd.Flags().StringSliceVar(&config.ENRs, enrs, nil, "Comma-separated list of each peer ENR address.") cmd.Flags().DurationVar(&config.KeepAlive, "keep-alive", 30*time.Minute, "Time to keep TCP node alive after test completion, so connection is open for other peers to test on their end.") cmd.Flags().DurationVar(&config.LoadTestDuration, "load-test-duration", 30*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") cmd.Flags().DurationVar(&config.DirectConnectionTimeout, "direct-connection-timeout", 2*time.Minute, "Time to keep trying to establish direct connection to peer.") - mustMarkFlagRequired(cmd, enrs) + cmd.Flags().StringVar(&config.ClusterLockFilePath, "cluster-lock-file-path", "", "Path to cluster lock file, used to fetch peers' ENR addresses.") + cmd.Flags().StringVar(&config.ClusterDefinitionFilePath, "cluster-definition-file-path", "", "Path to cluster definition file, used to fetch peers' ENR addresses.") } func bindTestLogFlags(flags *pflag.FlagSet, config *log.Config) { @@ -124,9 +155,83 @@ func supportedSelfTestCases() map[testCaseName]testCasePeerSelf { } } +func fetchPeersFromDefinition(path string) ([]string, error) { + f, err := os.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "read definition file", z.Str("path", path)) + } + + var def cluster.Definition + err = json.Unmarshal(f, &def) + if err != nil { + return nil, errors.Wrap(err, "unmarshal definition json", z.Str("path", path)) + } + + var enrs []string + for _, o := range def.Operators { + enrs = append(enrs, o.ENR) + } + + if len(enrs) == 0 { + return nil, errors.New("no peers found in lock", z.Str("path", path)) + } + + return enrs, nil +} + +func fetchPeersFromLock(path string) ([]string, error) { + f, err := os.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "read lock file", z.Str("path", path)) + } + + var lock cluster.Lock + err = json.Unmarshal(f, &lock) + if err != nil { + return nil, errors.Wrap(err, "unmarshal lock json", z.Str("path", path)) + } + + var enrs []string + for _, o := range lock.Operators { + enrs = append(enrs, o.ENR) + } + + if len(enrs) == 0 { + return nil, errors.New("no peers found in lock", z.Str("path", path)) + } + + return enrs, nil +} + +func fetchENRs(conf testPeersConfig) ([]string, error) { + var enrs []string + var err error + switch { + case len(conf.ENRs) != 0: + 
enrs = conf.ENRs + case conf.ClusterDefinitionFilePath != "": + enrs, err = fetchPeersFromDefinition(conf.ClusterDefinitionFilePath) + if err != nil { + return nil, err + } + case conf.ClusterLockFilePath != "": + enrs, err = fetchPeersFromLock(conf.ClusterLockFilePath) + if err != nil { + return nil, err + } + } + + return enrs, nil +} + func startTCPNode(ctx context.Context, conf testPeersConfig) (host.Host, func(), error) { - var p2pPeers []p2p.Peer - for i, enrString := range conf.ENRs { + enrs, err := fetchENRs(conf) + if err != nil { + return nil, nil, err + } + + var peers []p2p.Peer + for i, enrString := range enrs { enrRecord, err := enr.Parse(enrString) if err != nil { return nil, nil, errors.Wrap(err, "decode enr", z.Str("enr", enrString)) @@ -137,7 +242,7 @@ func startTCPNode(ctx context.Context, conf testPeersConfig) (host.Host, func(), return nil, nil, err } - p2pPeers = append(p2pPeers, p2pPeer) + peers = append(peers, p2pPeer) } p2pPrivKey, err := p2p.LoadPrivKey(conf.DataDir) @@ -150,22 +255,22 @@ func startTCPNode(ctx context.Context, conf testPeersConfig) (host.Host, func(), return nil, nil, err } - mePeer, err := p2p.NewPeerFromENR(meENR, len(conf.ENRs)) + mePeer, err := p2p.NewPeerFromENR(meENR, len(enrs)) if err != nil { return nil, nil, err } log.Info(ctx, "Self p2p name resolved", z.Any("name", mePeer.Name)) - p2pPeers = append(p2pPeers, mePeer) + peers = append(peers, mePeer) - allENRs := conf.ENRs + allENRs := enrs allENRs = append(allENRs, meENR.String()) slices.Sort(allENRs) allENRsString := strings.Join(allENRs, ",") allENRsHash := sha256.Sum256([]byte(allENRsString)) - return setupP2P(ctx, p2pPrivKey, conf.P2P, p2pPeers, allENRsHash[:]) + return setupP2P(ctx, p2pPrivKey, conf.P2P, peers, allENRsHash[:]) } func setupP2P(ctx context.Context, privKey *k1.PrivateKey, conf p2p.Config, peers []p2p.Peer, enrsHash []byte) (host.Host, func(), error) { @@ -420,7 +525,11 @@ func testAllPeers(ctx context.Context, queuedTestCases []testCaseName, 
allTestCa singlePeerResCh := make(chan map[string][]testResult) group, _ := errgroup.WithContext(ctx) - for _, enr := range conf.ENRs { + enrs, err := fetchENRs(conf) + if err != nil { + return err + } + for _, enr := range enrs { currENR := enr // TODO: can be removed after go1.22 version bump group.Go(func() error { return testSinglePeer(ctx, queuedTestCases, allTestCases, conf, tcpNode, currENR, singlePeerResCh) @@ -435,7 +544,7 @@ func testAllPeers(ctx context.Context, queuedTestCases []testCaseName, allTestCa doneReading <- true }() - err := group.Wait() + err = group.Wait() if err != nil { return errors.Wrap(err, "peers test errgroup") } diff --git a/cmd/testpeers_internal_test.go b/cmd/testpeers_internal_test.go index 1c15a71d8e..a2694a2b69 100644 --- a/cmd/testpeers_internal_test.go +++ b/cmd/testpeers_internal_test.go @@ -320,7 +320,7 @@ func TestPeersTestFlags(t *testing.T) { { name: "no enrs flag", args: []string{"peers"}, - expectedErr: "required flag(s) \"enrs\" not set", + expectedErr: "--enrs, --cluster-lock-file-path or --cluster-definition-file-path must be specified.", }, { name: "no output toml on quiet", From 22f6cf64e97b30eccd9f78af6cefa37718e0fa29 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:07:30 +0100 Subject: [PATCH 69/89] cmd: beacon node simulation (#3361) Beacon node simulation test. This test aims to mimic the amount and types of requests a charon node sends to a beacon node. Requests are split into general cluster requests (independent of the amount of validators the charon node is servicing) and validator requests (requests are scaling based on the amount of validators). For lower number of validators (1 and 10) more aggressive approach is taken - you would see more frequent requests related to proposing and sync committee duties. As the validator number grows, the distribution evens out and gets closer to a real scenario.
The time calculated for some duties is a sum of the RTT of multiple requests, because the duty requires that (i.e.: produce block + submit signed block), while others are the result of the RTT of a single request (i.e.: submit sync message). The tests performed are a best-estimate, it should be taken into account that a lot of duties cannot be mimicked 1:1 - we cannot propose an actual block that will put an actual load on the beacon node. On submitting new signed block, the beacon node will quickly return 4XX error. In the future this can be optimised, to make as truthful of a request as possible, so that the BN is doing more work, before it recognises an issue. Running this test **should NOT** be considered an alternative to running a cluster on testnet to evaluate the performance of the Execution Layer and Consensus Layer nodes. Running an actual cluster on a network will always give you better perspective. category: feature ticket: none --- cmd/test.go | 6 +- cmd/testbeacon.go | 1427 ++++++++++++++++++++++++++++++- cmd/testbeacon_internal_test.go | 85 +- cmd/testmev.go | 5 +- cmd/testpeers.go | 4 +- 5 files changed, 1464 insertions(+), 63 deletions(-) diff --git a/cmd/test.go b/cmd/test.go index c202c6696e..5e62f1efc3 100644 --- a/cmd/test.go +++ b/cmd/test.go @@ -58,7 +58,7 @@ func newTestCmd(cmds ...*cobra.Command) *cobra.Command { func bindTestFlags(cmd *cobra.Command, config *testConfig) { cmd.Flags().StringVar(&config.OutputToml, "output-toml", "", "File path to which output can be written in TOML format.") cmd.Flags().StringSliceVar(&config.TestCases, "test-cases", nil, fmt.Sprintf("List of comma separated names of tests to be exeucted. 
Available tests are: %v", listTestCases(cmd))) - cmd.Flags().DurationVar(&config.Timeout, "timeout", 5*time.Minute, "Execution timeout for all tests.") + cmd.Flags().DurationVar(&config.Timeout, "timeout", time.Hour, "Execution timeout for all tests.") cmd.Flags().BoolVar(&config.Quiet, "quiet", false, "Do not print test results to stdout.") } @@ -141,6 +141,10 @@ func failedTestResult(testRes testResult, err error) testResult { return testRes } +func httpStatusError(code int) string { + return fmt.Sprintf("HTTP status code %v", code) +} + func (s *testResultError) UnmarshalText(data []byte) error { if len(data) == 0 { return nil diff --git a/cmd/testbeacon.go b/cmd/testbeacon.go index 78a14adb4a..e865ff01f5 100644 --- a/cmd/testbeacon.go +++ b/cmd/testbeacon.go @@ -11,7 +11,11 @@ import ( "math/rand" "net/http" "net/http/httptrace" + "os" + "path/filepath" + "sort" "strconv" + "strings" "sync" "time" @@ -27,13 +31,118 @@ import ( type testBeaconConfig struct { testConfig - Endpoints []string - EnableLoadTest bool - LoadTestDuration time.Duration + Endpoints []string + LoadTest bool + LoadTestDuration time.Duration + SimulationValidators int + SimulationFileDir string + SimulationDuration int + SimulationVerbose bool } type testCaseBeacon func(context.Context, *testBeaconConfig, string) testResult +type simParams struct { + TotalValidatorsCount int + AttestationValidatorsCount int // attestation + aggregation + ProposalValidatorsCount int // attestation + aggregation + proposals + SyncCommitteeValidatorsCount int // attestation + aggregation + proposals + sync committee + RequestIntensity RequestsIntensity +} + +type SimulationValues struct { + Endpoint string `json:"endpoint,omitempty"` + All []Duration `json:"all,omitempty"` + Min Duration `json:"min"` + Max Duration `json:"max"` + Median Duration `json:"median"` + Avg Duration `json:"avg"` +} + +type RequestsIntensity struct { + AttestationDuty time.Duration + AggregatorDuty time.Duration + ProposalDuty 
time.Duration + SyncCommitteeSubmit time.Duration + SyncCommitteeContribution time.Duration + SyncCommitteeSubscribe time.Duration +} + +type DutiesPerformed struct { + Attestation bool + Aggregation bool + Proposal bool + SyncCommittee bool +} + +type Simulation struct { + GeneralClusterRequests SimulationCluster `json:"general_cluster_requests"` + ValidatorsRequests SimulationValidators `json:"validators_requests"` +} + +type SimulationValidators struct { + Averaged SimulationSingleValidator `json:"averaged"` + AllValidators []SimulationSingleValidator `json:"all_validators,omitempty"` +} + +type SimulationSingleValidator struct { + AttestationDuty SimulationAttestation `json:"attestation_duty"` + AggregationDuty SimulationAggregation `json:"aggregation_duty"` + ProposalDuty SimulationProposal `json:"proposal_duty"` + SyncCommitteeDuties SimulationSyncCommittee `json:"sync_committee_duties"` + SimulationValues +} + +type SimulationAttestation struct { + GetAttestationDataRequest SimulationValues `json:"get_attestation_data_request"` + PostAttestationsRequest SimulationValues `json:"post_attestations_request"` + SimulationValues +} + +type SimulationAggregation struct { + GetAggregateAttestationRequest SimulationValues `json:"get_aggregate_attestation_request"` + PostAggregateAndProofsRequest SimulationValues `json:"post_aggregate_and_proofs_request"` + SimulationValues +} + +type SimulationProposal struct { + ProduceBlockRequest SimulationValues `json:"produce_block_request"` + PublishBlindedBlockRequest SimulationValues `json:"publish_blinded_block_request"` + SimulationValues +} + +type SimulationSyncCommittee struct { + MessageDuty SyncCommitteeMessageDuty `json:"message_duty"` + ContributionDuty SyncCommitteeContributionDuty `json:"contribution_duty"` + SubscribeSyncCommitteeRequest SimulationValues `json:"subscribe_sync_committee_request"` + SimulationValues +} + +type SyncCommitteeContributionDuty struct { + ProduceSyncCommitteeContributionRequest 
SimulationValues `json:"produce_sync_committee_contribution_request"` + SubmitSyncCommitteeContributionRequest SimulationValues `json:"submit_sync_committee_contribution_request"` + SimulationValues +} + +type SyncCommitteeMessageDuty struct { + SubmitSyncCommitteeMessageRequest SimulationValues `json:"submit_sync_committee_message_request"` +} + +type SimulationCluster struct { + AttestationsForBlockRequest SimulationValues `json:"attestations_for_block_request"` + ProposalDutiesForEpochRequest SimulationValues `json:"proposal_duties_for_epoch_request"` + SyncingRequest SimulationValues `json:"syncing_request"` + PeerCountRequest SimulationValues `json:"peer_count_request"` + BeaconCommitteeSubscriptionRequest SimulationValues `json:"beacon_committee_subscription_request"` + DutiesAttesterForEpochRequest SimulationValues `json:"duties_attester_for_epoch_request"` + DutiesSyncCommitteeForEpochRequest SimulationValues `json:"duties_sync_committee_for_epoch_request"` + BeaconHeadValidatorsRequest SimulationValues `json:"beacon_head_validators_request"` + BeaconGenesisRequest SimulationValues `json:"beacon_genesis_request"` + PrepBeaconProposerRequest SimulationValues `json:"prep_beacon_proposer_request"` + ConfigSpecRequest SimulationValues `json:"config_spec_request"` + NodeVersionRequest SimulationValues `json:"node_version_request"` +} + const ( thresholdBeaconMeasureAvg = 40 * time.Millisecond thresholdBeaconMeasurePoor = 100 * time.Millisecond @@ -41,6 +150,14 @@ const ( thresholdBeaconLoadPoor = 100 * time.Millisecond thresholdBeaconPeersAvg = 50 thresholdBeaconPeersPoor = 20 + + thresholdBeaconSimulationAvg = 200 * time.Millisecond + thresholdBeaconSimulationPoor = 400 * time.Millisecond + committeeSizePerSlot = 64 + subCommitteeSize = 4 + slotTime = 12 * time.Second + slotsInEpoch = 32 + epochTime = slotsInEpoch * slotTime ) func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) error) *cobra.Command { @@ -69,8 +186,11 @@ func 
bindTestBeaconFlags(cmd *cobra.Command, config *testBeaconConfig) { const endpoints = "endpoints" cmd.Flags().StringSliceVar(&config.Endpoints, endpoints, nil, "[REQUIRED] Comma separated list of one or more beacon node endpoint URLs.") mustMarkFlagRequired(cmd, endpoints) - cmd.Flags().BoolVar(&config.EnableLoadTest, "enable-load-test", false, "Enable load test, not advisable when testing towards external beacon nodes.") + cmd.Flags().BoolVar(&config.LoadTest, "load-test", false, "Enable load test, not advisable when testing towards external beacon nodes.") cmd.Flags().DurationVar(&config.LoadTestDuration, "load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned.") + cmd.Flags().StringVar(&config.SimulationFileDir, "simulation-file-dir", "./", "JSON directory to which simulation file results will be written.") + cmd.Flags().IntVar(&config.SimulationDuration, "simulation-duration-in-slots", slotsInEpoch, "Time to keep running the simulation in slots.") + cmd.Flags().BoolVar(&config.SimulationVerbose, "simulation-verbose", false, "Show results for each request and each validator.") } func supportedBeaconTestCases() map[testCaseName]testCaseBeacon { @@ -80,6 +200,12 @@ func supportedBeaconTestCases() map[testCaseName]testCaseBeacon { {name: "isSynced", order: 3}: beaconIsSyncedTest, {name: "peerCount", order: 4}: beaconPeerCountTest, {name: "pingLoad", order: 5}: beaconPingLoadTest, + + {name: "simulate1", order: 6}: beaconSimulation1Test, + {name: "simulate10", order: 7}: beaconSimulation10Test, + {name: "simulate100", order: 8}: beaconSimulation100Test, + {name: "simulate500", order: 9}: beaconSimulation500Test, + {name: "simulate1000", order: 10}: beaconSimulation1000Test, } } @@ -232,7 +358,7 @@ func beaconPingTest(ctx context.Context, _ *testBeaconConfig, target string) tes defer resp.Body.Close() if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status 
code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } testRes.Verdict = testVerdictOk @@ -264,7 +390,7 @@ func beaconPingOnce(ctx context.Context, target string) (time.Duration, error) { defer resp.Body.Close() if resp.StatusCode > 399 { - return 0, errors.New("status code %v", z.Int("status_code", resp.StatusCode)) + return 0, errors.New(httpStatusError(resp.StatusCode)) } return firstByte, nil @@ -308,7 +434,7 @@ func pingBeaconContinuously(ctx context.Context, target string, resCh chan<- tim func beaconPingLoadTest(ctx context.Context, conf *testBeaconConfig, target string) testResult { testRes := testResult{Name: "BeaconLoad"} - if !conf.EnableLoadTest { + if !conf.LoadTest { testRes.Verdict = testVerdictSkipped return testRes } @@ -376,7 +502,7 @@ func beaconIsSyncedTest(ctx context.Context, _ *testBeaconConfig, target string) } if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } b, err := io.ReadAll(resp.Body) @@ -424,7 +550,7 @@ func beaconPeerCountTest(ctx context.Context, _ *testBeaconConfig, target string } if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } b, err := io.ReadAll(resp.Body) @@ -451,3 +577,1286 @@ func beaconPeerCountTest(ctx context.Context, _ *testBeaconConfig, target string return testRes } + +func beaconSimulation1Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "BeaconSimulation1Validator"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 1, + AttestationValidatorsCount: 0, + ProposalValidatorsCount: 
0, + SyncCommitteeValidatorsCount: 1, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulation10Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "BeaconSimulation10Validators"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 10, + AttestationValidatorsCount: 6, + ProposalValidatorsCount: 3, + SyncCommitteeValidatorsCount: 1, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulation100Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "BeaconSimulation100Validators"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 100, + AttestationValidatorsCount: 80, + ProposalValidatorsCount: 18, + SyncCommitteeValidatorsCount: 2, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulation500Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "BeaconSimulation500Validators"} + if !conf.LoadTest { 
+ testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 500, + AttestationValidatorsCount: 450, + ProposalValidatorsCount: 45, + SyncCommitteeValidatorsCount: 5, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulation1000Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "BeaconSimulation1000Validators"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 1000, + AttestationValidatorsCount: 930, + ProposalValidatorsCount: 65, + SyncCommitteeValidatorsCount: 5, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulationTest(ctx context.Context, conf *testBeaconConfig, target string, testRes testResult, params simParams) testResult { + duration := time.Duration(conf.SimulationDuration)*slotTime + time.Second + var wg sync.WaitGroup + + log.Info(ctx, "Running beacon node simulation...", + z.Any("validators_count", params.TotalValidatorsCount), + z.Any("target", target), + z.Any("duration_in_slots", conf.SimulationDuration), + z.Any("slot_duration", slotTime), + ) + + // start general cluster requests + simulationGeneralResCh := make(chan SimulationCluster, 1) + var simulationGeneralRes SimulationCluster + wg.Add(1) + log.Info(ctx, "Starting general cluster requests...") + go singleClusterSimulation(ctx, duration, 
target, simulationGeneralResCh, wg.Done) + + // start validator requests + simulationResCh := make(chan SimulationSingleValidator, params.TotalValidatorsCount) + simulationResAll := []SimulationSingleValidator{} + + log.Info(ctx, "Starting validators performing duties attestation, aggregation, proposal, sync committee...", + z.Any("validators", params.SyncCommitteeValidatorsCount), + ) + syncCommitteeValidatorsDuties := DutiesPerformed{Attestation: true, Aggregation: true, Proposal: true, SyncCommittee: true} + for range params.SyncCommitteeValidatorsCount { + wg.Add(1) + go singleValidatorSimulation(ctx, duration, target, simulationResCh, params.RequestIntensity, syncCommitteeValidatorsDuties, &wg) + } + + log.Info(ctx, "Starting validators performing duties attestation, aggregation, proposal...", + z.Any("validators", params.ProposalValidatorsCount), + ) + proposalValidatorsDuties := DutiesPerformed{Attestation: true, Aggregation: true, Proposal: true, SyncCommittee: false} + for range params.ProposalValidatorsCount { + wg.Add(1) + go singleValidatorSimulation(ctx, duration, target, simulationResCh, params.RequestIntensity, proposalValidatorsDuties, &wg) + } + + log.Info(ctx, "Starting validators performing duties attestation, aggregation...", + z.Any("validators", params.AttestationValidatorsCount), + ) + attesterValidatorsDuties := DutiesPerformed{Attestation: true, Aggregation: true, Proposal: false, SyncCommittee: false} + for range params.AttestationValidatorsCount { + wg.Add(1) + go singleValidatorSimulation(ctx, duration, target, simulationResCh, params.RequestIntensity, attesterValidatorsDuties, &wg) + } + + log.Info(ctx, "Waiting for simulation to complete...") + // evaluate results + wg.Wait() + close(simulationGeneralResCh) + close(simulationResCh) + log.Info(ctx, "Simulation finished, evaluating results...") + simulationGeneralRes = <-simulationGeneralResCh + for result := range simulationResCh { + simulationResAll = append(simulationResAll, result) + 
} + + averageValidatorResult := averageValidatorsResult(simulationResAll) + + finalSimulation := Simulation{ + GeneralClusterRequests: simulationGeneralRes, + ValidatorsRequests: SimulationValidators{ + Averaged: averageValidatorResult, + AllValidators: simulationResAll, + }, + } + + if !conf.SimulationVerbose { + finalSimulation = nonVerboseFinalSimulation(finalSimulation) + } + simulationResAllJSON, err := json.Marshal(finalSimulation) + if err != nil { + log.Error(ctx, "Failed to marshal simulation result", err) + } + err = os.WriteFile(filepath.Join(conf.SimulationFileDir, fmt.Sprintf("%v-validators.json", params.TotalValidatorsCount)), simulationResAllJSON, 0o644) //nolint:gosec + if err != nil { + log.Error(ctx, "Failed to write file", err) + } + + highestRTT := Duration{0} + for _, sim := range simulationResAll { + if sim.Max.Duration > highestRTT.Duration { + highestRTT = sim.Max + } + } + if highestRTT.Duration > thresholdBeaconSimulationPoor { + testRes.Verdict = testVerdictPoor + } else if highestRTT.Duration > thresholdBeaconSimulationAvg { + testRes.Verdict = testVerdictAvg + } else { + testRes.Verdict = testVerdictGood + } + testRes.Measurement = highestRTT.String() + + log.Info(ctx, "Validators simulation finished", + z.Any("validators_count", params.TotalValidatorsCount), + z.Any("target", target), + ) + + return testRes +} + +func singleClusterSimulation(ctx context.Context, simulationDuration time.Duration, target string, resultCh chan SimulationCluster, wgDone func()) { + defer wgDone() + // per slot requests + attestationsForBlockCh := make(chan time.Duration) + attestationsForBlockAll := []time.Duration{} + proposalDutiesForEpochCh := make(chan time.Duration) + proposalDutiesForEpochAll := []time.Duration{} + // per 10 sec requests + syncingCh := make(chan time.Duration) + syncingAll := []time.Duration{} + // per minute requests + peerCountCh := make(chan time.Duration) + peerCountAll := []time.Duration{} + // per 12 slots requests + 
	beaconCommitteeSubCh := make(chan time.Duration)
	beaconCommitteeSubAll := []time.Duration{}
	// 3 times per epoch - at first slot of the epoch, at the last but one and the last
	dutiesAttesterCh := make(chan time.Duration)
	dutiesAttesterAll := []time.Duration{}
	// 3 times per epoch - 10 seconds before the epoch - call for the epoch, at the time of epoch - call for the epoch and call for the epoch+256
	dutiesSyncCommitteeCh := make(chan time.Duration)
	dutiesSyncCommitteeAll := []time.Duration{}
	// once per epoch, at the beginning of the epoch
	beaconHeadValidatorsCh := make(chan time.Duration)
	beaconHeadValidatorsAll := []time.Duration{}
	beaconGenesisCh := make(chan time.Duration)
	beaconGenesisAll := []time.Duration{}
	prepBeaconProposerCh := make(chan time.Duration)
	prepBeaconProposerAll := []time.Duration{}
	configSpecCh := make(chan time.Duration)
	configSpecAll := []time.Duration{}
	nodeVersionCh := make(chan time.Duration) // 7 seconds after start of epoch
	nodeVersionAll := []time.Duration{}
	// two endpoints called are not included:
	// 1. /eth/v1/config/fork_schedule - it seemed at random every 240-600 epochs, didn't seem worth to do it
	// 2. /eth/v1/events?topics=head - it happened only once for 26 hours, it didn't seem related to anything

	// Producer goroutine: issues the requests on their schedules and closes
	// every channel above when it returns.
	go clusterGeneralRequests(ctx, target, slotTime, simulationDuration,
		attestationsForBlockCh, proposalDutiesForEpochCh, syncingCh,
		peerCountCh, beaconCommitteeSubCh, dutiesAttesterCh,
		dutiesSyncCommitteeCh, beaconHeadValidatorsCh, beaconGenesisCh,
		prepBeaconProposerCh, configSpecCh, nodeVersionCh)

	// Fan-in: collect timings until ctx is done or any channel closes.
	// The producer closes all channels together, so one closed channel
	// means the run is over.
	finished := false
	for !finished {
		select {
		case <-ctx.Done():
			finished = true
		case result, ok := <-attestationsForBlockCh:
			if !ok {
				finished = true
				continue
			}
			attestationsForBlockAll = append(attestationsForBlockAll, result)
		case result, ok := <-proposalDutiesForEpochCh:
			if !ok {
				finished = true
				continue
			}
			proposalDutiesForEpochAll = append(proposalDutiesForEpochAll, result)
		case result, ok := <-syncingCh:
			if !ok {
				finished = true
				continue
			}
			syncingAll = append(syncingAll, result)
		case result, ok := <-peerCountCh:
			if !ok {
				finished = true
				continue
			}
			peerCountAll = append(peerCountAll, result)
		case result, ok := <-beaconCommitteeSubCh:
			if !ok {
				finished = true
				continue
			}
			beaconCommitteeSubAll = append(beaconCommitteeSubAll, result)
		case result, ok := <-dutiesAttesterCh:
			if !ok {
				finished = true
				continue
			}
			dutiesAttesterAll = append(dutiesAttesterAll, result)
		case result, ok := <-dutiesSyncCommitteeCh:
			if !ok {
				finished = true
				continue
			}
			dutiesSyncCommitteeAll = append(dutiesSyncCommitteeAll, result)
		case result, ok := <-beaconHeadValidatorsCh:
			if !ok {
				finished = true
				continue
			}
			beaconHeadValidatorsAll = append(beaconHeadValidatorsAll, result)
		case result, ok := <-beaconGenesisCh:
			if !ok {
				finished = true
				continue
			}
			beaconGenesisAll = append(beaconGenesisAll, result)
		case result, ok := <-prepBeaconProposerCh:
			if !ok {
				finished = true
				continue
			}
			prepBeaconProposerAll = append(prepBeaconProposerAll, result)
		case result, ok := <-configSpecCh:
			if !ok {
				finished = true
				continue
			}
			configSpecAll = append(configSpecAll, result)
		case result, ok := <-nodeVersionCh:
			if !ok {
				finished = true
				continue
			}
			nodeVersionAll = append(nodeVersionAll, result)
		}
	}

	// Fold raw timings into per-endpoint summary values.
	attestationsForBlockValues := generateSimulationValues(attestationsForBlockAll, "GET /eth/v1/beacon/blocks/{BLOCK}/attestations")
	proposalDutiesForEpochValues := generateSimulationValues(proposalDutiesForEpochAll, "GET /eth/v1/validator/duties/proposer/{EPOCH}")
	syncingValues := generateSimulationValues(syncingAll, "GET /eth/v1/node/syncing")
	peerCountValues := generateSimulationValues(peerCountAll, "GET /eth/v1/node/peer_count")
	beaconCommitteeSubValues := generateSimulationValues(beaconCommitteeSubAll, "POST /eth/v1/validator/beacon_committee_subscriptions")
	dutiesAttesterValues := generateSimulationValues(dutiesAttesterAll, "POST /eth/v1/validator/duties/attester/{EPOCH}")
	dutiesSyncCommitteeValues := generateSimulationValues(dutiesSyncCommitteeAll, "POST /eth/v1/validator/duties/sync/{EPOCH}")
	beaconHeadValidatorsValues := generateSimulationValues(beaconHeadValidatorsAll, "POST /eth/v1/beacon/states/head/validators")
	beaconGenesisValues := generateSimulationValues(beaconGenesisAll, "GET /eth/v1/beacon/genesis")
	prepBeaconProposerValues := generateSimulationValues(prepBeaconProposerAll, "POST /eth/v1/validator/prepare_beacon_proposer")
	configSpecValues := generateSimulationValues(configSpecAll, "GET /eth/v1/config/spec")
	nodeVersionValues := generateSimulationValues(nodeVersionAll, "GET /eth/v1/node/version")

	generalResults := SimulationCluster{
		AttestationsForBlockRequest:        attestationsForBlockValues,
		ProposalDutiesForEpochRequest:      proposalDutiesForEpochValues,
		SyncingRequest:                     syncingValues,
		PeerCountRequest:                   peerCountValues,
		BeaconCommitteeSubscriptionRequest: beaconCommitteeSubValues,
		DutiesAttesterForEpochRequest:      dutiesAttesterValues,
		DutiesSyncCommitteeForEpochRequest: dutiesSyncCommitteeValues,
		BeaconHeadValidatorsRequest:        beaconHeadValidatorsValues,
		BeaconGenesisRequest:               beaconGenesisValues,
		PrepBeaconProposerRequest:          prepBeaconProposerValues,
		ConfigSpecRequest:                  configSpecValues,
		NodeVersionRequest:                 nodeVersionValues,
	}

	resultCh <- generalResults
}

// clusterGeneralRequests issues the cluster-wide requests on their respective
// schedules (per slot, per 12 slots, per 10 seconds, per minute and at fixed
// slots within each epoch) and reports each request's duration on its channel.
// All channels are closed together when the function returns.
// NOTE(review): the slotTime parameter shadows the package-level slotTime used
// elsewhere in this file — confirm callers always pass the same value.
func clusterGeneralRequests(
	ctx context.Context, target string, slotTime time.Duration, simulationDuration time.Duration,
	attestationsForBlockCh chan time.Duration, proposalDutiesForEpochCh chan time.Duration, syncingCh chan time.Duration,
	peerCountCh chan time.Duration, beaconCommitteeSubCh chan time.Duration, dutiesAttesterCh chan time.Duration,
	dutiesSyncCommitteeCh chan time.Duration, beaconHeadValidatorsCh chan time.Duration, beaconGenesisCh chan time.Duration,
	prepBeaconProposerCh chan time.Duration, configSpecCh chan time.Duration, nodeVersionCh chan time.Duration,
) {
	// Close all result channels in one place so the consumer's fan-in loop
	// terminates as soon as this producer stops.
	defer func() {
		close(proposalDutiesForEpochCh)
		close(attestationsForBlockCh)
		close(syncingCh)
		close(peerCountCh)
		close(beaconCommitteeSubCh)
		close(dutiesAttesterCh)
		close(dutiesSyncCommitteeCh)
		close(beaconHeadValidatorsCh)
		close(beaconGenesisCh)
		close(prepBeaconProposerCh)
		close(configSpecCh)
		close(nodeVersionCh)
	}()
	// slot ticker
	tickerSlot := time.NewTicker(slotTime)
	defer tickerSlot.Stop()
	// 12 slots ticker
	ticker12Slots := time.NewTicker(12 * slotTime)
	defer ticker12Slots.Stop()
	// 10 sec ticker
	ticker10Sec := time.NewTicker(10 * time.Second)
	defer ticker10Sec.Stop()
	// minute ticker
	tickerMinute := time.NewTicker(time.Minute)
	defer tickerMinute.Stop()

	// Best effort: fall back to slot 1 if the head slot cannot be fetched.
	slot, err := getCurrentSlot(ctx, target)
	if err != nil {
		log.Error(ctx, "Failed to get current slot", err)
		slot = 1
	}

	// pingCtx bounds the whole request loop to the simulation duration.
	pingCtx, cancel := context.WithTimeout(ctx, simulationDuration)
	defer cancel()

	for pingCtx.Err() == nil {
		select {
		case <-tickerSlot.C:
			slot++
			epoch := slot / slotsInEpoch

			// requests executed at every slot
			// NOTE(review): slot-6 can be negative for very low head
			// slots (fresh chain) — confirm the target tolerates it.
			attestationsResult, err := getAttestationsForBlock(ctx, target, slot-6)
			if err != nil && !errors.Is(err, context.Canceled) {
				log.Error(ctx, "Unexpected getAttestationsForBlock failure", err)
			}
			attestationsForBlockCh <- attestationsResult
			submitResult, err := getProposalDutiesForEpoch(ctx, target, epoch)
			if err != nil && !errors.Is(err, context.Canceled) {
				log.Error(ctx, "Unexpected getProposalDutiesForEpoch failure", err)
			}
			proposalDutiesForEpochCh <- submitResult

			// requests executed at the first slot of the epoch
			if slot%slotsInEpoch == 0 {
				dutiesAttesterResult, err := getAttesterDutiesForEpoch(ctx, target, epoch)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected getAttesterDutiesForEpoch failure", err)
				}
				dutiesAttesterCh <- dutiesAttesterResult

				dutiesSyncCommitteeResult, err := getSyncCommitteeDutiesForEpoch(ctx, target, epoch)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected getSyncCommitteeDutiesForEpoch failure", err)
				}
				dutiesSyncCommitteeCh <- dutiesSyncCommitteeResult

				beaconHeadValidatorsResult, err := beaconHeadValidators(ctx, target)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected beaconHeadValidators failure", err)
				}
				beaconHeadValidatorsCh <- beaconHeadValidatorsResult

				beaconGenesisResult, err := beaconGenesis(ctx, target)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected beaconGenesis failure", err)
				}
				beaconGenesisCh <- beaconGenesisResult

				prepBeaconProposerResult, err := prepBeaconProposer(ctx, target)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected prepBeaconProposer failure", err)
				}
				prepBeaconProposerCh <- prepBeaconProposerResult

				configSpecResult, err := configSpec(ctx, target)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected configSpec failure", err)
				}
				configSpecCh <- configSpecResult

				nodeVersionResult, err := nodeVersion(ctx, target)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected nodeVersion failure", err)
				}
				nodeVersionCh <- nodeVersionResult
			}

			// requests executed at the last but one slot of the epoch
			if slot%slotsInEpoch == slotsInEpoch-2 {
				dutiesAttesterResult, err := getAttesterDutiesForEpoch(ctx, target, epoch)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected getAttesterDutiesForEpoch failure", err)
				}
				dutiesAttesterCh <- dutiesAttesterResult
			}

			// requests executed at the last slot of the epoch
			if slot%slotsInEpoch == slotsInEpoch-1 {
				dutiesAttesterResult, err := getAttesterDutiesForEpoch(ctx, target, epoch)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected getAttesterDutiesForEpoch failure", err)
				}
				dutiesAttesterCh <- dutiesAttesterResult

				dutiesSyncCommitteeResult, err := getSyncCommitteeDutiesForEpoch(ctx, target, epoch)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected getSyncCommitteeDutiesForEpoch failure", err)
				}
				dutiesSyncCommitteeCh <- dutiesSyncCommitteeResult

				// Also query duties far ahead (epoch+256), mirroring
				// observed validator client behaviour.
				dutiesSyncCommitteeResultFuture, err := getSyncCommitteeDutiesForEpoch(ctx, target, epoch+256)
				if err != nil && !errors.Is(err, context.Canceled) {
					log.Error(ctx, "Unexpected getSyncCommitteeDutiesForEpoch for the future epoch failure", err)
				}
				dutiesSyncCommitteeCh <- dutiesSyncCommitteeResultFuture
			}
		case <-ticker12Slots.C:
			beaconCommitteeSubResult, err := beaconCommitteeSub(ctx, target)
			if err != nil && !errors.Is(err, context.Canceled) {
				log.Error(ctx, "Unexpected beaconCommitteeSub failure", err)
			}
			beaconCommitteeSubCh <- beaconCommitteeSubResult
		case <-ticker10Sec.C:
			getSyncingResult, err := getSyncing(ctx, target)
			if err != nil && !errors.Is(err, context.Canceled) {
				log.Error(ctx, "Unexpected getSyncing failure", err)
			}
			syncingCh <- getSyncingResult
		case <-tickerMinute.C:
			peerCountResult, err := getPeerCount(ctx, target)
			if err != nil && !errors.Is(err, context.Canceled) {
				log.Error(ctx, "Unexpected getPeerCount failure", err)
			}
			peerCountCh <- peerCountResult
		case <-pingCtx.Done():
			// Loop condition terminates on the next iteration.
		}
	}
}

// singleValidatorSimulation simulates one validator: it starts a goroutine
// per duty enabled in dutiesPerformed, collects all request durations, groups
// them per duty and sends a single aggregated result to resultCh.
func singleValidatorSimulation(ctx context.Context, simulationDuration time.Duration, target string, resultCh chan SimulationSingleValidator, intensity RequestsIntensity, dutiesPerformed DutiesPerformed, wg *sync.WaitGroup) {
	defer wg.Done()
	// attestations
	getAttestationDataCh := make(chan time.Duration)
	getAttestationDataAll := []time.Duration{}
	submitAttestationObjectCh := make(chan time.Duration)
	submitAttestationObjectAll := []time.Duration{}
	if dutiesPerformed.Attestation {
		go attestationDuty(ctx, target, simulationDuration, intensity.AttestationDuty, getAttestationDataCh, submitAttestationObjectCh)
	}

	// aggregations
	getAggregateAttestationsCh := make(chan time.Duration)
	getAggregateAttestationsAll := []time.Duration{}
	submitAggregateAndProofsCh := make(chan time.Duration)
	submitAggregateAndProofsAll := []time.Duration{}
	if dutiesPerformed.Aggregation {
		go aggregationDuty(ctx, target, simulationDuration, intensity.AggregatorDuty, getAggregateAttestationsCh, submitAggregateAndProofsCh)
	}

	// proposals
	produceBlockCh := make(chan time.Duration)
	produceBlockAll := []time.Duration{}
	publishBlindedBlockCh := make(chan time.Duration)
	publishBlindedBlockAll := []time.Duration{}
	if dutiesPerformed.Proposal {
		go proposalDuty(ctx, target, simulationDuration, intensity.ProposalDuty, produceBlockCh, publishBlindedBlockCh)
	}

	// sync_committee
	syncCommitteeSubscriptionCh := make(chan time.Duration)
	syncCommitteeSubscriptionAll := []time.Duration{}
	submitSyncCommitteeMessageCh := make(chan time.Duration)
	submitSyncCommitteeMessageAll := []time.Duration{}
	produceSyncCommitteeContributionCh := make(chan time.Duration)
	produceSyncCommitteeContributionAll := []time.Duration{}
	submitSyncCommitteeContributionCh := make(chan time.Duration)
	submitSyncCommitteeContributionAll := []time.Duration{}
	if dutiesPerformed.SyncCommittee {
		go syncCommitteeDuties(ctx, target,
			simulationDuration, intensity.SyncCommitteeSubmit, intensity.SyncCommitteeSubscribe, intensity.SyncCommitteeContribution,
			submitSyncCommitteeMessageCh, produceSyncCommitteeContributionCh, syncCommitteeSubscriptionCh, submitSyncCommitteeContributionCh)
	}

	// capture results
	// Channels for disabled duties are never closed and never written to,
	// so their select arms simply never fire; the loop ends on ctx.Done or
	// when an enabled duty's channel closes.
	finished := false
	for !finished {
		select {
		case <-ctx.Done():
			finished = true
		// attestations
		case result, ok := <-getAttestationDataCh:
			if !ok {
				finished = true
				continue
			}
			getAttestationDataAll = append(getAttestationDataAll, result)
		case result, ok := <-submitAttestationObjectCh:
			if !ok {
				finished = true
				continue
			}
			submitAttestationObjectAll = append(submitAttestationObjectAll, result)
		// aggregations
		case result, ok := <-getAggregateAttestationsCh:
			if !ok {
				finished = true
				continue
			}
			getAggregateAttestationsAll = append(getAggregateAttestationsAll, result)
		case result, ok := <-submitAggregateAndProofsCh:
			if !ok {
				finished = true
				continue
			}
			submitAggregateAndProofsAll = append(submitAggregateAndProofsAll, result)
		// proposals
		case result, ok := <-produceBlockCh:
			if !ok {
				finished = true
				continue
			}
			produceBlockAll = append(produceBlockAll, result)
		case result, ok := <-publishBlindedBlockCh:
			if !ok {
				finished = true
				continue
			}
			publishBlindedBlockAll = append(publishBlindedBlockAll, result)
		// sync_committee
		case result, ok := <-syncCommitteeSubscriptionCh:
			if !ok {
				finished = true
				continue
			}
			syncCommitteeSubscriptionAll = append(syncCommitteeSubscriptionAll, result)
		case result, ok := <-submitSyncCommitteeMessageCh:
			if !ok {
				finished = true
				continue
			}
			submitSyncCommitteeMessageAll = append(submitSyncCommitteeMessageAll, result)
		case result, ok := <-produceSyncCommitteeContributionCh:
			if !ok {
				finished = true
				continue
			}
			produceSyncCommitteeContributionAll = append(produceSyncCommitteeContributionAll, result)
		case result, ok := <-submitSyncCommitteeContributionCh:

			if !ok {
				finished = true
				continue
			}
			submitSyncCommitteeContributionAll = append(submitSyncCommitteeContributionAll, result)
		}
	}

	// allRequests accumulates the per-duty cumulative durations used for the
	// validator-wide summary at the end.
	var allRequests []time.Duration

	// attestation results grouping
	var attestationResult SimulationAttestation
	if dutiesPerformed.Attestation {
		getSimulationValues := generateSimulationValues(getAttestationDataAll, "GET /eth/v1/validator/attestation_data")
		submitSimulationValues := generateSimulationValues(submitAttestationObjectAll, "POST /eth/v1/beacon/pool/attestations")

		// Pair the i-th get with the i-th submit to approximate the full
		// duty round-trip; min() guards against unequal lengths.
		cumulativeAttestation := []time.Duration{}
		for i := range min(len(getAttestationDataAll), len(submitAttestationObjectAll)) {
			cumulativeAttestation = append(cumulativeAttestation, getAttestationDataAll[i]+submitAttestationObjectAll[i])
		}
		cumulativeSimulationValues := generateSimulationValues(cumulativeAttestation, "")
		allRequests = append(allRequests, cumulativeAttestation...)

		attestationResult = SimulationAttestation{
			GetAttestationDataRequest: getSimulationValues,
			PostAttestationsRequest:   submitSimulationValues,
			SimulationValues:          cumulativeSimulationValues,
		}
	}

	// aggregation results grouping
	var aggregationResults SimulationAggregation
	if dutiesPerformed.Aggregation {
		getAggregateSimulationValues := generateSimulationValues(getAggregateAttestationsAll, "GET /eth/v1/validator/aggregate_attestation")
		submitAggregateSimulationValues := generateSimulationValues(submitAggregateAndProofsAll, "POST /eth/v1/validator/aggregate_and_proofs")

		cumulativeAggregations := []time.Duration{}
		for i := range min(len(getAggregateAttestationsAll), len(submitAggregateAndProofsAll)) {
			cumulativeAggregations = append(cumulativeAggregations, getAggregateAttestationsAll[i]+submitAggregateAndProofsAll[i])
		}
		cumulativeAggregationsSimulationValues := generateSimulationValues(cumulativeAggregations, "")
		allRequests = append(allRequests, cumulativeAggregations...)

		aggregationResults = SimulationAggregation{
			GetAggregateAttestationRequest: getAggregateSimulationValues,
			PostAggregateAndProofsRequest:  submitAggregateSimulationValues,
			SimulationValues:               cumulativeAggregationsSimulationValues,
		}
	}

	// proposal results grouping
	var proposalResults SimulationProposal
	if dutiesPerformed.Proposal {
		produceBlockValues := generateSimulationValues(produceBlockAll, "GET /eth/v3/validator/blocks/{SLOT}")
		publishBlindedBlockValues := generateSimulationValues(publishBlindedBlockAll, "POST /eth/v2/beacon/blinded")

		cumulativeProposals := []time.Duration{}
		for i := range min(len(produceBlockAll), len(publishBlindedBlockAll)) {
			cumulativeProposals = append(cumulativeProposals, produceBlockAll[i]+publishBlindedBlockAll[i])
		}
		cumulativeProposalsSimulationValues := generateSimulationValues(cumulativeProposals, "")
		allRequests = append(allRequests, cumulativeProposals...)

		proposalResults = SimulationProposal{
			ProduceBlockRequest:        produceBlockValues,
			PublishBlindedBlockRequest: publishBlindedBlockValues,
			SimulationValues:           cumulativeProposalsSimulationValues,
		}
	}

	// sync committee results grouping
	var syncCommitteeResults SimulationSyncCommittee
	if dutiesPerformed.SyncCommittee {
		syncCommitteeAll := []time.Duration{}
		syncCommitteeSubscriptionValues := generateSimulationValues(syncCommitteeSubscriptionAll, "POST /eth/v1/validator/sync_committee_subscriptions")
		syncCommitteeAll = append(syncCommitteeAll, syncCommitteeSubscriptionAll...)
		allRequests = append(allRequests, syncCommitteeSubscriptionAll...)

		submitSyncCommitteeMessageValues := generateSimulationValues(submitSyncCommitteeMessageAll, "POST /eth/v1/beacon/pool/sync_committees")
		syncCommitteeAll = append(syncCommitteeAll, submitSyncCommitteeMessageAll...)
		allRequests = append(allRequests, submitSyncCommitteeMessageAll...)

		produceSyncCommitteeContributionValues := generateSimulationValues(produceSyncCommitteeContributionAll, "GET /eth/v1/validator/sync_committee_contribution")
		submitSyncCommitteeContributionValues := generateSimulationValues(submitSyncCommitteeContributionAll, "POST /eth/v1/validator/contribution_and_proofs")

		syncCommitteeContributionAll := []time.Duration{}
		for i := range min(len(produceSyncCommitteeContributionAll), len(submitSyncCommitteeContributionAll)) {
			syncCommitteeContributionAll = append(syncCommitteeContributionAll, produceSyncCommitteeContributionAll[i]+submitSyncCommitteeContributionAll[i])
		}
		syncCommitteeContributionValues := generateSimulationValues(syncCommitteeContributionAll, "")
		syncCommitteeAll = append(syncCommitteeAll, syncCommitteeContributionAll...)
		allRequests = append(allRequests, syncCommitteeContributionAll...)

		cumulativeSyncCommitteesSimulationValues := generateSimulationValues(syncCommitteeAll, "")

		syncCommitteeResults = SimulationSyncCommittee{
			MessageDuty: SyncCommitteeMessageDuty{
				SubmitSyncCommitteeMessageRequest: submitSyncCommitteeMessageValues,
			},
			ContributionDuty: SyncCommitteeContributionDuty{
				ProduceSyncCommitteeContributionRequest: produceSyncCommitteeContributionValues,
				SubmitSyncCommitteeContributionRequest:  submitSyncCommitteeContributionValues,
				SimulationValues:                        syncCommitteeContributionValues,
			},
			SubscribeSyncCommitteeRequest: syncCommitteeSubscriptionValues,
			SimulationValues:              cumulativeSyncCommitteesSimulationValues,
		}
	}

	allResult := generateSimulationValues(allRequests, "")

	resultCh <- SimulationSingleValidator{
		AttestationDuty:     attestationResult,
		AggregationDuty:     aggregationResults,
		ProposalDuty:        proposalResults,
		SyncCommitteeDuties: syncCommitteeResults,
		SimulationValues:    allResult,
	}
}

// aggregationDuty repeatedly performs the aggregation duty (fetch aggregate
// attestation, then submit aggregate and proofs) every tickTime until the
// simulation duration elapses, reporting each request's duration.
func aggregationDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, getAggregateAttestationsCh chan time.Duration, submitAggregateAndProofsCh chan time.Duration) {
	defer close(getAggregateAttestationsCh)
	defer close(submitAggregateAndProofsCh)
	pingCtx, cancel := context.WithTimeout(ctx, simulationDuration)
	defer cancel()
	slot, err := getCurrentSlot(ctx, target)
	if err != nil {
		log.Error(ctx, "Failed to get current slot", err)
		slot = 1
	}

	// Randomize the start so simulated validators don't fire in lockstep.
	time.Sleep(randomizeStart(tickTime))
	ticker := time.NewTicker(tickTime)
	defer ticker.Stop()

	for pingCtx.Err() == nil {
		getResult, err := getAggregateAttestations(ctx, target, slot, "0x87db5c50a4586fa37662cf332382d56a0eeea688a7d7311a42735683dfdcbfa4")
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected getAggregateAttestations failure", err)
		}
		submitResult, err := postAggregateAndProofs(ctx, target)
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected aggregateAndProofs failure", err)
		}
		getAggregateAttestationsCh <- getResult
		submitAggregateAndProofsCh <- submitResult
		select {
		case <-pingCtx.Done():
		case <-ticker.C:
			// Advance the slot by the number of slots covered by one tick.
			slot += int(tickTime.Seconds()) / int(slotTime.Seconds())
		}
	}
}

// proposalDuty repeatedly performs the proposal duty (produce a block, then
// publish a blinded block) every tickTime until the simulation duration
// elapses, reporting each request's duration.
func proposalDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, produceBlockCh chan time.Duration, publishBlindedBlockCh chan time.Duration) {
	defer close(produceBlockCh)
	defer close(publishBlindedBlockCh)
	pingCtx, cancel := context.WithTimeout(ctx, simulationDuration)
	defer cancel()

	// Randomize the start so simulated validators don't fire in lockstep.
	time.Sleep(randomizeStart(tickTime))
	ticker := time.NewTicker(tickTime)
	defer ticker.Stop()
	slot, err := getCurrentSlot(ctx, target)
	if err != nil {
		log.Error(ctx, "Failed to get current slot", err)
		slot = 1
	}
	for pingCtx.Err() == nil {
		// The hex argument is a fixed RANDAO reveal used for all requests.
		produceResult, err := produceBlock(ctx, target, slot, "0x1fe79e4193450abda94aec753895cfb2aac2c2a930b6bab00fbb27ef6f4a69f4400ad67b5255b91837982b4c511ae1d94eae1cf169e20c11bd417c1fffdb1f99f4e13e2de68f3b5e73f1de677d73cd43e44bf9b133a79caf8e5fad06738e1b0c")
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected produceBlock failure", err)
		}
		publishResult, err := publishBlindedBlock(ctx, target)
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected publishBlindedBlock failure", err)
		}
		produceBlockCh <- produceResult
		publishBlindedBlockCh <- publishResult
		select {
		case <-pingCtx.Done():
		case <-ticker.C:
			slot += int(tickTime.Seconds())/int(slotTime.Seconds()) + 1 // produce block for the next slot, as the current one might have already been proposed
		}
	}
}

// attestationDuty repeatedly performs the attestation duty (fetch attestation
// data for a random committee, then submit the attestation) every tickTime
// until the simulation duration elapses, reporting each request's duration.
func attestationDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, getAttestationDataCh chan time.Duration, submitAttestationObjectCh chan time.Duration) {
	defer close(getAttestationDataCh)
	defer close(submitAttestationObjectCh)
	pingCtx, cancel := context.WithTimeout(ctx, simulationDuration)
	defer cancel()

	// Randomize the start so simulated validators don't fire in lockstep.
	time.Sleep(randomizeStart(tickTime))
	ticker := time.NewTicker(tickTime)
	defer ticker.Stop()
	slot, err := getCurrentSlot(ctx, target)
	if err != nil {
		log.Error(ctx, "Failed to get current slot", err)
		slot = 1
	}
	for pingCtx.Err() == nil {
		getResult, err := getAttestationData(ctx, target, slot, rand.Intn(committeeSizePerSlot)) //nolint:gosec // weak generator is not an issue here
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected getAttestationData failure", err)
		}
		getAttestationDataCh <- getResult

		submitResult, err := submitAttestationObject(ctx, target)
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected submitAttestationObject failure", err)
		}
		submitAttestationObjectCh <- submitResult

		select {
		case <-pingCtx.Done():
		case <-ticker.C:
			slot += int(tickTime.Seconds()) / int(slotTime.Seconds())
		}
	}
}

// syncCommitteeDuties starts the contribution and message duty loops in their
// own goroutines and runs the subscription loop itself, each on its own tick
// interval, for the duration of the simulation.
func syncCommitteeDuties(
	ctx context.Context, target string,
	simulationDuration time.Duration, tickTimeSubmit time.Duration, tickTimeSubscribe time.Duration, tickTimeContribution time.Duration,
	submitSyncCommitteesCh chan time.Duration, produceSyncCommitteeContributionCh chan time.Duration, syncCommitteeSubscriptionCh chan time.Duration, syncCommitteeContributionCh chan time.Duration,
) {
	// Each spawned loop closes its own channels when it returns.
	go syncCommitteeContributionDuty(ctx, target, simulationDuration, tickTimeContribution, produceSyncCommitteeContributionCh, syncCommitteeContributionCh)
	go syncCommitteeMessageDuty(ctx, target, simulationDuration, tickTimeSubmit, submitSyncCommitteesCh)

	defer close(syncCommitteeSubscriptionCh)
	pingCtx, cancel := context.WithTimeout(ctx, simulationDuration)
	defer cancel()

	// Randomize the start so simulated validators don't fire in lockstep.
	time.Sleep(randomizeStart(tickTimeSubscribe))
	ticker := time.NewTicker(tickTimeSubscribe)
	defer ticker.Stop()

	for pingCtx.Err() == nil {
		subscribeResult, err := syncCommitteeSubscription(ctx, target)
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected syncCommitteeSubscription failure", err)
		}
		syncCommitteeSubscriptionCh <- subscribeResult

		select {
		case <-pingCtx.Done():
		case <-ticker.C:
		}
	}
}

// syncCommitteeContributionDuty repeatedly produces and submits a sync
// committee contribution for a random subcommittee every tickTime until the
// simulation duration elapses, reporting each request's duration.
func syncCommitteeContributionDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, produceSyncCommitteeContributionCh chan time.Duration, syncCommitteeContributionCh chan time.Duration) {
	defer close(produceSyncCommitteeContributionCh)
	defer close(syncCommitteeContributionCh)
	pingCtx, cancel := context.WithTimeout(ctx, simulationDuration)
	defer cancel()

	// Randomize the start so simulated validators don't fire in lockstep.
	time.Sleep(randomizeStart(tickTime))
	ticker := time.NewTicker(tickTime)
	defer ticker.Stop()

	slot, err := getCurrentSlot(ctx, target)
	if err != nil {
		log.Error(ctx, "Failed to get current slot", err)
		slot = 1
	}
	for pingCtx.Err() == nil {
		produceResult, err := produceSyncCommitteeContribution(ctx, target, slot, rand.Intn(subCommitteeSize), "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2") //nolint:gosec // weak generator is not an issue here
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected produceSyncCommitteeContribution failure", err)
		}
		produceSyncCommitteeContributionCh <- produceResult
		contributeResult, err := submitSyncCommitteeContribution(ctx, target)
		if err != nil && !errors.Is(err, context.Canceled) {
			log.Error(ctx, "Unexpected submitSyncCommitteeContribution failure", err)
		}
		syncCommitteeContributionCh <- contributeResult
		select {
		case <-pingCtx.Done():
		case <-ticker.C:
			slot += int(tickTime.Seconds()) / int(slotTime.Seconds())
		}
	}
}

// syncCommitteeMessageDuty repeatedly submits a sync committee message every
// tickTime until the simulation duration elapses.
func syncCommitteeMessageDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, submitSyncCommitteesCh chan time.Duration) {
	defer close(submitSyncCommitteesCh)
	pingCtx, cancel :=
context.WithTimeout(ctx, simulationDuration) + defer cancel() + + time.Sleep(randomizeStart(tickTime)) + ticker := time.NewTicker(tickTime) + defer ticker.Stop() + + for pingCtx.Err() == nil { + submitResult, err := submitSyncCommittee(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected submitSyncCommittee failure", err) + } + submitSyncCommitteesCh <- submitResult + select { + case <-pingCtx.Done(): + case <-ticker.C: + } + } +} + +func getCurrentSlot(ctx context.Context, target string) (int, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, target+"/eth/v1/node/syncing", nil) + if err != nil { + return 0, errors.Wrap(err, "create new http request") + } + resp, err := new(http.Client).Do(req) + if err != nil { + return 0, errors.Wrap(err, "call /eth/v1/node/syncing endpoint") + } + defer resp.Body.Close() + + if resp.StatusCode/100 != 2 { + return 0, errors.New("post failed", z.Int("status", resp.StatusCode)) + } + + type syncingResponseData struct { + HeadSlot string `json:"head_slot"` + } + type syncingResponse struct { + Data syncingResponseData `json:"data"` + } + var sr syncingResponse + if err := json.NewDecoder(resp.Body).Decode(&sr); err != nil { + return 0, errors.Wrap(err, "json unmarshal error") + } + + head, err := strconv.Atoi(sr.Data.HeadSlot) + if err != nil { + return 0, errors.Wrap(err, "head slot string to int") + } + + return head, nil +} + +// if verbose flag is not passed, don't output `All` field and results per single validator +func nonVerboseFinalSimulation(s Simulation) Simulation { + s.ValidatorsRequests.AllValidators = []SimulationSingleValidator{} + + s.ValidatorsRequests.Averaged.All = []Duration{} + s.ValidatorsRequests.Averaged.AggregationDuty.All = []Duration{} + s.ValidatorsRequests.Averaged.AggregationDuty.GetAggregateAttestationRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.AggregationDuty.PostAggregateAndProofsRequest.All = []Duration{} + 
s.ValidatorsRequests.Averaged.AttestationDuty.All = []Duration{} + s.ValidatorsRequests.Averaged.AttestationDuty.GetAttestationDataRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.AttestationDuty.PostAttestationsRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.ProposalDuty.All = []Duration{} + s.ValidatorsRequests.Averaged.ProposalDuty.ProduceBlockRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.ProposalDuty.PublishBlindedBlockRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.ContributionDuty.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.ContributionDuty.ProduceSyncCommitteeContributionRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.ContributionDuty.SubmitSyncCommitteeContributionRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.MessageDuty.SubmitSyncCommitteeMessageRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.SubscribeSyncCommitteeRequest.All = []Duration{} + + s.GeneralClusterRequests.AttestationsForBlockRequest.All = []Duration{} + s.GeneralClusterRequests.ProposalDutiesForEpochRequest.All = []Duration{} + s.GeneralClusterRequests.SyncingRequest.All = []Duration{} + s.GeneralClusterRequests.PeerCountRequest.All = []Duration{} + s.GeneralClusterRequests.BeaconCommitteeSubscriptionRequest.All = []Duration{} + s.GeneralClusterRequests.DutiesAttesterForEpochRequest.All = []Duration{} + s.GeneralClusterRequests.DutiesSyncCommitteeForEpochRequest.All = []Duration{} + s.GeneralClusterRequests.BeaconHeadValidatorsRequest.All = []Duration{} + s.GeneralClusterRequests.BeaconGenesisRequest.All = []Duration{} + s.GeneralClusterRequests.PrepBeaconProposerRequest.All = []Duration{} + s.GeneralClusterRequests.ConfigSpecRequest.All = []Duration{} + s.GeneralClusterRequests.NodeVersionRequest.All = []Duration{} + + return s +} + +func 
mapDurationToTime(dur []Duration) []time.Duration { + result := make([]time.Duration, len(dur)) + for i, e := range dur { + result[i] = e.Duration + } + + return result +} + +func generateSimulationValues(s []time.Duration, endpoint string) SimulationValues { + if len(s) == 0 { + return SimulationValues{ + Endpoint: endpoint, + All: []Duration{}, + Min: Duration{0}, + Max: Duration{0}, + Median: Duration{0}, + Avg: Duration{0}, + } + } + + sorted := make([]time.Duration, len(s)) + copy(sorted, s) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i] < sorted[j] + }) + minVal := sorted[0] + maxVal := sorted[len(s)-1] + medianVal := sorted[len(s)/2] + var sum time.Duration + all := []Duration{} + for _, t := range s { + sum += t + all = append(all, Duration{t}) + } + avgVal := time.Duration(int(sum.Nanoseconds()) / len(s)) + + return SimulationValues{ + Endpoint: endpoint, + All: all, + Min: Duration{minVal}, + Max: Duration{maxVal}, + Median: Duration{medianVal}, + Avg: Duration{avgVal}, + } +} + +func averageValidatorsResult(s []SimulationSingleValidator) SimulationSingleValidator { + if len(s) == 0 { + return SimulationSingleValidator{} + } + + var attestation, attestationGetDuties, attestationPostData, + aggregation, aggregationGetAggregationAttestations, aggregationSubmitAggregateAndProofs, + proposal, proposalProduceBlock, proposalPublishBlindedBlock, + syncCommittee, syncCommitteeSubmitMessage, syncCommitteeProduceContribution, syncCommitteeSubmitContribution, syncCommitteeContribution, syncCommitteeSusbscription, + all []time.Duration + + for _, sim := range s { + attestationGetDuties = append(attestationGetDuties, mapDurationToTime(sim.AttestationDuty.GetAttestationDataRequest.All)...) + attestationPostData = append(attestationPostData, mapDurationToTime(sim.AttestationDuty.PostAttestationsRequest.All)...) + attestation = append(attestation, mapDurationToTime(sim.AttestationDuty.All)...) 
+ aggregationGetAggregationAttestations = append(aggregationGetAggregationAttestations, mapDurationToTime(sim.AggregationDuty.GetAggregateAttestationRequest.All)...) + aggregationSubmitAggregateAndProofs = append(aggregationSubmitAggregateAndProofs, mapDurationToTime(sim.AggregationDuty.PostAggregateAndProofsRequest.All)...) + aggregation = append(aggregation, mapDurationToTime(sim.AggregationDuty.All)...) + proposalProduceBlock = append(proposalProduceBlock, mapDurationToTime(sim.ProposalDuty.ProduceBlockRequest.All)...) + proposalPublishBlindedBlock = append(proposalPublishBlindedBlock, mapDurationToTime(sim.ProposalDuty.PublishBlindedBlockRequest.All)...) + proposal = append(proposal, mapDurationToTime(sim.ProposalDuty.All)...) + syncCommitteeSubmitMessage = append(syncCommitteeSubmitMessage, mapDurationToTime(sim.SyncCommitteeDuties.MessageDuty.SubmitSyncCommitteeMessageRequest.All)...) + syncCommitteeProduceContribution = append(syncCommitteeProduceContribution, mapDurationToTime(sim.SyncCommitteeDuties.ContributionDuty.ProduceSyncCommitteeContributionRequest.All)...) + syncCommitteeSubmitContribution = append(syncCommitteeSubmitContribution, mapDurationToTime(sim.SyncCommitteeDuties.ContributionDuty.SubmitSyncCommitteeContributionRequest.All)...) + syncCommitteeContribution = append(syncCommitteeContribution, mapDurationToTime(sim.SyncCommitteeDuties.ContributionDuty.All)...) + syncCommitteeSusbscription = append(syncCommitteeSusbscription, mapDurationToTime(sim.SyncCommitteeDuties.SubscribeSyncCommitteeRequest.All)...) + syncCommittee = append(syncCommittee, mapDurationToTime(sim.SyncCommitteeDuties.All)...) + all = append(all, mapDurationToTime(sim.All)...) 
+ } + + return SimulationSingleValidator{ + AttestationDuty: SimulationAttestation{ + GetAttestationDataRequest: generateSimulationValues(attestationGetDuties, "GET /eth/v1/validator/attestation_data"), + PostAttestationsRequest: generateSimulationValues(attestationPostData, "POST /eth/v1/beacon/pool/attestations"), + SimulationValues: generateSimulationValues(attestation, ""), + }, + AggregationDuty: SimulationAggregation{ + GetAggregateAttestationRequest: generateSimulationValues(aggregationGetAggregationAttestations, "GET /eth/v1/validator/aggregate_attestation"), + PostAggregateAndProofsRequest: generateSimulationValues(aggregationSubmitAggregateAndProofs, "POST /eth/v1/validator/aggregate_and_proofs"), + SimulationValues: generateSimulationValues(aggregation, ""), + }, + ProposalDuty: SimulationProposal{ + ProduceBlockRequest: generateSimulationValues(proposalProduceBlock, "GET /eth/v3/validator/blocks/{SLOT}"), + PublishBlindedBlockRequest: generateSimulationValues(proposalPublishBlindedBlock, "POST /eth/v2/beacon/blinded"), + SimulationValues: generateSimulationValues(proposal, ""), + }, + SyncCommitteeDuties: SimulationSyncCommittee{ + MessageDuty: SyncCommitteeMessageDuty{ + SubmitSyncCommitteeMessageRequest: generateSimulationValues(syncCommitteeSubmitMessage, "POST /eth/v1/beacon/pool/sync_committees"), + }, + ContributionDuty: SyncCommitteeContributionDuty{ + ProduceSyncCommitteeContributionRequest: generateSimulationValues(syncCommitteeProduceContribution, "GET /eth/v1/validator/sync_committee_contribution"), + SubmitSyncCommitteeContributionRequest: generateSimulationValues(syncCommitteeSubmitContribution, "POST /eth/v1/validator/contribution_and_proofs"), + SimulationValues: generateSimulationValues(syncCommitteeContribution, ""), + }, + SubscribeSyncCommitteeRequest: generateSimulationValues(syncCommitteeSusbscription, "POST /eth/v1/validator/sync_committee_subscriptions"), + SimulationValues: generateSimulationValues(syncCommittee, ""), + }, + 
SimulationValues: generateSimulationValues(all, ""), + } +} + +// randomize duty execution start to be in [0, n*slot), where n is the frequency of the request per slot +func randomizeStart(tickTime time.Duration) time.Duration { + return slotTime * time.Duration(rand.Intn(int((tickTime / slotTime)))) //nolint:gosec // weak generator is not an issue here +} + +func requestRTT(ctx context.Context, url string, method string, body io.Reader, expectedStatus int) (time.Duration, error) { + var start time.Time + var firstByte time.Duration + + trace := &httptrace.ClientTrace{ + GotFirstResponseByte: func() { + firstByte = time.Since(start) + }, + } + + start = time.Now() + req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), method, url, body) + if err != nil { + return 0, errors.Wrap(err, "create new request with trace and context") + } + + resp, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if resp.StatusCode != expectedStatus { + data, err := io.ReadAll(resp.Body) + if err != nil { + log.Warn(ctx, "Unexpected status code", nil, z.Int("status_code", resp.StatusCode), z.Int("expected_status_code", expectedStatus), z.Str("endpoint", url)) + } else { + log.Warn(ctx, "Unexpected status code", nil, z.Int("status_code", resp.StatusCode), z.Int("expected_status_code", expectedStatus), z.Str("endpoint", url), z.Str("body", string(data))) + } + } + + return firstByte, nil +} + +// cluster requests +func getAttestationsForBlock(ctx context.Context, target string, block int) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/blocks/%v/attestations", target, block), http.MethodGet, nil, 200) +} + +func getProposalDutiesForEpoch(ctx context.Context, target string, epoch int) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/duties/proposer/%v", target, epoch), http.MethodGet, nil, 200) +} + +func getSyncing(ctx context.Context, 
target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/syncing", target), http.MethodGet, nil, 200) +} + +func getPeerCount(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/peer_count", target), http.MethodGet, nil, 200) +} + +func beaconCommitteeSub(ctx context.Context, target string) (time.Duration, error) { + body := strings.NewReader(`[{"validator_index":"1","committee_index":"1","committees_at_slot":"1","slot":"1","is_aggregator":true}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/beacon_committee_subscriptions", target), http.MethodPost, body, 200) +} + +func getAttesterDutiesForEpoch(ctx context.Context, target string, epoch int) (time.Duration, error) { + body := strings.NewReader(`["1"]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/duties/attester/%v", target, epoch), http.MethodPost, body, 200) +} + +func getSyncCommitteeDutiesForEpoch(ctx context.Context, target string, epoch int) (time.Duration, error) { + body := strings.NewReader(`["1"]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/duties/sync/%v", target, epoch), http.MethodPost, body, 200) +} + +func beaconHeadValidators(ctx context.Context, target string) (time.Duration, error) { + body := strings.NewReader(`{"ids":["0xb6066945aa87a1e0e4b55e347d3a8a0ef7f0d9f7ef2c46abebadb25d7de176b83c88547e5f8644b659598063c845719a"]}`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/states/head/validators", target), http.MethodPost, body, 200) +} + +func beaconGenesis(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/genesis", target), http.MethodGet, nil, 200) +} + +func prepBeaconProposer(ctx context.Context, target string) (time.Duration, error) { + body := strings.NewReader(`[{"validator_index":"1725802","fee_recipient":"0x74b1C2f5788510c9ecA5f56D367B0a3D8a15a430"}]`) + return requestRTT(ctx, 
fmt.Sprintf("%v/eth/v1/validator/prepare_beacon_proposer", target), http.MethodPost, body, 200) +} + +func configSpec(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/config/spec", target), http.MethodGet, nil, 200) +} + +func nodeVersion(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/version", target), http.MethodGet, nil, 200) +} + +// attestation duty requests +func getAttestationData(ctx context.Context, target string, slot int, committeeIndex int) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/attestation_data?slot=%v&committee_index=%v", target, slot, committeeIndex), http.MethodGet, nil, 200) +} + +func submitAttestationObject(ctx context.Context, target string) (time.Duration, error) { + body := strings.NewReader(`{{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/pool/attestations", target), http.MethodPost, body, 400) +} + +// aggregation duty requests +func getAggregateAttestations(ctx context.Context, target string, slot int, attestationDataRoot string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/aggregate_attestation?slot=%v&attestation_data_root=%v", target, slot, attestationDataRoot), http.MethodGet, nil, 404) +} + +func postAggregateAndProofs(ctx context.Context, target string) (time.Duration, 
error) { + body := strings.NewReader(`[{"message":{"aggregator_index":"1","aggregate":{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}},"selection_proof":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/aggregate_and_proofs", target), http.MethodPost, body, 400) +} + +// proposal duty requests +func produceBlock(ctx context.Context, target string, slot int, randaoReveal string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v3/validator/blocks/%v?randao_reveal=%v", target, slot, randaoReveal), http.MethodGet, nil, 200) +} + +func publishBlindedBlock(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`{"message":{"slot":"2872079","proposer_index":"1725813","parent_root":"0x05bea9b8e9cc28c4efa5586b4efac20b7a42c3112dbe144fb552b37ded249abd","state_root":"0x0138e6e8e956218aa534597a450a93c2c98f07da207077b4be05742279688da2","body":{"randao_reveal":"0x9880dad5a0e900906a1355da0697821af687b4c2cd861cd219f2d779c50a47d3c0335c08d840c86c167986ae0aaf50070b708fe93a83f66c99a4f931f9a520aebb0f5b11ca202c3d76343e30e49f43c0479e850af0e410333f7c59c4d37fa95a","eth1_data":{"deposit_root":"0x7dbea1a0af14d774da92d94a88d3bb1ae7abad16374da4db2c71dd086c84029e","deposit_count":"452100","block_hash":"0xc4bf450c9e362dcb2b50e76b45938c78d455acd1e1aec4e1ce4338ec023cd32a"},"graffiti":"0x636861726f6e2f76312e312e302d613139336638340000000000000000000000","proposer_slashings":[],"attester_slashings":[],"attestations":[{"aggregation_bits":"0xdbedbfa74eccaf3d7ef570bfdbbf84b4dffc5beede1c1f8b59feb8b3f2fbabdbdef3ceeb7b3dfdeeef8efcbdcd7bebbeff7adfff5ae3bf66bc5613feffef3deb987f7e7fff87ed6f8bbd1fffa57f1677efff646f0d3bd79fffdc5dfd78df6cf79fb7febff5dfdefb8e03","data":{"slot":"2872060","index":"12","beacon_block_root":"0x310506169f7f92dcd2bf00e8b4c2daac999566929395120fbbf4edd222e003eb","source":{"epoch":"89750","root":"0xcdb449d69e3e2d22378bfc2299ee1e9aeb1b2d15066022e854759dda73d1e219"},"target":{"epoch":"89751","root":"0x4ad0882f7adbb735c56b0b3f09d8e45dbd79db9528110f7117ec067f3a19eb0e"}},"signature":"0xa9d91d6cbc669ffcc8ba2435c633e0ec0eebecaa3acdcaa1454282ece1f816e8b853f00ba67ec1244703221efae4c834012819ca7b199354669f24ba8ab1c769f072c9f46b803082eac32e3611cd323eeb5b17fcd6201b41f3063834ff26ef53"}],"deposits":[],"voluntary_exits":[],"sync_aggregate":{"sync_committee_bits":"0xf9ff3ff7ffffb7dbfefddff5fffffefdbffffffffffedfefffffff7fbe9fdffffdb5feffffffbfdbefff3ffdf7f3fc6ff7fffbffff9df6fbbaf3beffefffffff","sync_committee_signature":"0xa9cf7d9f23a62e84f11851e2e4b3b929b1d03719a780b59ecba5daf57e21a0ceccaf13db4e1392a42e3603abeb839a2d16373dcdd5e696f11c5a809972c1e368d794f1c61d4d10b220df52616032f09b33912febf8
c7a64f3ce067ab771c7ddf"},"execution_payload_header":{"parent_hash":"0x71c564f4a0c1dea921e8063fc620ccfa39c1b073e4ac0845ce7e9e6f909752de","fee_recipient":"0x148914866080716b10D686F5570631Fbb2207002","state_root":"0x89e74be562cd4a10eb20cdf674f65b1b0e53b33a7c3f2df848eb4f7e226742e0","receipts_root":"0x55b494ee1bb919e7abffaab1d5be05a109612c59a77406d929d77c0ce714f21d","logs_bloom":"0x20500886140245d001002010680c10411a2540420182810440a108800fc008440801180020011008004045005a2007826802e102000005c0c04030590004044810d0d20745c0904a4d583008a01758018001082024e40046000410020042400100012260220299a8084415e20002891224c132220010003a00006010020ed0c108920a13c0e200a1a00251100888c01408008132414068c88b028920440248209a280581a0e10800c14ea63082c1781308208b130508d4000400802d1224521094260912473404012810001503417b4050141100c1103004000c8900644560080472688450710084088800c4c80000c02008931188204c008009011784488060","prev_randao":"0xf4e9a4a7b88a3d349d779e13118b6d099f7773ec5323921343ac212df19c620f","block_number":"2643688","gas_limit":"30000000","gas_used":"24445884","timestamp":"1730367348","extra_data":"0x546974616e2028746974616e6275696c6465722e78797a29","base_fee_per_gas":"122747440","block_hash":"0x7524d779d328159e4d9ee8a4b04c4b251261da9a6da1d1461243125faa447227","transactions_root":"0x7e8a3391a77eaea563bf4e0ca4cf3190425b591ed8572818924c38f7e423c257","withdrawals_root":"0x61a5653b614ec3db0745ae5568e6de683520d84bc3db2dedf6a5158049cee807","blob_gas_used":"0","excess_blob_gas":"0"},"bls_to_execution_changes":[],"blob_kzg_commitments":[]}},"signature":"0x94320e6aecd65da3ef3e55e45208978844b262fe21cacbb0a8448b2caf21e8619b205c830116d8aad0a2c55d879fb571123a3fcf31b515f9508eb346ecd3de2db07cea6700379c00831cfb439f4aeb3bfa164395367c8d8befb92aa6682eae51"}`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v2/beacon/blinded", target), http.MethodPost, body, 404) +} + +// sync committee duty requests +func submitSyncCommittee(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`{{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/pool/sync_committees", target), http.MethodPost, body, 400) +} + +func produceSyncCommitteeContribution(ctx context.Context, target string, slot int, subCommitteeIndex int, beaconBlockRoot string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/sync_committee_contribution?slot=%v&subcommittee_index=%v&beacon_block_root=%v", target, slot, subCommitteeIndex, beaconBlockRoot), http.MethodGet, nil, 404) +} + +func syncCommitteeSubscription(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`[{"message":{"aggregator_index":"1","aggregate":{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}},"selection_proof":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/sync_committee_subscriptions", target), http.MethodPost, body, 400) +} + +func submitSyncCommitteeContribution(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`[{"message":{"aggregator_index":"1","contribution":{"slot":"1","beacon_block_root":"0xace2cad95a1b113457ccc680372880694a3ef820584d04a165aa2bda0f261950","subcommittee_index":"3","aggregation_bits":"0xfffffbfff7ddffffbef3bfffebffff7f","signature":"0xaa4cf0db0677555025fe12223572e67b509b0b24a2b07dc162aed38522febb2a64ad293e6dbfa1b81481eec250a2cdb61619456291f8d0e3f86097a42a71985d6dabd256107af8b4dfc2982a7d67ac63e2d6b7d59d24a9e87546c71b9c68ca1f"},"selection_proof":"0xb177453ba19233da0625b354d6a43e8621b676243ec4aa5dbb269ac750079cc23fced007ea6cdc1bfb6cc0e2fc796fbb154abed04d9aac7c1171810085beff2b9e5cff961975dbdce4199f39d97b4c46339e26eb7946762394905dbdb9818afe"},"signature":"0x8f73f3185164454f6807549bcbf9d1b0b5516279f35ead1a97812da5db43088de344fdc46aaafd20650bd6685515fb4e18f9f053e9e3691065f8a87f6160456ef8aa550f969ef8260368aae3e450e8763c6317f40b09863ad9b265a0e618e472"}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/contribution_and_proofs", target), http.MethodPost, body, 200) +} diff --git a/cmd/testbeacon_internal_test.go b/cmd/testbeacon_internal_test.go index 13d5d933ab..e54cda3481 100644 --- a/cmd/testbeacon_internal_test.go +++ b/cmd/testbeacon_internal_test.go @@ -56,6 +56,11 @@ func TestBeaconTest(t *testing.T) { {Name: "isSynced", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "peerCount", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", 
Error: testResultError{}}, + {Name: "simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -73,22 +78,7 @@ func TestBeaconTest(t *testing.T) { Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ - Targets: map[string][]testResult{ - endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - }, + Targets: defaultFailingBNTests(t, 
endpoint1, endpoint2, port1, port2), }, expectedErr: "", }, @@ -127,22 +117,7 @@ func TestBeaconTest(t *testing.T) { Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ - Targets: map[string][]testResult{ - endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - }, + Targets: defaultFailingBNTests(t, endpoint1, endpoint2, port1, port2), }, expectedErr: "", }, @@ -195,22 +170,7 @@ func 
TestBeaconTest(t *testing.T) { Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ - Targets: map[string][]testResult{ - endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - }, + Targets: defaultFailingBNTests(t, endpoint1, endpoint2, port1, port2), Score: categoryScoreC, CategoryName: beaconTestCategory, }, @@ -252,6 +212,35 @@ func TestBeaconTest(t *testing.T) { } } +func 
defaultFailingBNTests(_ *testing.T, endpoint1 string, endpoint2 string, port1 int, port2 int) map[string][]testResult { + return map[string][]testResult{ + endpoint1: { + {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + }, + endpoint2: { + {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: 
testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + }, + } +} + func startHealthyMockedBeaconNode(t *testing.T) *httptest.Server { t.Helper() diff --git a/cmd/testmev.go b/cmd/testmev.go index cb1ffd4ff5..709b118572 100644 --- a/cmd/testmev.go +++ b/cmd/testmev.go @@ -15,7 +15,6 @@ import ( "golang.org/x/sync/errgroup" "github.com/obolnetwork/charon/app/errors" - "github.com/obolnetwork/charon/app/z" ) type testMEVConfig struct { @@ -213,7 +212,7 @@ func mevPingTest(ctx context.Context, _ *testMEVConfig, target string) testResul defer resp.Body.Close() if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } testRes.Verdict = testVerdictOk @@ -247,7 +246,7 @@ func mevPingMeasureTest(ctx context.Context, _ *testMEVConfig, target string) te defer resp.Body.Close() if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, 
errors.New(httpStatusError(resp.StatusCode))) } if firstByte > thresholdMEVMeasurePoor { diff --git a/cmd/testpeers.go b/cmd/testpeers.go index 28b5960cb3..77312360ea 100644 --- a/cmd/testpeers.go +++ b/cmd/testpeers.go @@ -864,7 +864,7 @@ func relayPingTest(ctx context.Context, _ *testPeersConfig, target string) testR defer resp.Body.Close() if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } testRes.Verdict = testVerdictOk @@ -897,7 +897,7 @@ func relayPingMeasureTest(ctx context.Context, _ *testPeersConfig, target string defer resp.Body.Close() if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } if firstByte > thresholdRelayMeasurePoor { From 993f806f8c16f8a9fa6937b325185f42a4bd0b17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 07:27:19 +0000 Subject: [PATCH 70/89] build(deps): Bump go.opentelemetry.io/otel/trace from 1.31.0 to 1.32.0 (#3375) Bumps [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) from 1.31.0 to 1.32.0.
Changelog

Sourced from go.opentelemetry.io/otel/trace's changelog.

[1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08

Added

  • Add go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter, which can be used to disable exemplar recording. (#5850)
  • Add go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
  • Add ExemplarReservoirProviderSelector and DefaultExemplarReservoirProviderSelector to go.opentelemetry.io/otel/sdk/metric, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
  • Add ExemplarReservoirProviderSelector to go.opentelemetry.io/otel/sdk/metric.Stream to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
  • Add ReservoirProvider, HistogramReservoirProvider and FixedSizeReservoirProvider to go.opentelemetry.io/otel/sdk/metric/exemplar to make it convenient to use providers of Reservoirs. (#5861)
  • The go.opentelemetry.io/otel/semconv/v1.27.0 package. The package contains semantic conventions from the v1.27.0 version of the OpenTelemetry Semantic Conventions. (#5894)
  • Add Attributes attribute.Set field to Scope in go.opentelemetry.io/otel/sdk/instrumentation. (#5903)
  • Add Attributes attribute.Set field to ScopeRecords in go.opentelemetry.io/otel/log/logtest. (#5927)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc adds instrumentation scope attributes. (#5934)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp adds instrumentation scope attributes. (#5934)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc adds instrumentation scope attributes. (#5935)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp adds instrumentation scope attributes. (#5935)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc adds instrumentation scope attributes. (#5933)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp adds instrumentation scope attributes. (#5933)
  • go.opentelemetry.io/otel/exporters/prometheus adds instrumentation scope attributes in otel_scope_info metric as labels. (#5932)

Changed

  • Support scope attributes and make them as identifying for Tracer in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/trace. (#5924)
  • Support scope attributes and make them as identifying for Meter in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/metric. (#5926)
  • Support scope attributes and make them as identifying for Logger in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/log. (#5925)
  • Make schema URL and scope attributes as identifying for Tracer in go.opentelemetry.io/otel/bridge/opentracing. (#5931)
  • Clear unneeded slice elements to allow GC to collect the objects in go.opentelemetry.io/otel/sdk/metric and go.opentelemetry.io/otel/sdk/trace. (#5804)

Fixed

  • Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc now keeps the metadata already present in the context when WithHeaders is used. (#5892)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc now keeps the metadata already present in the context when WithHeaders is used. (#5911)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc now keeps the metadata already present in the context when WithHeaders is used. (#5915)
  • Fix go.opentelemetry.io/otel/exporters/prometheus trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp. (#5944)
  • Fix incorrect metrics generated from callbacks when multiple readers are used in go.opentelemetry.io/otel/sdk/metric. (#5900)

Removed

Commits
  • 7cfbd86 Release v1.32.0/v0.54.0/v0.8.0/v0.0.11 (#5960)
  • 2be617e fix(deps): update github.com/opentracing-contrib/go-grpc/test digest to 51a56...
  • 6db18df fix(deps): update module github.com/opentracing-contrib/go-grpc to v0.1.0 (#5...
  • ef12bf8 chore(deps): update golang.org/x (#5957)
  • 85eb76f Allow GC to collect unneeded slice elements (#5804)
  • 1492efa Fix incorrect metrics getting generated from multiple readers (#5900)
  • d2b0663 fix(deps): update module go.opentelemetry.io/build-tools/multimod to v0.15.0 ...
  • 394cbd2 chore(deps): update lycheeverse/lychee-action action to v2.1.0 (#5950)
  • 37b2537 fix(deps): update github.com/opentracing-contrib/go-grpc digest to e3cbcab (#...
  • 7f68356 fix(deps): update module go.opentelemetry.io/build-tools/semconvgen to v0.15....
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel/trace&package-manager=go_modules&previous-version=1.31.0&new-version=1.32.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 0e3814bcf6..c158970f13 100644 --- a/go.mod +++ b/go.mod @@ -33,11 +33,11 @@ require ( github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 - go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 + go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 @@ -184,7 +184,7 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect diff --git a/go.sum b/go.sum index e2e9a4d82d..93a39ed0a1 100644 --- a/go.sum +++ b/go.sum @@ -546,8 +546,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod 
h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= @@ -556,14 +556,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 
h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= From 1928e1dd16279b2dd04a06a8163dbfd094931f75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 07:28:49 +0000 Subject: [PATCH 71/89] build(deps): Bump golang.org/x/tools from 0.26.0 to 0.27.0 (#3373) Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.26.0 to 0.27.0.
Commits
  • 4d2b19f go.mod: update golang.org/x dependencies
  • 6368677 gopls/internal/golang: strength reduce ComputeImportFixEdits
  • 777f155 gopls/internal/golang: show package attributes on hover
  • 8a0e08f gopls/doc: add missing doc link
  • 61415be gopls/internal/cache: guard against malformed paths in port.matches
  • 9a89d3a internal/analysisinternal: avoid sub-token spans in TypeErrorEndPos
  • 1115af6 internal/expect: support named arguments f(a, b, c=d, e="f")
  • 0b9e499 go/{expect,packages/packagestest}: mention the tag+delete proposal
  • efcd2bd internal/packagestest: fork go/packages/packagestest
  • 0e9ed3d go/packages: do not mutate Config
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/tools&package-manager=go_modules&previous-version=0.26.0&new-version=0.27.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index c158970f13..0e984f7643 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( golang.org/x/sync v0.9.0 golang.org/x/term v0.26.0 golang.org/x/time v0.8.0 - golang.org/x/tools v0.26.0 + golang.org/x/tools v0.27.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -190,8 +190,8 @@ require ( go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect golang.org/x/sys v0.27.0 // indirect golang.org/x/text v0.20.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/go.sum b/go.sum index 93a39ed0a1..70e018c440 100644 --- a/go.sum +++ b/go.sum @@ -610,8 +610,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -629,8 +629,8 @@ golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -700,8 +700,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 74f1fa2edb6a681907778d92256e772909167a6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 07:41:59 +0000 Subject: [PATCH 72/89] build(deps): Bump go.opentelemetry.io/otel/sdk from 1.31.0 to 1.32.0 (#3371) Bumps [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) from 1.31.0 to 1.32.0.
Changelog

Sourced from go.opentelemetry.io/otel/sdk's changelog.

[1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08

Added

  • Add go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter, which can be used to disable exemplar recording. (#5850)
  • Add go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
  • Add ExemplarReservoirProviderSelector and DefaultExemplarReservoirProviderSelector to go.opentelemetry.io/otel/sdk/metric, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
  • Add ExemplarReservoirProviderSelector to go.opentelemetry.io/otel/sdk/metric.Stream to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
  • Add ReservoirProvider, HistogramReservoirProvider and FixedSizeReservoirProvider to go.opentelemetry.io/otel/sdk/metric/exemplar to make it convenient to use providers of Reservoirs. (#5861)
  • The go.opentelemetry.io/otel/semconv/v1.27.0 package. The package contains semantic conventions from the v1.27.0 version of the OpenTelemetry Semantic Conventions. (#5894)
  • Add Attributes attribute.Set field to Scope in go.opentelemetry.io/otel/sdk/instrumentation. (#5903)
  • Add Attributes attribute.Set field to ScopeRecords in go.opentelemetry.io/otel/log/logtest. (#5927)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc adds instrumentation scope attributes. (#5934)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp adds instrumentation scope attributes. (#5934)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc adds instrumentation scope attributes. (#5935)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp adds instrumentation scope attributes. (#5935)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc adds instrumentation scope attributes. (#5933)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp adds instrumentation scope attributes. (#5933)
  • go.opentelemetry.io/otel/exporters/prometheus adds instrumentation scope attributes in otel_scope_info metric as labels. (#5932)

Changed

  • Support scope attributes and make them as identifying for Tracer in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/trace. (#5924)
  • Support scope attributes and make them as identifying for Meter in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/metric. (#5926)
  • Support scope attributes and make them as identifying for Logger in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/log. (#5925)
  • Make schema URL and scope attributes as identifying for Tracer in go.opentelemetry.io/otel/bridge/opentracing. (#5931)
  • Clear unneeded slice elements to allow GC to collect the objects in go.opentelemetry.io/otel/sdk/metric and go.opentelemetry.io/otel/sdk/trace. (#5804)

Fixed

  • Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc now keeps the metadata already present in the context when WithHeaders is used. (#5892)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc now keeps the metadata already present in the context when WithHeaders is used. (#5911)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc now keeps the metadata already present in the context when WithHeaders is used. (#5915)
  • Fix go.opentelemetry.io/otel/exporters/prometheus trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp. (#5944)
  • Fix incorrect metrics generated from callbacks when multiple readers are used in go.opentelemetry.io/otel/sdk/metric. (#5900)

Removed

Commits
  • 7cfbd86 Release v1.32.0/v0.54.0/v0.8.0/v0.0.11 (#5960)
  • 2be617e fix(deps): update github.com/opentracing-contrib/go-grpc/test digest to 51a56...
  • 6db18df fix(deps): update module github.com/opentracing-contrib/go-grpc to v0.1.0 (#5...
  • ef12bf8 chore(deps): update golang.org/x (#5957)
  • 85eb76f Allow GC to collect unneeded slice elements (#5804)
  • 1492efa Fix incorrect metrics getting generated from multiple readers (#5900)
  • d2b0663 fix(deps): update module go.opentelemetry.io/build-tools/multimod to v0.15.0 ...
  • 394cbd2 chore(deps): update lycheeverse/lychee-action action to v2.1.0 (#5950)
  • 37b2537 fix(deps): update github.com/opentracing-contrib/go-grpc digest to e3cbcab (#...
  • 7f68356 fix(deps): update module go.opentelemetry.io/build-tools/semconvgen to v0.15....
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel/sdk&package-manager=go_modules&previous-version=1.31.0&new-version=1.32.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0e984f7643..d72af2d22c 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/sdk v1.32.0 go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 diff --git a/go.sum b/go.sum index 70e018c440..ff75a96fbe 100644 --- a/go.sum +++ b/go.sum @@ -558,8 +558,8 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6Bm go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= From e412b5726932dcaf78996b7783484ff31b2f0211 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 08:13:50 +0000 Subject: [PATCH 73/89] build(deps): Bump chainsafe/lodestar from v1.22.0 to v1.23.0 in /testutil/compose/static/lodestar (#3376) Bumps chainsafe/lodestar from 
v1.22.0 to v1.23.0. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=chainsafe/lodestar&package-manager=docker&previous-version=v1.22.0&new-version=v1.23.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- testutil/compose/static/lodestar/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testutil/compose/static/lodestar/Dockerfile b/testutil/compose/static/lodestar/Dockerfile index 9755fe549f..68e4d90774 100644 --- a/testutil/compose/static/lodestar/Dockerfile +++ b/testutil/compose/static/lodestar/Dockerfile @@ -1,4 +1,4 @@ -FROM chainsafe/lodestar:v1.22.0 +FROM chainsafe/lodestar:v1.23.0 RUN apt-get update && apt-get install -y curl jq wget From 765ec3e235d3d2d464a7299d0b212b1c1eb29f06 Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:16:27 +0300 Subject: [PATCH 74/89] docs: typo in README.md (#3369) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Снимок экрана 2024-11-11 в 19 08 00 In the "Go Guidelines" section, the word "principals" corrected to "**principles**" (the correct spelling). category: docs ticket: none --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index 8bc6bddde0..b32cc817e4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -8,6 +8,6 @@ This page acts as an index for the charon (pronounced 'kharon') markdown documen - [Architecture](architecture.md): Overview of charon cluster and node architecture - [Project Structure](structure.md): Project folder structure - [Branching and Release Model](branching.md): Git branching and release model -- [Go Guidelines](goguidelines.md): Guidelines and principals relating to go development +- [Go Guidelines](goguidelines.md): Guidelines and principles relating to go development - [Contributing](contributing.md): How to contribute to charon; githooks, PR templates, etc. - [Distributed Key Generation](dkg.md): How charon can create distributed validator key shares remotely from a cluster-definition file. 
From 88ebdbdf8f80fda6efd1059689182745cf96253d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 08:59:54 +0000 Subject: [PATCH 75/89] build(deps): Bump go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp from 0.56.0 to 0.57.0 (#3372) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) from 0.56.0 to 0.57.0.
Release notes

Sourced from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp's releases.

Release v1.32.0/v0.57.0/v0.26.0/v0.12.0/v0.7.0/v0.5.0/v0.4.0

Overview

Added

  • Add the WithSource option to the go.opentelemetry.io/contrib/bridges/otelslog log bridge to set the code.* attributes in the log record that includes the source location where the record was emitted. (#6253)
  • Add ContextWithStartTime and StartTimeFromContext to go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp, which allows setting the start time using go context. (#6137)
  • Set the code.* attributes in go.opentelemetry.io/contrib/bridges/otelzap if the zap.Logger was created with the AddCaller or AddStacktrace option. (#6268)
  • Add a LogProcessor to go.opentelemetry.io/contrib/processors/baggagecopy to copy baggage members to log records. (#6277)
    • Use baggagecopy.NewLogProcessor when configuring a Log Provider.
      • NewLogProcessor accepts a Filter function type that selects which baggage members are added to the log record.

Changed

  • Transform raw (slog.KindAny) attribute values to matching log.Value types. For example, []string{"foo", "bar"} attribute value is now transformed to log.SliceValue(log.StringValue("foo"), log.StringValue("bar")) instead of log.String("[foo bar"]). (#6254)
  • Upgrade go.opentelemetry.io/otel/semconv/v1.17.0 to go.opentelemetry.io/otel/semconv/v1.21.0 in go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo. (#6272)
  • Resource doesn't merge with defaults if a valid resource is configured in go.opentelemetry.io/contrib/config. (#6289)

Fixed

  • Transform nil attribute values to log.Value zero value instead of panicking in go.opentelemetry.io/contrib/bridges/otellogrus. (#6237)
  • Transform nil attribute values to log.Value zero value instead of panicking in go.opentelemetry.io/contrib/bridges/otelzap. (#6237)
  • Transform nil attribute values to log.Value zero value instead of log.StringValue("<nil>") in go.opentelemetry.io/contrib/bridges/otelslog. (#6246)
  • Fix NewClientHandler so that rpc.client.request.* metrics measure requests instead of responses and rpc.client.responses.* metrics measure responses instead of requests in go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc. (#6250)
  • Fix issue in go.opentelemetry.io/contrib/config causing otelprom.WithResourceAsConstantLabels configuration to not be respected. (#6260)
  • otel.Handle is no longer called on a successful shutdown of the Prometheus exporter in go.opentelemetry.io/contrib/config. (#6299)

What's Changed

... (truncated)

Changelog

Sourced from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp's changelog.

[1.32.0/0.57.0/0.26.0/0.12.0/0.7.0/0.5.0/0.4.0] - 2024-11-08

Added

  • Add the WithSource option to the go.opentelemetry.io/contrib/bridges/otelslog log bridge to set the code.* attributes in the log record that includes the source location where the record was emitted. (#6253)
  • Add ContextWithStartTime and StartTimeFromContext to go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp, which allows setting the start time using go context. (#6137)
  • Set the code.* attributes in go.opentelemetry.io/contrib/bridges/otelzap if the zap.Logger was created with the AddCaller or AddStacktrace option. (#6268)
  • Add a LogProcessor to go.opentelemetry.io/contrib/processors/baggagecopy to copy baggage members to log records. (#6277)
    • Use baggagecopy.NewLogProcessor when configuring a Log Provider.
      • NewLogProcessor accepts a Filter function type that selects which baggage members are added to the log record.

Changed

  • Transform raw (slog.KindAny) attribute values to matching log.Value types. For example, []string{"foo", "bar"} attribute value is now transformed to log.SliceValue(log.StringValue("foo"), log.StringValue("bar")) instead of log.String("[foo bar"]). (#6254)
  • Upgrade go.opentelemetry.io/otel/semconv/v1.17.0 to go.opentelemetry.io/otel/semconv/v1.21.0 in go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo. (#6272)
  • Resource doesn't merge with defaults if a valid resource is configured in go.opentelemetry.io/contrib/config. (#6289)

Fixed

  • Transform nil attribute values to log.Value zero value instead of panicking in go.opentelemetry.io/contrib/bridges/otellogrus. (#6237)
  • Transform nil attribute values to log.Value zero value instead of panicking in go.opentelemetry.io/contrib/bridges/otelzap. (#6237)
  • Transform nil attribute values to log.Value zero value instead of log.StringValue("<nil>") in go.opentelemetry.io/contrib/bridges/otelslog. (#6246)
  • Fix NewClientHandler so that rpc.client.request.* metrics measure requests instead of responses and rpc.client.responses.* metrics measure responses instead of requests in go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc. (#6250)
  • Fix issue in go.opentelemetry.io/contrib/config causing otelprom.WithResourceAsConstantLabels configuration to not be respected. (#6260)
  • otel.Handle is no longer called on a successful shutdown of the Prometheus exporter in go.opentelemetry.io/contrib/config. (#6299)
Commits
  • 519f10d Release v1.32.0/v0.57.0/v0.26.0/v0.12.0/v0.7.0/v0.5.0/v0.4.0 (#6311)
  • bb01131 fix(deps): update module google.golang.org/grpc to v1.68.0 (#6306)
  • b559799 fix(deps): update aws-sdk-go-v2 monorepo (#6308)
  • 0beb27c fix(deps): update module go.opentelemetry.io/build-tools/multimod to v0.15.0 ...
  • 4e7a11a chore(deps): update module github.com/klauspost/cpuid/v2 to v2.2.9 (#6304)
  • 8e0db19 config: don't log an error on close (#6299)
  • da04e2d fix(deps): update module go.opentelemetry.io/build-tools/crosslink to v0.15.0...
  • f658e10 fix(deps): update module go.opentelemetry.io/build-tools/gotmpl to v0.15.0 (#...
  • 6b29ae8 chore(deps): update otel/opentelemetry-collector-contrib docker tag to v0.113...
  • 68e20fe fix(deps): update aws-sdk-go-v2 monorepo (#6298)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp&package-manager=go_modules&previous-version=0.56.0&new-version=0.57.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d72af2d22c..c5c62a5902 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 diff --git a/go.sum b/go.sum index ff75a96fbe..e1627ecb94 100644 --- a/go.sum +++ b/go.sum @@ -544,8 +544,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= From 683918a74c24f78619ada0e92c1f87fb029d0f0b Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Tue, 12 Nov 2024 10:00:07 
+0100 Subject: [PATCH 76/89] cmd: test command UX (#3370) Multiple changes to improve UX of the test command: 1. Add `charon test all` that runs all 5 test commands 2. Shorten the ENRs and MEV relays URLs hashes when outputting results 3. Sort the targets of the tests (so that it's easier to follow consecutive re-ran tests) Also do some refactoring of the functions. category: feature ticket: none --- cmd/ascii.go | 2 +- cmd/cmd.go | 1 + cmd/test.go | 88 +++- cmd/testall.go | 97 ++++ cmd/testbeacon.go | 240 ++++----- cmd/testmev.go | 71 +-- cmd/testpeers.go | 857 ++++++++++++++++----------------- cmd/testpeers_internal_test.go | 24 +- cmd/testperformance.go | 382 +++++++-------- cmd/testvalidator.go | 84 ++-- 10 files changed, 958 insertions(+), 888 deletions(-) create mode 100644 cmd/testall.go diff --git a/cmd/ascii.go b/cmd/ascii.go index ca81f1fcbe..54603a69c7 100644 --- a/cmd/ascii.go +++ b/cmd/ascii.go @@ -39,7 +39,7 @@ func validatorASCII() []string { func mevASCII() []string { return []string{ - "__ __ ________ __ ", + " __ __ ________ __ ", "| \\/ | ____\\ \\ / / ", "| \\ / | |__ \\ \\ / / ", "| |\\/| | __| \\ \\/ / ", diff --git a/cmd/cmd.go b/cmd/cmd.go index 1eda3c9cdb..51dc3a7f83 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -48,6 +48,7 @@ func New() *cobra.Command { newCombineCmd(newCombineFunc), newAlphaCmd( newTestCmd( + newTestAllCmd(runTestAll), newTestPeersCmd(runTestPeers), newTestBeaconCmd(runTestBeacon), newTestValidatorCmd(runTestValidator), diff --git a/cmd/test.go b/cmd/test.go index 5e62f1efc3..79d2bc9883 100644 --- a/cmd/test.go +++ b/cmd/test.go @@ -6,8 +6,11 @@ import ( "context" "fmt" "io" + "net/http" + "net/http/httptrace" "os" "os/signal" + "slices" "sort" "strings" "syscall" @@ -16,6 +19,7 @@ import ( "github.com/pelletier/go-toml/v2" "github.com/spf13/cobra" + "github.com/spf13/pflag" "golang.org/x/exp/maps" "github.com/obolnetwork/charon/app/errors" @@ -34,6 +38,7 @@ const ( validatorTestCategory = "validator" mevTestCategory = 
"mev" performanceTestCategory = "performance" + allTestCategory = "all" ) type testConfig struct { @@ -62,6 +67,13 @@ func bindTestFlags(cmd *cobra.Command, config *testConfig) { cmd.Flags().BoolVar(&config.Quiet, "quiet", false, "Do not print test results to stdout.") } +func bindTestLogFlags(flags *pflag.FlagSet, config *log.Config) { + flags.StringVar(&config.Format, "log-format", "console", "Log format; console, logfmt or json") + flags.StringVar(&config.Level, "log-level", "info", "Log level; debug, info, warn or error") + flags.StringVar(&config.Color, "log-color", "auto", "Log color; auto, force, disable.") + flags.StringVar(&config.LogOutputPath, "log-output-path", "", "Path in which to write on-disk logs.") +} + func listTestCases(cmd *cobra.Command) []string { var testCaseNames []testCaseName switch cmd.Name() { @@ -76,6 +88,16 @@ func listTestCases(cmd *cobra.Command) []string { testCaseNames = maps.Keys(supportedMEVTestCases()) case performanceTestCategory: testCaseNames = maps.Keys(supportedPerformanceTestCases()) + case allTestCategory: + testCaseNames = slices.Concat( + maps.Keys(supportedPeerTestCases()), + maps.Keys(supportedSelfTestCases()), + maps.Keys(supportedRelayTestCases()), + maps.Keys(supportedBeaconTestCases()), + maps.Keys(supportedValidatorTestCases()), + maps.Keys(supportedMEVTestCases()), + maps.Keys(supportedPerformanceTestCases()), + ) default: log.Warn(cmd.Context(), "Unknown command for listing test cases", nil, z.Str("name", cmd.Name())) } @@ -229,12 +251,14 @@ func writeResultToWriter(res testCategoryResult, w io.Writer) error { lines = append(lines, "") lines = append(lines, fmt.Sprintf("%-64s%s", "TEST NAME", "RESULT")) suggestions := []string{} - for target, testResults := range res.Targets { - if target != "" && len(testResults) > 0 { + targets := maps.Keys(res.Targets) + slices.Sort(targets) + for _, target := range targets { + if target != "" && len(res.Targets[target]) > 0 { lines = append(lines, "") lines = append(lines, 
target) } - for _, singleTestRes := range testResults { + for _, singleTestRes := range res.Targets[target] { testOutput := "" testOutput += fmt.Sprintf("%-64s", singleTestRes.Name) if singleTestRes.Measurement != "" { @@ -273,6 +297,30 @@ func writeResultToWriter(res testCategoryResult, w io.Writer) error { return nil } +func evaluateHighestRTTScores(testResCh chan time.Duration, testRes testResult, avg time.Duration, poor time.Duration) testResult { + highestRTT := time.Duration(0) + for rtt := range testResCh { + if rtt > highestRTT { + highestRTT = rtt + } + } + + return evaluateRTT(highestRTT, testRes, avg, poor) +} + +func evaluateRTT(rtt time.Duration, testRes testResult, avg time.Duration, poor time.Duration) testResult { + if rtt == 0 || rtt > poor { + testRes.Verdict = testVerdictPoor + } else if rtt > avg { + testRes.Verdict = testVerdictAvg + } else { + testRes.Verdict = testVerdictGood + } + testRes.Measurement = Duration{rtt}.String() + + return testRes +} + func calculateScore(results []testResult) categoryScore { // TODO(kalo): calculate score more elaborately (potentially use weights) avg := 0 @@ -348,3 +396,37 @@ func sleepWithContext(ctx context.Context, d time.Duration) { case <-timer.C: } } + +func requestRTT(ctx context.Context, url string, method string, body io.Reader, expectedStatus int) (time.Duration, error) { + var start time.Time + var firstByte time.Duration + + trace := &httptrace.ClientTrace{ + GotFirstResponseByte: func() { + firstByte = time.Since(start) + }, + } + + start = time.Now() + req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), method, url, body) + if err != nil { + return 0, errors.Wrap(err, "create new request with trace and context") + } + + resp, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if resp.StatusCode != expectedStatus { + data, err := io.ReadAll(resp.Body) + if err != nil { + log.Warn(ctx, "Unexpected status code", 
nil, z.Int("status_code", resp.StatusCode), z.Int("expected_status_code", expectedStatus), z.Str("endpoint", url)) + } else { + log.Warn(ctx, "Unexpected status code", nil, z.Int("status_code", resp.StatusCode), z.Int("expected_status_code", expectedStatus), z.Str("endpoint", url), z.Str("body", string(data))) + } + } + + return firstByte, nil +} diff --git a/cmd/testall.go b/cmd/testall.go new file mode 100644 index 0000000000..7aab22fe93 --- /dev/null +++ b/cmd/testall.go @@ -0,0 +1,97 @@ +// Copyright © 2022-2024 Obol Labs Inc. Licensed under the terms of a Business Source License 1.1 + +package cmd + +import ( + "context" + "io" + + "github.com/spf13/cobra" + + "github.com/obolnetwork/charon/app/errors" +) + +type testAllConfig struct { + testConfig + Peers testPeersConfig + Beacon testBeaconConfig + Validator testValidatorConfig + MEV testMEVConfig + Performance testPerformanceConfig +} + +func newTestAllCmd(runFunc func(context.Context, io.Writer, testAllConfig) error) *cobra.Command { + var config testAllConfig + + cmd := &cobra.Command{ + Use: "all", + Short: "Run tests towards peer nodes, beacon nodes, validator client, MEV relays, own hardware and internet connectivity.", + Long: `Run tests towards peer nodes, beacon nodes, validator client, MEV relays, own hardware and internet connectivity. 
Verify that Charon can efficiently do its duties on the tested setup.`, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, _ []string) error { + return mustOutputToFileOnQuiet(cmd) + }, + RunE: func(cmd *cobra.Command, _ []string) error { + return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + }, + } + + bindTestFlags(cmd, &config.testConfig) + + bindTestPeersFlags(cmd, &config.Peers, "peers-") + bindTestBeaconFlags(cmd, &config.Beacon, "beacon-") + bindTestValidatorFlags(cmd, &config.Validator, "validator-") + bindTestMEVFlags(cmd, &config.MEV, "mev-") + bindTestPerformanceFlags(cmd, &config.Performance, "performance-") + + bindP2PFlags(cmd, &config.Peers.P2P) + bindDataDirFlag(cmd.Flags(), &config.Peers.DataDir) + bindTestLogFlags(cmd.Flags(), &config.Peers.Log) + + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + testCasesPresent := cmd.Flags().Lookup("test-cases").Changed + + if testCasesPresent { + //nolint:revive // we use our own version of the errors package + return errors.New("test-cases cannot be specified when explicitly running all test cases.") + } + + return nil + }) + + return cmd +} + +func runTestAll(ctx context.Context, w io.Writer, cfg testAllConfig) (err error) { + cfg.Beacon.testConfig = cfg.testConfig + err = runTestBeacon(ctx, w, cfg.Beacon) + if err != nil { + return err + } + + cfg.Validator.testConfig = cfg.testConfig + err = runTestValidator(ctx, w, cfg.Validator) + if err != nil { + return err + } + + cfg.MEV.testConfig = cfg.testConfig + err = runTestMEV(ctx, w, cfg.MEV) + if err != nil { + return err + } + + cfg.Performance.testConfig = cfg.testConfig + err = runTestPerformance(ctx, w, cfg.Performance) + if err != nil { + return err + } + + cfg.Peers.testConfig = cfg.testConfig + err = runTestPeers(ctx, w, cfg.Peers) + if err != nil { + return err + } + + return nil +} diff --git a/cmd/testbeacon.go b/cmd/testbeacon.go index e865ff01f5..ce75cfd274 100644 --- a/cmd/testbeacon.go +++ b/cmd/testbeacon.go @@ 
-10,7 +10,6 @@ import ( "math" "math/rand" "net/http" - "net/http/httptrace" "os" "path/filepath" "sort" @@ -177,20 +176,18 @@ func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) } bindTestFlags(cmd, &config.testConfig) - bindTestBeaconFlags(cmd, &config) + bindTestBeaconFlags(cmd, &config, "") return cmd } -func bindTestBeaconFlags(cmd *cobra.Command, config *testBeaconConfig) { - const endpoints = "endpoints" - cmd.Flags().StringSliceVar(&config.Endpoints, endpoints, nil, "[REQUIRED] Comma separated list of one or more beacon node endpoint URLs.") - mustMarkFlagRequired(cmd, endpoints) - cmd.Flags().BoolVar(&config.LoadTest, "load-test", false, "Enable load test, not advisable when testing towards external beacon nodes.") - cmd.Flags().DurationVar(&config.LoadTestDuration, "load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned.") - cmd.Flags().StringVar(&config.SimulationFileDir, "simulation-file-dir", "./", "JSON directory to which simulation file results will be written.") - cmd.Flags().IntVar(&config.SimulationDuration, "simulation-duration-in-slots", slotsInEpoch, "Time to keep running the simulation in slots.") - cmd.Flags().BoolVar(&config.SimulationVerbose, "simulation-verbose", false, "Show results for each request and each validator.") +func bindTestBeaconFlags(cmd *cobra.Command, config *testBeaconConfig, flagsPrefix string) { + cmd.Flags().StringSliceVar(&config.Endpoints, flagsPrefix+"endpoints", nil, "[REQUIRED] Comma separated list of one or more beacon node endpoint URLs.") + cmd.Flags().BoolVar(&config.LoadTest, flagsPrefix+"load-test", false, "Enable load test, not advisable when testing towards external beacon nodes.") + cmd.Flags().DurationVar(&config.LoadTestDuration, flagsPrefix+"load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") + cmd.Flags().IntVar(&config.SimulationDuration, flagsPrefix+"simulation-duration-in-slots", slotsInEpoch, "Time to keep running the simulation in slots.") + cmd.Flags().BoolVar(&config.SimulationVerbose, flagsPrefix+"simulation-verbose", false, "Show results for each request and each validator.") + mustMarkFlagRequired(cmd, flagsPrefix+"endpoints") } func supportedBeaconTestCases() map[testCaseName]testCaseBeacon { @@ -210,6 +207,8 @@ func supportedBeaconTestCases() map[testCaseName]testCaseBeacon { } func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err error) { + log.Info(ctx, "Starting beacon node test") + testCases := supportedBeaconTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { @@ -266,6 +265,8 @@ func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err return nil } +// beacon node tests + func testAllBeacons(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseBeacon, conf testBeaconConfig, allBeaconsResCh chan map[string][]testResult) { defer close(allBeaconsResCh) // run tests for all beacon nodes @@ -366,36 +367,6 @@ func beaconPingTest(ctx context.Context, _ *testBeaconConfig, target string) tes return testRes } -func beaconPingOnce(ctx context.Context, target string) (time.Duration, error) { - var start time.Time - var firstByte time.Duration - - trace := &httptrace.ClientTrace{ - GotFirstResponseByte: func() { - firstByte = time.Since(start) - }, - } - - start = time.Now() - targetEndpoint := fmt.Sprintf("%v/eth/v1/node/health", target) - req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), http.MethodGet, targetEndpoint, nil) - if err != nil { - return 0, errors.Wrap(err, "create new request with trace and context") - } - - resp, err := http.DefaultTransport.RoundTrip(req) - if err != nil { - return 0, err - } - defer 
resp.Body.Close() - - if resp.StatusCode > 399 { - return 0, errors.New(httpStatusError(resp.StatusCode)) - } - - return firstByte, nil -} - func beaconPingMeasureTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { testRes := testResult{Name: "PingMeasure"} @@ -404,34 +375,11 @@ func beaconPingMeasureTest(ctx context.Context, _ *testBeaconConfig, target stri return failedTestResult(testRes, err) } - if rtt > thresholdBeaconMeasurePoor { - testRes.Verdict = testVerdictPoor - } else if rtt > thresholdBeaconMeasureAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{rtt}.String() + testRes = evaluateRTT(rtt, testRes, thresholdBeaconMeasureAvg, thresholdBeaconMeasurePoor) return testRes } -func pingBeaconContinuously(ctx context.Context, target string, resCh chan<- time.Duration) { - for { - rtt, err := beaconPingOnce(ctx, target) - if err != nil { - return - } - select { - case <-ctx.Done(): - return - case resCh <- rtt: - awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here - sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) - } - } -} - func beaconPingLoadTest(ctx context.Context, conf *testBeaconConfig, target string) testResult { testRes := testResult{Name: "BeaconLoad"} if !conf.LoadTest { @@ -465,20 +413,7 @@ func beaconPingLoadTest(ctx context.Context, conf *testBeaconConfig, target stri close(testResCh) log.Info(ctx, "Ping load tests finished", z.Any("target", target)) - highestRTT := time.Duration(0) - for rtt := range testResCh { - if rtt > highestRTT { - highestRTT = rtt - } - } - if highestRTT > thresholdBeaconLoadPoor { - testRes.Verdict = testVerdictPoor - } else if highestRTT > thresholdBeaconLoadAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{highestRTT}.String() + testRes = evaluateHighestRTTScores(testResCh, testRes, 
thresholdBeaconLoadAvg, thresholdBeaconLoadPoor) return testRes } @@ -578,6 +513,30 @@ func beaconPeerCountTest(ctx context.Context, _ *testBeaconConfig, target string return testRes } +// helper functions + +func beaconPingOnce(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/health", target), http.MethodGet, nil, 200) +} + +func pingBeaconContinuously(ctx context.Context, target string, resCh chan<- time.Duration) { + for { + rtt, err := beaconPingOnce(ctx, target) + if err != nil { + return + } + select { + case <-ctx.Done(): + return + case resCh <- rtt: + awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here + sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) + } + } +} + +// beacon simulation tests + func beaconSimulation1Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { testRes := testResult{Name: "BeaconSimulation1Validator"} if !conf.LoadTest { @@ -808,6 +767,8 @@ func beaconSimulationTest(ctx context.Context, conf *testBeaconConfig, target st return testRes } +// requests per 1 cluster + func singleClusterSimulation(ctx context.Context, simulationDuration time.Duration, target string, resultCh chan SimulationCluster, wgDone func()) { defer wgDone() // per slot requests @@ -1120,6 +1081,8 @@ func clusterGeneralRequests( } } +// requests per 1 validator + func singleValidatorSimulation(ctx context.Context, simulationDuration time.Duration, target string, resultCh chan SimulationSingleValidator, intensity RequestsIntensity, dutiesPerformed DutiesPerformed, wg *sync.WaitGroup) { defer wg.Done() // attestations @@ -1350,6 +1313,41 @@ func singleValidatorSimulation(ctx context.Context, simulationDuration time.Dura } } +func attestationDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, getAttestationDataCh chan time.Duration, submitAttestationObjectCh chan time.Duration) { + defer 
close(getAttestationDataCh) + defer close(submitAttestationObjectCh) + pingCtx, cancel := context.WithTimeout(ctx, simulationDuration) + defer cancel() + + time.Sleep(randomizeStart(tickTime)) + ticker := time.NewTicker(tickTime) + defer ticker.Stop() + slot, err := getCurrentSlot(ctx, target) + if err != nil { + log.Error(ctx, "Failed to get current slot", err) + slot = 1 + } + for pingCtx.Err() == nil { + getResult, err := getAttestationData(ctx, target, slot, rand.Intn(committeeSizePerSlot)) //nolint:gosec // weak generator is not an issue here + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getAttestationData failure", err) + } + getAttestationDataCh <- getResult + + submitResult, err := submitAttestationObject(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected submitAttestationObject failure", err) + } + submitAttestationObjectCh <- submitResult + + select { + case <-pingCtx.Done(): + case <-ticker.C: + slot += int(tickTime.Seconds()) / int(slotTime.Seconds()) + } + } +} + func aggregationDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, getAggregateAttestationsCh chan time.Duration, submitAggregateAndProofsCh chan time.Duration) { defer close(getAggregateAttestationsCh) defer close(submitAggregateAndProofsCh) @@ -1417,41 +1415,6 @@ func proposalDuty(ctx context.Context, target string, simulationDuration time.Du } } -func attestationDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, getAttestationDataCh chan time.Duration, submitAttestationObjectCh chan time.Duration) { - defer close(getAttestationDataCh) - defer close(submitAttestationObjectCh) - pingCtx, cancel := context.WithTimeout(ctx, simulationDuration) - defer cancel() - - time.Sleep(randomizeStart(tickTime)) - ticker := time.NewTicker(tickTime) - defer ticker.Stop() - slot, err := getCurrentSlot(ctx, target) - if err != nil { 
- log.Error(ctx, "Failed to get current slot", err) - slot = 1 - } - for pingCtx.Err() == nil { - getResult, err := getAttestationData(ctx, target, slot, rand.Intn(committeeSizePerSlot)) //nolint:gosec // weak generator is not an issue here - if err != nil && !errors.Is(err, context.Canceled) { - log.Error(ctx, "Unexpected getAttestationData failure", err) - } - getAttestationDataCh <- getResult - - submitResult, err := submitAttestationObject(ctx, target) - if err != nil && !errors.Is(err, context.Canceled) { - log.Error(ctx, "Unexpected submitAttestationObject failure", err) - } - submitAttestationObjectCh <- submitResult - - select { - case <-pingCtx.Done(): - case <-ticker.C: - slot += int(tickTime.Seconds()) / int(slotTime.Seconds()) - } - } -} - func syncCommitteeDuties( ctx context.Context, target string, simulationDuration time.Duration, tickTimeSubmit time.Duration, tickTimeSubscribe time.Duration, tickTimeContribution time.Duration, @@ -1538,6 +1501,8 @@ func syncCommitteeMessageDuty(ctx context.Context, target string, simulationDura } } +// simulation helper functions + func getCurrentSlot(ctx context.Context, target string) (int, error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, target+"/eth/v1/node/syncing", nil) if err != nil { @@ -1723,41 +1688,8 @@ func randomizeStart(tickTime time.Duration) time.Duration { return slotTime * time.Duration(rand.Intn(int((tickTime / slotTime)))) //nolint:gosec // weak generator is not an issue here } -func requestRTT(ctx context.Context, url string, method string, body io.Reader, expectedStatus int) (time.Duration, error) { - var start time.Time - var firstByte time.Duration +// simulation http requests - cluster - trace := &httptrace.ClientTrace{ - GotFirstResponseByte: func() { - firstByte = time.Since(start) - }, - } - - start = time.Now() - req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), method, url, body) - if err != nil { - return 0, errors.Wrap(err, "create new 
request with trace and context") - } - - resp, err := http.DefaultTransport.RoundTrip(req) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if resp.StatusCode != expectedStatus { - data, err := io.ReadAll(resp.Body) - if err != nil { - log.Warn(ctx, "Unexpected status code", nil, z.Int("status_code", resp.StatusCode), z.Int("expected_status_code", expectedStatus), z.Str("endpoint", url)) - } else { - log.Warn(ctx, "Unexpected status code", nil, z.Int("status_code", resp.StatusCode), z.Int("expected_status_code", expectedStatus), z.Str("endpoint", url), z.Str("body", string(data))) - } - } - - return firstByte, nil -} - -// cluster requests func getAttestationsForBlock(ctx context.Context, target string, block int) (time.Duration, error) { return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/blocks/%v/attestations", target, block), http.MethodGet, nil, 200) } @@ -1811,7 +1743,8 @@ func nodeVersion(ctx context.Context, target string) (time.Duration, error) { return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/version", target), http.MethodGet, nil, 200) } -// attestation duty requests +// simulation http requests - attestation duty + func getAttestationData(ctx context.Context, target string, slot int, committeeIndex int) (time.Duration, error) { return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/attestation_data?slot=%v&committee_index=%v", target, slot, committeeIndex), http.MethodGet, nil, 200) } @@ -1821,7 +1754,8 @@ func submitAttestationObject(ctx context.Context, target string) (time.Duration, return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/pool/attestations", target), http.MethodPost, body, 400) } -// aggregation duty requests +// simulation http requests - aggregation duty + func getAggregateAttestations(ctx context.Context, target string, slot int, attestationDataRoot string) (time.Duration, error) { return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/aggregate_attestation?slot=%v&attestation_data_root=%v", target, slot, 
attestationDataRoot), http.MethodGet, nil, 404) } @@ -1831,7 +1765,8 @@ func postAggregateAndProofs(ctx context.Context, target string) (time.Duration, return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/aggregate_and_proofs", target), http.MethodPost, body, 400) } -// proposal duty requests +// simulation http requests - proposal duty + func produceBlock(ctx context.Context, target string, slot int, randaoReveal string) (time.Duration, error) { return requestRTT(ctx, fmt.Sprintf("%v/eth/v3/validator/blocks/%v?randao_reveal=%v", target, slot, randaoReveal), http.MethodGet, nil, 200) } @@ -1841,7 +1776,8 @@ func publishBlindedBlock(ctx context.Context, target string) (time.Duration, err return requestRTT(ctx, fmt.Sprintf("%v/eth/v2/beacon/blinded", target), http.MethodPost, body, 404) } -// sync committee duty requests +// simulation http requests - sync committee duty + func submitSyncCommittee(ctx context.Context, target string) (time.Duration, error) { body := strings.NewReader(`{{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}`) return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/pool/sync_committees", target), http.MethodPost, body, 400) diff --git a/cmd/testmev.go b/cmd/testmev.go index 709b118572..0bf49e395e 100644 --- a/cmd/testmev.go +++ b/cmd/testmev.go @@ -7,7 +7,7 @@ import ( "fmt" "io" "net/http" - "net/http/httptrace" + "strings" "time" "github.com/spf13/cobra" @@ -15,6 +15,7 @@ import ( "golang.org/x/sync/errgroup" 
"github.com/obolnetwork/charon/app/errors" + "github.com/obolnetwork/charon/app/log" ) type testMEVConfig struct { @@ -34,8 +35,8 @@ func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) error cmd := &cobra.Command{ Use: "mev", - Short: "Run multiple tests towards mev nodes", - Long: `Run multiple tests towards mev nodes. Verify that Charon can efficiently interact with MEV Node(s).`, + Short: "Run multiple tests towards MEV relays", + Long: `Run multiple tests towards MEV relays. Verify that Charon can efficiently interact with MEV relay(s).`, Args: cobra.NoArgs, PreRunE: func(cmd *cobra.Command, _ []string) error { return mustOutputToFileOnQuiet(cmd) @@ -46,13 +47,13 @@ func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) error } bindTestFlags(cmd, &config.testConfig) - bindTestMEVFlags(cmd, &config) + bindTestMEVFlags(cmd, &config, "") return cmd } -func bindTestMEVFlags(cmd *cobra.Command, config *testMEVConfig) { - const endpoints = "endpoints" +func bindTestMEVFlags(cmd *cobra.Command, config *testMEVConfig, flagsPrefix string) { + endpoints := flagsPrefix + "endpoints" cmd.Flags().StringSliceVar(&config.Endpoints, endpoints, nil, "[REQUIRED] Comma separated list of one or more MEV relay endpoint URLs.") mustMarkFlagRequired(cmd, endpoints) } @@ -65,6 +66,8 @@ func supportedMEVTestCases() map[testCaseName]testCaseMEV { } func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (err error) { + log.Info(ctx, "Starting MEV relays test") + testCases := supportedMEVTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { @@ -121,6 +124,8 @@ func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (err error) return nil } +// mev relays tests + func testAllMEVs(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseMEV, conf testMEVConfig, allMEVsResCh chan map[string][]testResult) { defer close(allMEVsResCh) // run 
tests for all mev nodes @@ -179,7 +184,8 @@ func testSingleMEV(ctx context.Context, queuedTestCases []testCaseName, allTestC } } - resCh <- map[string][]testResult{target: allTestRes} + relayName := formatMEVRelayName(target) + resCh <- map[string][]testResult{relayName: allTestRes} return nil } @@ -223,40 +229,35 @@ func mevPingTest(ctx context.Context, _ *testMEVConfig, target string) testResul func mevPingMeasureTest(ctx context.Context, _ *testMEVConfig, target string) testResult { testRes := testResult{Name: "PingMeasure"} - var start time.Time - var firstByte time.Duration - - trace := &httptrace.ClientTrace{ - GotFirstResponseByte: func() { - firstByte = time.Since(start) - }, - } - - start = time.Now() - targetEndpoint := fmt.Sprintf("%v/eth/v1/builder/status", target) - req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), http.MethodGet, targetEndpoint, nil) + rtt, err := requestRTT(ctx, fmt.Sprintf("%v/eth/v1/builder/status", target), http.MethodGet, nil, 200) if err != nil { return failedTestResult(testRes, err) } - resp, err := http.DefaultTransport.RoundTrip(req) - if err != nil { - return failedTestResult(testRes, err) - } - defer resp.Body.Close() + testRes = evaluateRTT(rtt, testRes, thresholdMEVMeasureAvg, thresholdMEVMeasurePoor) - if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) - } + return testRes +} + +// helper functions - if firstByte > thresholdMEVMeasurePoor { - testRes.Verdict = testVerdictPoor - } else if firstByte > thresholdMEVMeasureAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood +// Shorten the hash of the MEV relay endpoint +// Example: https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net +// to https://0xac6e...37ae@boost-relay.flashbots.net +func formatMEVRelayName(urlString string) string { + splitScheme := 
strings.Split(urlString, "://") + if len(splitScheme) == 1 { + return urlString + } + hashSplit := strings.Split(splitScheme[1], "@") + if len(hashSplit) == 1 { + return urlString } - testRes.Measurement = Duration{firstByte}.String() + hash := hashSplit[0] + if !strings.HasPrefix(hash, "0x") || len(hash) < 18 { + return urlString + } + hashShort := hash[:6] + "..." + hash[len(hash)-4:] - return testRes + return splitScheme[0] + "://" + hashShort + "@" + hashSplit[1] } diff --git a/cmd/testpeers.go b/cmd/testpeers.go index 77312360ea..987c448c58 100644 --- a/cmd/testpeers.go +++ b/cmd/testpeers.go @@ -13,7 +13,6 @@ import ( "math/rand" "net" "net/http" - "net/http/httptrace" "os" "slices" "strings" @@ -26,7 +25,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" "github.com/spf13/cobra" - "github.com/spf13/pflag" "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" @@ -83,7 +81,7 @@ func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) e } bindTestFlags(cmd, &config.testConfig) - bindTestPeersFlags(cmd, &config) + bindTestPeersFlags(cmd, &config, "") bindP2PFlags(cmd, &config.P2P) bindDataDirFlag(cmd.Flags(), &config.DataDir) bindTestLogFlags(cmd.Flags(), &config.Log) @@ -116,21 +114,13 @@ func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) e return cmd } -func bindTestPeersFlags(cmd *cobra.Command, config *testPeersConfig) { - const enrs = "enrs" - cmd.Flags().StringSliceVar(&config.ENRs, enrs, nil, "Comma-separated list of each peer ENR address.") - cmd.Flags().DurationVar(&config.KeepAlive, "keep-alive", 30*time.Minute, "Time to keep TCP node alive after test completion, so connection is open for other peers to test on their end.") - cmd.Flags().DurationVar(&config.LoadTestDuration, "load-test-duration", 30*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") - cmd.Flags().DurationVar(&config.DirectConnectionTimeout, "direct-connection-timeout", 2*time.Minute, "Time to keep trying to establish direct connection to peer.") - cmd.Flags().StringVar(&config.ClusterLockFilePath, "cluster-lock-file-path", "", "Path to cluster lock file, used to fetch peers' ENR addresses.") - cmd.Flags().StringVar(&config.ClusterDefinitionFilePath, "cluster-definition-file-path", "", "Path to cluster definition file, used to fetch peers' ENR addresses.") -} - -func bindTestLogFlags(flags *pflag.FlagSet, config *log.Config) { - flags.StringVar(&config.Format, "log-format", "console", "Log format; console, logfmt or json") - flags.StringVar(&config.Level, "log-level", "info", "Log level; debug, info, warn or error") - flags.StringVar(&config.Color, "log-color", "auto", "Log color; auto, force, disable.") - flags.StringVar(&config.LogOutputPath, "log-output-path", "", "Path in which to write on-disk logs.") +func bindTestPeersFlags(cmd *cobra.Command, config *testPeersConfig, flagsPrefix string) { + cmd.Flags().StringSliceVar(&config.ENRs, flagsPrefix+"enrs", nil, "[REQUIRED] Comma-separated list of each peer ENR address.") + cmd.Flags().DurationVar(&config.KeepAlive, flagsPrefix+"keep-alive", 30*time.Minute, "Time to keep TCP node alive after test completion, so connection is open for other peers to test on their end.") + cmd.Flags().DurationVar(&config.LoadTestDuration, flagsPrefix+"load-test-duration", 30*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") + cmd.Flags().DurationVar(&config.DirectConnectionTimeout, flagsPrefix+"direct-connection-timeout", 2*time.Minute, "Time to keep trying to establish direct connection to peer.") + cmd.Flags().StringVar(&config.ClusterLockFilePath, flagsPrefix+"cluster-lock-file-path", "", "Path to cluster lock file, used to fetch peers' ENR addresses.") + cmd.Flags().StringVar(&config.ClusterDefinitionFilePath, flagsPrefix+"cluster-definition-file-path", "", "Path to cluster definition file, used to fetch peers' ENR addresses.") } func supportedPeerTestCases() map[testCaseName]testCasePeer { @@ -155,196 +145,9 @@ func supportedSelfTestCases() map[testCaseName]testCasePeerSelf { } } -func fetchPeersFromDefinition(path string) ([]string, error) { - f, err := os.ReadFile(path) - if err != nil { - return nil, errors.Wrap(err, "read definition file", z.Str("path", path)) - } - - var def cluster.Definition - err = json.Unmarshal(f, &def) - if err != nil { - return nil, errors.Wrap(err, "unmarshal definition json", z.Str("path", path)) - } - - var enrs []string - for _, o := range def.Operators { - enrs = append(enrs, o.ENR) - } - - if len(enrs) == 0 { - return nil, errors.New("no peers found in lock", z.Str("path", path)) - } - - return enrs, nil -} - -func fetchPeersFromLock(path string) ([]string, error) { - f, err := os.ReadFile(path) - if err != nil { - return nil, errors.Wrap(err, "read lock file", z.Str("path", path)) - } - - var lock cluster.Lock - err = json.Unmarshal(f, &lock) - if err != nil { - return nil, errors.Wrap(err, "unmarshal lock json", z.Str("path", path)) - } - - var enrs []string - for _, o := range lock.Operators { - enrs = append(enrs, o.ENR) - } - - if len(enrs) == 0 { - return nil, errors.New("no peers found in lock", z.Str("path", path)) - } - - return enrs, nil -} - -func fetchENRs(conf testPeersConfig) ([]string, error) { - var enrs []string - var err error - switch { - case len(conf.ENRs) != 0: - 
enrs = conf.ENRs - case conf.ClusterDefinitionFilePath != "": - enrs, err = fetchPeersFromDefinition(conf.ClusterDefinitionFilePath) - if err != nil { - return nil, err - } - case conf.ClusterLockFilePath != "": - enrs, err = fetchPeersFromLock(conf.ClusterLockFilePath) - if err != nil { - return nil, err - } - } - - return enrs, nil -} - -func startTCPNode(ctx context.Context, conf testPeersConfig) (host.Host, func(), error) { - enrs, err := fetchENRs(conf) - if err != nil { - return nil, nil, err - } - - var peers []p2p.Peer - for i, enrString := range enrs { - enrRecord, err := enr.Parse(enrString) - if err != nil { - return nil, nil, errors.Wrap(err, "decode enr", z.Str("enr", enrString)) - } - - p2pPeer, err := p2p.NewPeerFromENR(enrRecord, i) - if err != nil { - return nil, nil, err - } - - peers = append(peers, p2pPeer) - } - - p2pPrivKey, err := p2p.LoadPrivKey(conf.DataDir) - if err != nil { - return nil, nil, err - } - - meENR, err := enr.New(p2pPrivKey) - if err != nil { - return nil, nil, err - } - - mePeer, err := p2p.NewPeerFromENR(meENR, len(enrs)) - if err != nil { - return nil, nil, err - } - - log.Info(ctx, "Self p2p name resolved", z.Any("name", mePeer.Name)) - - peers = append(peers, mePeer) - - allENRs := enrs - allENRs = append(allENRs, meENR.String()) - slices.Sort(allENRs) - allENRsString := strings.Join(allENRs, ",") - allENRsHash := sha256.Sum256([]byte(allENRsString)) - - return setupP2P(ctx, p2pPrivKey, conf.P2P, peers, allENRsHash[:]) -} - -func setupP2P(ctx context.Context, privKey *k1.PrivateKey, conf p2p.Config, peers []p2p.Peer, enrsHash []byte) (host.Host, func(), error) { - var peerIDs []peer.ID - for _, peer := range peers { - peerIDs = append(peerIDs, peer.ID) - } - - if err := p2p.VerifyP2PKey(peers, privKey); err != nil { - return nil, nil, err - } - - relays, err := p2p.NewRelays(ctx, conf.Relays, hex.EncodeToString(enrsHash)) - if err != nil { - return nil, nil, err - } - - connGater, err := p2p.NewConnGater(peerIDs, relays) 
- if err != nil { - return nil, nil, err - } - - tcpNode, err := p2p.NewTCPNode(ctx, conf, privKey, connGater, false) - if err != nil { - return nil, nil, err - } - - p2p.RegisterConnectionLogger(ctx, tcpNode, peerIDs) - - for _, relay := range relays { - go p2p.NewRelayReserver(tcpNode, relay)(ctx) - } - - go p2p.NewRelayRouter(tcpNode, peerIDs, relays)(ctx) - - return tcpNode, func() { - err := tcpNode.Close() - if err != nil && !errors.Is(err, context.Canceled) { - log.Error(ctx, "Close TCP node", err) - } - }, nil -} - -func pingPeerOnce(ctx context.Context, tcpNode host.Host, peer p2p.Peer) (ping.Result, error) { - pingSvc := ping.NewPingService(tcpNode) - pingCtx, cancel := context.WithCancel(ctx) - defer cancel() - pingChan := pingSvc.Ping(pingCtx, peer.ID) - result, ok := <-pingChan - if !ok { - return ping.Result{}, errors.New("ping channel closed") - } - - return result, nil -} - -func pingPeerContinuously(ctx context.Context, tcpNode host.Host, peer p2p.Peer, resCh chan<- ping.Result) { - for { - r, err := pingPeerOnce(ctx, tcpNode, peer) - if err != nil { - return - } - - select { - case <-ctx.Done(): - return - case resCh <- r: - awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here - sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) - } - } -} - func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error { + log.Info(ctx, "Starting charon peers and relays test") + relayTestCases := supportedRelayTestCases() queuedTestsRelay := filterTests(maps.Keys(relayTestCases), conf.testConfig) sortTests(queuedTestsRelay) @@ -438,57 +241,77 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error return nil } -func testAllRelays(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, allRelaysResCh chan map[string][]testResult) error { - // run tests for all relays - allRelayRes := make(map[string][]testResult) - 
singleRelayResCh := make(chan map[string][]testResult) +// charon peers tests + +func testAllPeers(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeer, conf testPeersConfig, tcpNode host.Host, allPeersResCh chan map[string][]testResult) error { + // run tests for all peer nodes + allPeersRes := make(map[string][]testResult) + singlePeerResCh := make(chan map[string][]testResult) group, _ := errgroup.WithContext(ctx) - for _, relay := range conf.P2P.Relays { + enrs, err := fetchENRs(conf) + if err != nil { + return err + } + for _, enr := range enrs { + currENR := enr // TODO: can be removed after go1.22 version bump group.Go(func() error { - return testSingleRelay(ctx, queuedTestCases, allTestCases, conf, relay, singleRelayResCh) + return testSinglePeer(ctx, queuedTestCases, allTestCases, conf, tcpNode, currENR, singlePeerResCh) }) } doneReading := make(chan bool) go func() { - for singleRelayRes := range singleRelayResCh { - maps.Copy(allRelayRes, singleRelayRes) + for singlePeerRes := range singlePeerResCh { + maps.Copy(allPeersRes, singlePeerRes) } doneReading <- true }() - err := group.Wait() + err = group.Wait() if err != nil { - return errors.Wrap(err, "relays test errgroup") + return errors.Wrap(err, "peers test errgroup") } - close(singleRelayResCh) + close(singlePeerResCh) <-doneReading - allRelaysResCh <- allRelayRes + allPeersResCh <- allPeersRes return nil } -func testSingleRelay(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, target string, allTestResCh chan map[string][]testResult) error { +func testSinglePeer(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeer, conf testPeersConfig, tcpNode host.Host, target string, allTestResCh chan map[string][]testResult) error { singleTestResCh := make(chan testResult) allTestRes := []testResult{} - relayName := fmt.Sprintf("relay %v", target) + enrTarget, 
err := enr.Parse(target) + if err != nil { + return err + } + peerTarget, err := p2p.NewPeerFromENR(enrTarget, 0) + if err != nil { + return err + } + + formatENR := target[:13] + "..." + target[len(target)-4:] // enr:- + first 8 chars + ... + last 4 chars + nameENR := fmt.Sprintf("peer %v %v", peerTarget.Name, formatENR) + if len(queuedTestCases) == 0 { - allTestResCh <- map[string][]testResult{relayName: allTestRes} + allTestResCh <- map[string][]testResult{nameENR: allTestRes} return nil } - // run all relay tests for a relay, pushing each completed test to the channel until all are complete or timeout occurs - go runRelayTest(ctx, queuedTestCases, allTestCases, conf, target, singleTestResCh) + // run all peers tests for a peer, pushing each completed test to the channel until all are complete or timeout occurs + go runPeerTest(ctx, queuedTestCases, allTestCases, conf, tcpNode, peerTarget, singleTestResCh) testCounter := 0 finished := false for !finished { var testName string select { case <-ctx.Done(): - testName = queuedTestCases[testCounter].name - allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) + if testCounter < len(queuedTestCases) { + testName = queuedTestCases[testCounter].name + allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) + } finished = true case result, ok := <-singleTestResCh: if !ok { @@ -502,165 +325,20 @@ func testSingleRelay(ctx context.Context, queuedTestCases []testCaseName, allTes } } - allTestResCh <- map[string][]testResult{relayName: allTestRes} + allTestResCh <- map[string][]testResult{nameENR: allTestRes} return nil } -func runRelayTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, target string, testResCh chan testResult) { +func runPeerTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases 
map[testCaseName]testCasePeer, conf testPeersConfig, tcpNode host.Host, target p2p.Peer, testResCh chan testResult) { defer close(testResCh) for _, t := range queuedTestCases { select { case <-ctx.Done(): + testResCh <- failedTestResult(testResult{Name: t.name}, errTimeoutInterrupted) return default: - testResCh <- allTestCases[t](ctx, &conf, target) - } - } -} - -func testAllPeers(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeer, conf testPeersConfig, tcpNode host.Host, allPeersResCh chan map[string][]testResult) error { - // run tests for all peer nodes - allPeersRes := make(map[string][]testResult) - singlePeerResCh := make(chan map[string][]testResult) - group, _ := errgroup.WithContext(ctx) - - enrs, err := fetchENRs(conf) - if err != nil { - return err - } - for _, enr := range enrs { - currENR := enr // TODO: can be removed after go1.22 version bump - group.Go(func() error { - return testSinglePeer(ctx, queuedTestCases, allTestCases, conf, tcpNode, currENR, singlePeerResCh) - }) - } - - doneReading := make(chan bool) - go func() { - for singlePeerRes := range singlePeerResCh { - maps.Copy(allPeersRes, singlePeerRes) - } - doneReading <- true - }() - - err = group.Wait() - if err != nil { - return errors.Wrap(err, "peers test errgroup") - } - close(singlePeerResCh) - <-doneReading - - allPeersResCh <- allPeersRes - - return nil -} - -func testSinglePeer(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeer, conf testPeersConfig, tcpNode host.Host, target string, allTestResCh chan map[string][]testResult) error { - singleTestResCh := make(chan testResult) - allTestRes := []testResult{} - enrTarget, err := enr.Parse(target) - if err != nil { - return err - } - peerTarget, err := p2p.NewPeerFromENR(enrTarget, 0) - if err != nil { - return err - } - - nameENR := fmt.Sprintf("peer %v %v", peerTarget.Name, target) - - if len(queuedTestCases) == 0 { - allTestResCh <- 
map[string][]testResult{nameENR: allTestRes} - return nil - } - - // run all peers tests for a peer, pushing each completed test to the channel until all are complete or timeout occurs - go runPeerTest(ctx, queuedTestCases, allTestCases, conf, tcpNode, peerTarget, singleTestResCh) - testCounter := 0 - finished := false - for !finished { - var testName string - select { - case <-ctx.Done(): - if testCounter < len(queuedTestCases) { - testName = queuedTestCases[testCounter].name - allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) - } - finished = true - case result, ok := <-singleTestResCh: - if !ok { - finished = true - continue - } - testName = queuedTestCases[testCounter].name - testCounter++ - result.Name = testName - allTestRes = append(allTestRes, result) - } - } - - allTestResCh <- map[string][]testResult{nameENR: allTestRes} - - return nil -} - -func runPeerTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeer, conf testPeersConfig, tcpNode host.Host, target p2p.Peer, testResCh chan testResult) { - defer close(testResCh) - for _, t := range queuedTestCases { - select { - case <-ctx.Done(): - testResCh <- failedTestResult(testResult{Name: t.name}, errTimeoutInterrupted) - return - default: - testResCh <- allTestCases[t](ctx, &conf, tcpNode, target) - } - } -} - -func testSelf(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeerSelf, conf testPeersConfig, allTestResCh chan map[string][]testResult) error { - singleTestResCh := make(chan testResult) - allTestRes := []testResult{} - if len(queuedTestCases) == 0 { - allTestResCh <- map[string][]testResult{"self": allTestRes} - return nil - } - go runSelfTest(ctx, queuedTestCases, allTestCases, conf, singleTestResCh) - - testCounter := 0 - finished := false - for !finished { - var testName string - select { - case <-ctx.Done(): - testName = 
queuedTestCases[testCounter].name - allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) - finished = true - case result, ok := <-singleTestResCh: - if !ok { - finished = true - continue - } - testName = queuedTestCases[testCounter].name - testCounter++ - result.Name = testName - allTestRes = append(allTestRes, result) - } - } - - allTestResCh <- map[string][]testResult{"self": allTestRes} - - return nil -} - -func runSelfTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeerSelf, conf testPeersConfig, ch chan testResult) { - defer close(ch) - for _, t := range queuedTestCases { - select { - case <-ctx.Done(): - return - default: - ch <- allTestCases[t](ctx, &conf) + testResCh <- allTestCases[t](ctx, &conf, tcpNode, target) } } } @@ -736,7 +414,7 @@ func peerPingLoadTest(ctx context.Context, conf *testPeersConfig, tcpNode host.H ) testRes := testResult{Name: "PingLoad"} - testResCh := make(chan ping.Result, math.MaxInt16) + testResCh := make(chan time.Duration, math.MaxInt16) pingCtx, cancel := context.WithTimeout(ctx, conf.LoadTestDuration) defer cancel() ticker := time.NewTicker(time.Second) @@ -758,48 +436,11 @@ func peerPingLoadTest(ctx context.Context, conf *testPeersConfig, tcpNode host.H close(testResCh) log.Info(ctx, "Ping load tests finished", z.Any("target", peer.Name)) - highestRTT := time.Duration(0) - for val := range testResCh { - if val.RTT > highestRTT { - highestRTT = val.RTT - } - } - if highestRTT > thresholdPeersLoadPoor { - testRes.Verdict = testVerdictPoor - } else if highestRTT > thresholdPeersLoadAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{highestRTT}.String() + testRes = evaluateHighestRTTScores(testResCh, testRes, thresholdPeersLoadAvg, thresholdPeersLoadPoor) return testRes } -func dialLibp2pTCPIP(ctx context.Context, address string) error { - d := 
net.Dialer{Timeout: time.Second} - conn, err := d.DialContext(ctx, "tcp", address) - if err != nil { - return errors.Wrap(err, "net dial") - } - defer conn.Close() - buf := new(strings.Builder) - _, err = io.CopyN(buf, conn, 19) - if err != nil { - return errors.Wrap(err, "io copy") - } - if !strings.Contains(buf.String(), "/multistream/1.0.0") { - return errors.New("multistream not found", z.Any("found", buf.String()), z.Any("address", address)) - } - - err = conn.Close() - if err != nil { - return errors.Wrap(err, "close conn") - } - - return nil -} - func peerDirectConnTest(ctx context.Context, conf *testPeersConfig, tcpNode host.Host, p2pPeer p2p.Peer) testResult { testRes := testResult{Name: "DirectConn"} @@ -830,6 +471,55 @@ func peerDirectConnTest(ctx context.Context, conf *testPeersConfig, tcpNode host return testRes } +// self tests + +func testSelf(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeerSelf, conf testPeersConfig, allTestResCh chan map[string][]testResult) error { + singleTestResCh := make(chan testResult) + allTestRes := []testResult{} + if len(queuedTestCases) == 0 { + allTestResCh <- map[string][]testResult{"self": allTestRes} + return nil + } + go runSelfTest(ctx, queuedTestCases, allTestCases, conf, singleTestResCh) + + testCounter := 0 + finished := false + for !finished { + var testName string + select { + case <-ctx.Done(): + testName = queuedTestCases[testCounter].name + allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) + finished = true + case result, ok := <-singleTestResCh: + if !ok { + finished = true + continue + } + testName = queuedTestCases[testCounter].name + testCounter++ + result.Name = testName + allTestRes = append(allTestRes, result) + } + } + + allTestResCh <- map[string][]testResult{"self": allTestRes} + + return nil +} + +func runSelfTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases 
map[testCaseName]testCasePeerSelf, conf testPeersConfig, ch chan testResult) { + defer close(ch) + for _, t := range queuedTestCases { + select { + case <-ctx.Done(): + return + default: + ch <- allTestCases[t](ctx, &conf) + } + } +} + func libp2pTCPPortOpenTest(ctx context.Context, cfg *testPeersConfig) testResult { testRes := testResult{Name: "Libp2pTCPPortOpen"} @@ -849,48 +539,98 @@ func libp2pTCPPortOpenTest(ctx context.Context, cfg *testPeersConfig) testResult return testRes } -func relayPingTest(ctx context.Context, _ *testPeersConfig, target string) testResult { - testRes := testResult{Name: "PingRelay"} +// charon relays tests - client := http.Client{} - req, err := http.NewRequestWithContext(ctx, http.MethodGet, target, nil) - if err != nil { - return failedTestResult(testRes, err) +func testAllRelays(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, allRelaysResCh chan map[string][]testResult) error { + // run tests for all relays + allRelayRes := make(map[string][]testResult) + singleRelayResCh := make(chan map[string][]testResult) + group, _ := errgroup.WithContext(ctx) + + for _, relay := range conf.P2P.Relays { + group.Go(func() error { + return testSingleRelay(ctx, queuedTestCases, allTestCases, conf, relay, singleRelayResCh) + }) } - resp, err := client.Do(req) + + doneReading := make(chan bool) + go func() { + for singleRelayRes := range singleRelayResCh { + maps.Copy(allRelayRes, singleRelayRes) + } + doneReading <- true + }() + + err := group.Wait() if err != nil { - return failedTestResult(testRes, err) + return errors.Wrap(err, "relays test errgroup") } - defer resp.Body.Close() + close(singleRelayResCh) + <-doneReading - if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) - } + allRelaysResCh <- allRelayRes - testRes.Verdict = testVerdictOk - - return testRes + return nil } -func relayPingMeasureTest(ctx 
context.Context, _ *testPeersConfig, target string) testResult { - testRes := testResult{Name: "PingMeasureRelay"} +func testSingleRelay(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, target string, allTestResCh chan map[string][]testResult) error { + singleTestResCh := make(chan testResult) + allTestRes := []testResult{} + relayName := fmt.Sprintf("relay %v", target) + if len(queuedTestCases) == 0 { + allTestResCh <- map[string][]testResult{relayName: allTestRes} + return nil + } + + // run all relay tests for a relay, pushing each completed test to the channel until all are complete or timeout occurs + go runRelayTest(ctx, queuedTestCases, allTestCases, conf, target, singleTestResCh) + testCounter := 0 + finished := false + for !finished { + var testName string + select { + case <-ctx.Done(): + testName = queuedTestCases[testCounter].name + allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) + finished = true + case result, ok := <-singleTestResCh: + if !ok { + finished = true + continue + } + testName = queuedTestCases[testCounter].name + testCounter++ + result.Name = testName + allTestRes = append(allTestRes, result) + } + } - var start time.Time - var firstByte time.Duration + allTestResCh <- map[string][]testResult{relayName: allTestRes} - trace := &httptrace.ClientTrace{ - GotFirstResponseByte: func() { - firstByte = time.Since(start) - }, + return nil +} + +func runRelayTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, target string, testResCh chan testResult) { + defer close(testResCh) + for _, t := range queuedTestCases { + select { + case <-ctx.Done(): + return + default: + testResCh <- allTestCases[t](ctx, &conf, target) + } } +} + +func relayPingTest(ctx context.Context, _ *testPeersConfig, target string) testResult { + testRes := 
testResult{Name: "PingRelay"} - start = time.Now() - req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), http.MethodGet, target, nil) + client := http.Client{} + req, err := http.NewRequestWithContext(ctx, http.MethodGet, target, nil) if err != nil { return failedTestResult(testRes, err) } - - resp, err := http.DefaultTransport.RoundTrip(req) + resp, err := client.Do(req) if err != nil { return failedTestResult(testRes, err) } @@ -900,14 +640,235 @@ func relayPingMeasureTest(ctx context.Context, _ *testPeersConfig, target string return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } - if firstByte > thresholdRelayMeasurePoor { - testRes.Verdict = testVerdictPoor - } else if firstByte > thresholdRelayMeasureAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood + testRes.Verdict = testVerdictOk + + return testRes +} + +func relayPingMeasureTest(ctx context.Context, _ *testPeersConfig, target string) testResult { + testRes := testResult{Name: "PingMeasureRelay"} + + rtt, err := requestRTT(ctx, target, http.MethodGet, nil, 200) + if err != nil { + return failedTestResult(testRes, err) } - testRes.Measurement = Duration{firstByte}.String() + + testRes = evaluateRTT(rtt, testRes, thresholdRelayMeasureAvg, thresholdRelayMeasurePoor) return testRes } + +// helper functions + +func fetchPeersFromDefinition(path string) ([]string, error) { + f, err := os.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "read definition file", z.Str("path", path)) + } + + var def cluster.Definition + err = json.Unmarshal(f, &def) + if err != nil { + return nil, errors.Wrap(err, "unmarshal definition json", z.Str("path", path)) + } + + var enrs []string + for _, o := range def.Operators { + enrs = append(enrs, o.ENR) + } + + if len(enrs) == 0 { + return nil, errors.New("no peers found in lock", z.Str("path", path)) + } + + return enrs, nil +} + +func fetchPeersFromLock(path string) ([]string, 
error) { + f, err := os.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "read lock file", z.Str("path", path)) + } + + var lock cluster.Lock + err = json.Unmarshal(f, &lock) + if err != nil { + return nil, errors.Wrap(err, "unmarshal lock json", z.Str("path", path)) + } + + var enrs []string + for _, o := range lock.Operators { + enrs = append(enrs, o.ENR) + } + + if len(enrs) == 0 { + return nil, errors.New("no peers found in lock", z.Str("path", path)) + } + + return enrs, nil +} + +func fetchENRs(conf testPeersConfig) ([]string, error) { + var enrs []string + var err error + switch { + case len(conf.ENRs) != 0: + enrs = conf.ENRs + case conf.ClusterDefinitionFilePath != "": + enrs, err = fetchPeersFromDefinition(conf.ClusterDefinitionFilePath) + if err != nil { + return nil, err + } + case conf.ClusterLockFilePath != "": + enrs, err = fetchPeersFromLock(conf.ClusterLockFilePath) + if err != nil { + return nil, err + } + } + + return enrs, nil +} + +func startTCPNode(ctx context.Context, conf testPeersConfig) (host.Host, func(), error) { + enrs, err := fetchENRs(conf) + if err != nil { + return nil, nil, err + } + + var peers []p2p.Peer + for i, enrString := range enrs { + enrRecord, err := enr.Parse(enrString) + if err != nil { + return nil, nil, errors.Wrap(err, "decode enr", z.Str("enr", enrString)) + } + + p2pPeer, err := p2p.NewPeerFromENR(enrRecord, i) + if err != nil { + return nil, nil, err + } + + peers = append(peers, p2pPeer) + } + + p2pPrivKey, err := p2p.LoadPrivKey(conf.DataDir) + if err != nil { + return nil, nil, err + } + + meENR, err := enr.New(p2pPrivKey) + if err != nil { + return nil, nil, err + } + + mePeer, err := p2p.NewPeerFromENR(meENR, len(enrs)) + if err != nil { + return nil, nil, err + } + + log.Info(ctx, "Self p2p name resolved", z.Any("name", mePeer.Name)) + + peers = append(peers, mePeer) + + allENRs := enrs + allENRs = append(allENRs, meENR.String()) + slices.Sort(allENRs) + allENRsString := strings.Join(allENRs, 
",") + allENRsHash := sha256.Sum256([]byte(allENRsString)) + + return setupP2P(ctx, p2pPrivKey, conf.P2P, peers, allENRsHash[:]) +} + +func setupP2P(ctx context.Context, privKey *k1.PrivateKey, conf p2p.Config, peers []p2p.Peer, enrsHash []byte) (host.Host, func(), error) { + var peerIDs []peer.ID + for _, peer := range peers { + peerIDs = append(peerIDs, peer.ID) + } + + if err := p2p.VerifyP2PKey(peers, privKey); err != nil { + return nil, nil, err + } + + relays, err := p2p.NewRelays(ctx, conf.Relays, hex.EncodeToString(enrsHash)) + if err != nil { + return nil, nil, err + } + + connGater, err := p2p.NewConnGater(peerIDs, relays) + if err != nil { + return nil, nil, err + } + + tcpNode, err := p2p.NewTCPNode(ctx, conf, privKey, connGater, false) + if err != nil { + return nil, nil, err + } + + p2p.RegisterConnectionLogger(ctx, tcpNode, peerIDs) + + for _, relay := range relays { + go p2p.NewRelayReserver(tcpNode, relay)(ctx) + } + + go p2p.NewRelayRouter(tcpNode, peerIDs, relays)(ctx) + + return tcpNode, func() { + err := tcpNode.Close() + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Close TCP node", err) + } + }, nil +} + +func pingPeerOnce(ctx context.Context, tcpNode host.Host, peer p2p.Peer) (ping.Result, error) { + pingSvc := ping.NewPingService(tcpNode) + pingCtx, cancel := context.WithCancel(ctx) + defer cancel() + pingChan := pingSvc.Ping(pingCtx, peer.ID) + result, ok := <-pingChan + if !ok { + return ping.Result{}, errors.New("ping channel closed") + } + + return result, nil +} + +func pingPeerContinuously(ctx context.Context, tcpNode host.Host, peer p2p.Peer, resCh chan<- time.Duration) { + for { + r, err := pingPeerOnce(ctx, tcpNode, peer) + if err != nil { + return + } + + select { + case <-ctx.Done(): + return + case resCh <- r.RTT: + awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here + sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) + } + } +} + +func dialLibp2pTCPIP(ctx 
context.Context, address string) error { + d := net.Dialer{Timeout: time.Second} + conn, err := d.DialContext(ctx, "tcp", address) + if err != nil { + return errors.Wrap(err, "net dial") + } + defer conn.Close() + buf := new(strings.Builder) + _, err = io.CopyN(buf, conn, 19) + if err != nil { + return errors.Wrap(err, "io copy") + } + if !strings.Contains(buf.String(), "/multistream/1.0.0") { + return errors.New("multistream not found", z.Any("found", buf.String()), z.Any("address", address)) + } + + err = conn.Close() + if err != nil { + return errors.Wrap(err, "close conn") + } + + return nil +} diff --git a/cmd/testpeers_internal_test.go b/cmd/testpeers_internal_test.go index a2694a2b69..0d766c0f36 100644 --- a/cmd/testpeers_internal_test.go +++ b/cmd/testpeers_internal_test.go @@ -78,19 +78,19 @@ func TestPeersTest(t *testing.T) { {Name: "pingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer inexpensive-farm enr:-HW4QBHlcyD3fYWUMADiOv4OxODaL5wJG0a7P7d_ltu4VZe1MibZ1N-twFaoaq0BoCtXcY71etxLJGeEZT5p3XCO6GOAgmlkgnY0iXNlY3AyNTZrMaEDI2HRUlVBag__njkOWEEQRLlC9ylIVCrIXOuNBSlrx6o": { + "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "directConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer anxious-pencil enr:-HW4QDwUF804f4WhUjwcp4JJ-PrRH0glQZv8s2cVHlBRPJ3SYcYO-dvJGsKhztffrski5eujJkl8oAc983MZy6-PqF2AgmlkgnY0iXNlY3AyNTZrMaECPEPryjkmUBnQFyjmMw9rl7DVtKL0243nN5iepqsvKDw": { + "peer anxious-pencil enr:-HW4QDwUF...vKDw": { {Name: "ping", Verdict: 
testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "directConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer important-pen enr:-HW4QPSBgUTag8oZs3zIsgWzlBUrSgT8pgZmFJa7HWwKXUcRLlISa68OJtp-JTzhUXsJ2vSGwKGACn0OTatWdJATxn-AgmlkgnY0iXNlY3AyNTZrMaECA3R_ffXLXCLJsfEwf6xeoAFgWnDIOdq8kS0Yqkhwbr0": { + "peer important-pen enr:-HW4QPSBg...wbr0": { {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, @@ -144,13 +144,13 @@ func TestPeersTest(t *testing.T) { {Name: "pingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer inexpensive-farm enr:-HW4QBHlcyD3fYWUMADiOv4OxODaL5wJG0a7P7d_ltu4VZe1MibZ1N-twFaoaq0BoCtXcY71etxLJGeEZT5p3XCO6GOAgmlkgnY0iXNlY3AyNTZrMaEDI2HRUlVBag__njkOWEEQRLlC9ylIVCrIXOuNBSlrx6o": { + "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer anxious-pencil enr:-HW4QDwUF804f4WhUjwcp4JJ-PrRH0glQZv8s2cVHlBRPJ3SYcYO-dvJGsKhztffrski5eujJkl8oAc983MZy6-PqF2AgmlkgnY0iXNlY3AyNTZrMaECPEPryjkmUBnQFyjmMw9rl7DVtKL0243nN5iepqsvKDw": { + "peer anxious-pencil enr:-HW4QDwUF...vKDw": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer important-pen 
enr:-HW4QPSBgUTag8oZs3zIsgWzlBUrSgT8pgZmFJa7HWwKXUcRLlISa68OJtp-JTzhUXsJ2vSGwKGACn0OTatWdJATxn-AgmlkgnY0iXNlY3AyNTZrMaECA3R_ffXLXCLJsfEwf6xeoAFgWnDIOdq8kS0Yqkhwbr0": { + "peer important-pen enr:-HW4QPSBg...wbr0": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, @@ -204,13 +204,13 @@ func TestPeersTest(t *testing.T) { expected: testCategoryResult{ CategoryName: peersTestCategory, Targets: map[string][]testResult{ - "peer inexpensive-farm enr:-HW4QBHlcyD3fYWUMADiOv4OxODaL5wJG0a7P7d_ltu4VZe1MibZ1N-twFaoaq0BoCtXcY71etxLJGeEZT5p3XCO6GOAgmlkgnY0iXNlY3AyNTZrMaEDI2HRUlVBag__njkOWEEQRLlC9ylIVCrIXOuNBSlrx6o": { + "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer anxious-pencil enr:-HW4QDwUF804f4WhUjwcp4JJ-PrRH0glQZv8s2cVHlBRPJ3SYcYO-dvJGsKhztffrski5eujJkl8oAc983MZy6-PqF2AgmlkgnY0iXNlY3AyNTZrMaECPEPryjkmUBnQFyjmMw9rl7DVtKL0243nN5iepqsvKDw": { + "peer anxious-pencil enr:-HW4QDwUF...vKDw": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer important-pen enr:-HW4QPSBgUTag8oZs3zIsgWzlBUrSgT8pgZmFJa7HWwKXUcRLlISa68OJtp-JTzhUXsJ2vSGwKGACn0OTatWdJATxn-AgmlkgnY0iXNlY3AyNTZrMaECA3R_ffXLXCLJsfEwf6xeoAFgWnDIOdq8kS0Yqkhwbr0": { + "peer important-pen enr:-HW4QPSBg...wbr0": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, @@ -247,13 +247,13 @@ func TestPeersTest(t *testing.T) { {Name: "pingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer inexpensive-farm enr:-HW4QBHlcyD3fYWUMADiOv4OxODaL5wJG0a7P7d_ltu4VZe1MibZ1N-twFaoaq0BoCtXcY71etxLJGeEZT5p3XCO6GOAgmlkgnY0iXNlY3AyNTZrMaEDI2HRUlVBag__njkOWEEQRLlC9ylIVCrIXOuNBSlrx6o": { + 
"peer inexpensive-farm enr:-HW4QBHlc...rx6o": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer anxious-pencil enr:-HW4QDwUF804f4WhUjwcp4JJ-PrRH0glQZv8s2cVHlBRPJ3SYcYO-dvJGsKhztffrski5eujJkl8oAc983MZy6-PqF2AgmlkgnY0iXNlY3AyNTZrMaECPEPryjkmUBnQFyjmMw9rl7DVtKL0243nN5iepqsvKDw": { + "peer anxious-pencil enr:-HW4QDwUF...vKDw": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer important-pen enr:-HW4QPSBgUTag8oZs3zIsgWzlBUrSgT8pgZmFJa7HWwKXUcRLlISa68OJtp-JTzhUXsJ2vSGwKGACn0OTatWdJATxn-AgmlkgnY0iXNlY3AyNTZrMaECA3R_ffXLXCLJsfEwf6xeoAFgWnDIOdq8kS0Yqkhwbr0": { + "peer important-pen enr:-HW4QPSBg...wbr0": { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, diff --git a/cmd/testperformance.go b/cmd/testperformance.go index 69e5980d4a..5cdc0b99f9 100644 --- a/cmd/testperformance.go +++ b/cmd/testperformance.go @@ -92,16 +92,16 @@ func newTestPerformanceCmd(runFunc func(context.Context, io.Writer, testPerforma } bindTestFlags(cmd, &config.testConfig) - bindTestPerformanceFlags(cmd, &config) + bindTestPerformanceFlags(cmd, &config, "") return cmd } -func bindTestPerformanceFlags(cmd *cobra.Command, config *testPerformanceConfig) { - cmd.Flags().StringVar(&config.DiskIOTestFileDir, "disk-io-test-file-dir", "", "Directory at which disk performance will be measured. If none specified, current user's home directory will be used.") - cmd.Flags().IntVar(&config.DiskIOBlockSizeKb, "disk-io-block-size-kb", 4096, "The block size in kilobytes used for I/O units. Same value applies for both reads and writes.") - cmd.Flags().StringSliceVar(&config.InternetTestServersOnly, "internet-test-servers-only", []string{}, "List of specific server names to be included for the internet tests, the best performing one is chosen. 
If not provided, closest and best performing servers are chosen automatically.") - cmd.Flags().StringSliceVar(&config.InternetTestServersExclude, "internet-test-servers-exclude", []string{}, "List of server names to be excluded from the tests. To be specified only if you experience issues with a server that is wrongly considered best performing.") +func bindTestPerformanceFlags(cmd *cobra.Command, config *testPerformanceConfig, flagsPrefix string) { + cmd.Flags().StringVar(&config.DiskIOTestFileDir, flagsPrefix+"disk-io-test-file-dir", "", "Directory at which disk performance will be measured. If none specified, current user's home directory will be used.") + cmd.Flags().IntVar(&config.DiskIOBlockSizeKb, flagsPrefix+"disk-io-block-size-kb", 4096, "The block size in kilobytes used for I/O units. Same value applies for both reads and writes.") + cmd.Flags().StringSliceVar(&config.InternetTestServersOnly, flagsPrefix+"internet-test-servers-only", []string{}, "List of specific server names to be included for the internet tests, the best performing one is chosen. If not provided, closest and best performing servers are chosen automatically.") + cmd.Flags().StringSliceVar(&config.InternetTestServersExclude, flagsPrefix+"internet-test-servers-exclude", []string{}, "List of server names to be excluded from the tests. 
To be specified only if you experience issues with a server that is wrongly considered best performing.") } func supportedPerformanceTestCases() map[testCaseName]func(context.Context, *testPerformanceConfig) testResult { @@ -119,6 +119,8 @@ func supportedPerformanceTestCases() map[testCaseName]func(context.Context, *tes } func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceConfig) (err error) { + log.Info(ctx, "Starting machine performance and network connectivity test") + testCases := supportedPerformanceTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { @@ -174,6 +176,8 @@ func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceCon return nil } +// hardware and internet connectivity performance tests + func testSinglePerformance(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]func(context.Context, *testPerformanceConfig) testResult, cfg testPerformanceConfig, resCh chan map[string][]testResult) { defer close(resCh) singleTestResCh := make(chan testResult) @@ -217,27 +221,6 @@ func testPerformance(ctx context.Context, queuedTests []testCaseName, allTests m } } -func fioCommand(ctx context.Context, filename string, blocksize int, operation string) ([]byte, error) { - //nolint:gosec - cmd, err := exec.CommandContext(ctx, "fio", - "--name=fioTest", - fmt.Sprintf("--filename=%v/fiotest", filename), - fmt.Sprintf("--size=%vMb", diskOpsMBsTotal/diskOpsNumOfJobs), - fmt.Sprintf("--blocksize=%vk", blocksize), - fmt.Sprintf("--numjobs=%v", diskOpsNumOfJobs), - fmt.Sprintf("--rw=%v", operation), - "--direct=1", - "--runtime=60s", - "--group_reporting", - "--output-format=json", - ).Output() - if err != nil { - return nil, errors.Wrap(err, "exec fio command") - } - - return cmd, nil -} - func performanceDiskWriteSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { testRes := testResult{Name: "DiskWriteSpeed"} @@ -444,86 
+427,6 @@ func performanceDiskReadIOPSTest(ctx context.Context, conf *testPerformanceConfi return testRes } -func availableMemoryLinux(context.Context) (int64, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return 0, errors.Wrap(err, "open /proc/meminfo") - } - scanner := bufio.NewScanner(file) - if scanner.Err() != nil { - return 0, errors.Wrap(err, "new scanner") - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, "MemAvailable") { - continue - } - splitText := strings.Split(line, ": ") - kbs := strings.Trim(strings.Split(splitText[1], "kB")[0], " ") - kbsInt, err := strconv.ParseInt(kbs, 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse MemAvailable int") - } - - return kbsInt * 1024, nil - } - - return 0, errors.New("memAvailable not found in /proc/meminfo") -} - -func availableMemoryMacos(ctx context.Context) (int64, error) { - pageSizeBytes, err := exec.CommandContext(ctx, "pagesize").Output() - if err != nil { - return 0, errors.Wrap(err, "run pagesize") - } - memorySizePerPage, err := strconv.ParseInt(strings.TrimSuffix(string(pageSizeBytes), "\n"), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse memorySizePerPage int") - } - - out, err := exec.CommandContext(ctx, "vm_stat").Output() - if err != nil { - return 0, errors.Wrap(err, "run vm_stat") - } - outBuf := bytes.NewBuffer(out) - scanner := bufio.NewScanner(outBuf) - if scanner.Err() != nil { - return 0, errors.Wrap(err, "new scanner") - } - - var pagesFree, pagesInactive, pagesSpeculative int64 - for scanner.Scan() { - line := scanner.Text() - splitText := strings.Split(line, ": ") - - var bytes int64 - var err error - switch { - case strings.Contains(splitText[0], "Pages free"): - bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse Pages free int") - } - pagesFree = bytes - case strings.Contains(splitText[0], "Pages inactive"): - 
bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse Pages inactive int") - } - pagesInactive = bytes - case strings.Contains(splitText[0], "Pages speculative"): - bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse Pages speculative int") - } - pagesSpeculative = bytes - } - } - - return ((pagesFree + pagesInactive + pagesSpeculative) * memorySizePerPage), nil -} - func performanceAvailableMemoryTest(ctx context.Context, _ *testPerformanceConfig) testResult { testRes := testResult{Name: "AvailableMemory"} @@ -559,49 +462,6 @@ func performanceAvailableMemoryTest(ctx context.Context, _ *testPerformanceConfi return testRes } -func totalMemoryLinux(context.Context) (int64, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return 0, errors.Wrap(err, "open /proc/meminfo") - } - scanner := bufio.NewScanner(file) - if scanner.Err() != nil { - return 0, errors.Wrap(err, "new scanner") - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, "MemTotal") { - continue - } - splitText := strings.Split(line, ": ") - kbs := strings.Trim(strings.Split(splitText[1], "kB")[0], " ") - kbsInt, err := strconv.ParseInt(kbs, 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse MemTotal int") - } - - return kbsInt * 1024, nil - } - - return 0, errors.New("memTotal not found in /proc/meminfo") -} - -func totalMemoryMacos(ctx context.Context) (int64, error) { - out, err := exec.CommandContext(ctx, "sysctl", "hw.memsize").Output() - if err != nil { - return 0, errors.Wrap(err, "run sysctl hw.memsize") - } - - memSize := strings.TrimSuffix(strings.Split(string(out), ": ")[1], "\n") - memSizeInt, err := strconv.ParseInt(memSize, 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse memSize int") - } - - return memSizeInt, nil -} - func 
performanceTotalMemoryTest(ctx context.Context, _ *testPerformanceConfig) testResult { testRes := testResult{Name: "TotalMemory"} @@ -637,44 +497,6 @@ func performanceTotalMemoryTest(ctx context.Context, _ *testPerformanceConfig) t return testRes } -func fetchOoklaServer(_ context.Context, conf *testPerformanceConfig) (speedtest.Server, error) { - speedtestClient := speedtest.New() - - serverList, err := speedtestClient.FetchServers() - if err != nil { - return speedtest.Server{}, errors.Wrap(err, "fetch Ookla servers") - } - - var targets speedtest.Servers - - if len(conf.InternetTestServersOnly) != 0 { - for _, server := range serverList { - if slices.Contains(conf.InternetTestServersOnly, server.Name) { - targets = append(targets, server) - } - } - } - - if len(conf.InternetTestServersExclude) != 0 { - for _, server := range serverList { - if !slices.Contains(conf.InternetTestServersExclude, server.Name) { - targets = append(targets, server) - } - } - } - - if targets == nil { - targets = serverList - } - - servers, err := targets.FindServer([]int{}) - if err != nil { - return speedtest.Server{}, errors.Wrap(err, "find Ookla server") - } - - return *servers[0], nil -} - func performanceInternetLatencyTest(ctx context.Context, conf *testPerformanceConfig) testResult { testRes := testResult{Name: "InternetLatency"} @@ -770,3 +592,187 @@ func performanceInternetUploadSpeedTest(ctx context.Context, conf *testPerforman return testRes } + +// helper functions + +func fioCommand(ctx context.Context, filename string, blocksize int, operation string) ([]byte, error) { + //nolint:gosec + cmd, err := exec.CommandContext(ctx, "fio", + "--name=fioTest", + fmt.Sprintf("--filename=%v/fiotest", filename), + fmt.Sprintf("--size=%vMb", diskOpsMBsTotal/diskOpsNumOfJobs), + fmt.Sprintf("--blocksize=%vk", blocksize), + fmt.Sprintf("--numjobs=%v", diskOpsNumOfJobs), + fmt.Sprintf("--rw=%v", operation), + "--direct=1", + "--runtime=60s", + "--group_reporting", + 
"--output-format=json", + ).Output() + if err != nil { + return nil, errors.Wrap(err, "exec fio command") + } + + return cmd, nil +} + +func availableMemoryLinux(context.Context) (int64, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return 0, errors.Wrap(err, "open /proc/meminfo") + } + scanner := bufio.NewScanner(file) + if scanner.Err() != nil { + return 0, errors.Wrap(err, "new scanner") + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, "MemAvailable") { + continue + } + splitText := strings.Split(line, ": ") + kbs := strings.Trim(strings.Split(splitText[1], "kB")[0], " ") + kbsInt, err := strconv.ParseInt(kbs, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse MemAvailable int") + } + + return kbsInt * 1024, nil + } + + return 0, errors.New("memAvailable not found in /proc/meminfo") +} + +func availableMemoryMacos(ctx context.Context) (int64, error) { + pageSizeBytes, err := exec.CommandContext(ctx, "pagesize").Output() + if err != nil { + return 0, errors.Wrap(err, "run pagesize") + } + memorySizePerPage, err := strconv.ParseInt(strings.TrimSuffix(string(pageSizeBytes), "\n"), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse memorySizePerPage int") + } + + out, err := exec.CommandContext(ctx, "vm_stat").Output() + if err != nil { + return 0, errors.Wrap(err, "run vm_stat") + } + outBuf := bytes.NewBuffer(out) + scanner := bufio.NewScanner(outBuf) + if scanner.Err() != nil { + return 0, errors.Wrap(err, "new scanner") + } + + var pagesFree, pagesInactive, pagesSpeculative int64 + for scanner.Scan() { + line := scanner.Text() + splitText := strings.Split(line, ": ") + + var bytes int64 + var err error + switch { + case strings.Contains(splitText[0], "Pages free"): + bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse Pages free int") + } + pagesFree = bytes + case 
strings.Contains(splitText[0], "Pages inactive"): + bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse Pages inactive int") + } + pagesInactive = bytes + case strings.Contains(splitText[0], "Pages speculative"): + bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse Pages speculative int") + } + pagesSpeculative = bytes + } + } + + return ((pagesFree + pagesInactive + pagesSpeculative) * memorySizePerPage), nil +} + +func totalMemoryLinux(context.Context) (int64, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return 0, errors.Wrap(err, "open /proc/meminfo") + } + scanner := bufio.NewScanner(file) + if scanner.Err() != nil { + return 0, errors.Wrap(err, "new scanner") + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, "MemTotal") { + continue + } + splitText := strings.Split(line, ": ") + kbs := strings.Trim(strings.Split(splitText[1], "kB")[0], " ") + kbsInt, err := strconv.ParseInt(kbs, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse MemTotal int") + } + + return kbsInt * 1024, nil + } + + return 0, errors.New("memTotal not found in /proc/meminfo") +} + +func totalMemoryMacos(ctx context.Context) (int64, error) { + out, err := exec.CommandContext(ctx, "sysctl", "hw.memsize").Output() + if err != nil { + return 0, errors.Wrap(err, "run sysctl hw.memsize") + } + + memSize := strings.TrimSuffix(strings.Split(string(out), ": ")[1], "\n") + memSizeInt, err := strconv.ParseInt(memSize, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse memSize int") + } + + return memSizeInt, nil +} + +func fetchOoklaServer(_ context.Context, conf *testPerformanceConfig) (speedtest.Server, error) { + speedtestClient := speedtest.New() + + serverList, err := speedtestClient.FetchServers() + if err != nil { + return 
speedtest.Server{}, errors.Wrap(err, "fetch Ookla servers") + } + + var targets speedtest.Servers + + if len(conf.InternetTestServersOnly) != 0 { + for _, server := range serverList { + if slices.Contains(conf.InternetTestServersOnly, server.Name) { + targets = append(targets, server) + } + } + } + + if len(conf.InternetTestServersExclude) != 0 { + for _, server := range serverList { + if !slices.Contains(conf.InternetTestServersExclude, server.Name) { + targets = append(targets, server) + } + } + } + + if targets == nil { + targets = serverList + } + + servers, err := targets.FindServer([]int{}) + if err != nil { + return speedtest.Server{}, errors.Wrap(err, "find Ookla server") + } + + return *servers[0], nil +} diff --git a/cmd/testvalidator.go b/cmd/testvalidator.go index d60a19b29a..39d134e6ed 100644 --- a/cmd/testvalidator.go +++ b/cmd/testvalidator.go @@ -49,14 +49,14 @@ func newTestValidatorCmd(runFunc func(context.Context, io.Writer, testValidatorC } bindTestFlags(cmd, &config.testConfig) - bindTestValidatorFlags(cmd, &config) + bindTestValidatorFlags(cmd, &config, "") return cmd } -func bindTestValidatorFlags(cmd *cobra.Command, config *testValidatorConfig) { - cmd.Flags().StringVar(&config.APIAddress, "validator-api-address", "127.0.0.1:3600", "Listening address (ip and port) for validator-facing traffic proxying the beacon-node API.") - cmd.Flags().DurationVar(&config.LoadTestDuration, "load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") +func bindTestValidatorFlags(cmd *cobra.Command, config *testValidatorConfig, flagsPrefix string) { + cmd.Flags().StringVar(&config.APIAddress, flagsPrefix+"validator-api-address", "127.0.0.1:3600", "Listening address (ip and port) for validator-facing traffic proxying the beacon-node API.") + cmd.Flags().DurationVar(&config.LoadTestDuration, flagsPrefix+"load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned.") } func supportedValidatorTestCases() map[testCaseName]func(context.Context, *testValidatorConfig) testResult { @@ -68,6 +68,8 @@ func supportedValidatorTestCases() map[testCaseName]func(context.Context, *testV } func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) (err error) { + log.Info(ctx, "Starting validator client test") + testCases := supportedValidatorTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { @@ -124,6 +126,8 @@ func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) return nil } +// validator client tests + func testSingleValidator(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]func(context.Context, *testValidatorConfig) testResult, cfg testValidatorConfig, resCh chan map[string][]testResult) { defer close(resCh) singleTestResCh := make(chan testResult) @@ -194,41 +198,11 @@ func validatorPingMeasureTest(ctx context.Context, conf *testValidatorConfig) te defer conn.Close() rtt := time.Since(before) - if rtt > thresholdValidatorMeasurePoor { - testRes.Verdict = testVerdictPoor - } else if rtt > thresholdValidatorMeasureAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{rtt}.String() + testRes = evaluateRTT(rtt, testRes, thresholdValidatorMeasureAvg, thresholdValidatorMeasurePoor) 
return testRes } -func pingValidatorContinuously(ctx context.Context, address string, resCh chan<- time.Duration) { - d := net.Dialer{Timeout: time.Second} - for { - before := time.Now() - conn, err := d.DialContext(ctx, "tcp", address) - if err != nil { - return - } - rtt := time.Since(before) - err = conn.Close() - if err != nil { - return - } - select { - case <-ctx.Done(): - return - case resCh <- rtt: - awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here - sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) - } - } -} - func validatorPingLoadTest(ctx context.Context, conf *testValidatorConfig) testResult { log.Info(ctx, "Running ping load tests...", z.Any("duration", conf.LoadTestDuration), @@ -258,20 +232,32 @@ func validatorPingLoadTest(ctx context.Context, conf *testValidatorConfig) testR close(testResCh) log.Info(ctx, "Ping load tests finished", z.Any("target", conf.APIAddress)) - highestRTT := time.Duration(0) - for rtt := range testResCh { - if rtt > highestRTT { - highestRTT = rtt - } - } - if highestRTT > thresholdValidatorLoadPoor { - testRes.Verdict = testVerdictPoor - } else if highestRTT > thresholdValidatorLoadAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{highestRTT}.String() + testRes = evaluateHighestRTTScores(testResCh, testRes, thresholdValidatorLoadAvg, thresholdValidatorLoadPoor) return testRes } + +// helper functions + +func pingValidatorContinuously(ctx context.Context, address string, resCh chan<- time.Duration) { + d := net.Dialer{Timeout: time.Second} + for { + before := time.Now() + conn, err := d.DialContext(ctx, "tcp", address) + if err != nil { + return + } + rtt := time.Since(before) + err = conn.Close() + if err != nil { + return + } + select { + case <-ctx.Done(): + return + case resCh <- rtt: + awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here + sleepWithContext(ctx, 
time.Duration(awaitTime)*time.Millisecond) + } + } +} From 1ce12116328547d95f72cd38e1ad92cecb17db37 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 09:00:26 +0000 Subject: [PATCH 77/89] build(deps): Bump go.opentelemetry.io/otel/exporters/stdout/stdouttrace from 1.31.0 to 1.32.0 (#3374) Bumps [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](https://github.com/open-telemetry/opentelemetry-go) from 1.31.0 to 1.32.0.
Changelog

Sourced from go.opentelemetry.io/otel/exporters/stdout/stdouttrace's changelog.

[1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08

Added

  • Add go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter, which can be used to disable exemplar recording. (#5850)
  • Add go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
  • Add ExemplarReservoirProviderSelector and DefaultExemplarReservoirProviderSelector to go.opentelemetry.io/otel/sdk/metric, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
  • Add ExemplarReservoirProviderSelector to go.opentelemetry.io/otel/sdk/metric.Stream to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
  • Add ReservoirProvider, HistogramReservoirProvider and FixedSizeReservoirProvider to go.opentelemetry.io/otel/sdk/metric/exemplar to make it convenient to use providers of Reservoirs. (#5861)
  • The go.opentelemetry.io/otel/semconv/v1.27.0 package. The package contains semantic conventions from the v1.27.0 version of the OpenTelemetry Semantic Conventions. (#5894)
  • Add Attributes attribute.Set field to Scope in go.opentelemetry.io/otel/sdk/instrumentation. (#5903)
  • Add Attributes attribute.Set field to ScopeRecords in go.opentelemetry.io/otel/log/logtest. (#5927)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc adds instrumentation scope attributes. (#5934)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp adds instrumentation scope attributes. (#5934)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc adds instrumentation scope attributes. (#5935)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp adds instrumentation scope attributes. (#5935)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc adds instrumentation scope attributes. (#5933)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp adds instrumentation scope attributes. (#5933)
  • go.opentelemetry.io/otel/exporters/prometheus adds instrumentation scope attributes in otel_scope_info metric as labels. (#5932)

Changed

  • Support scope attributes and make them as identifying for Tracer in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/trace. (#5924)
  • Support scope attributes and make them as identifying for Meter in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/metric. (#5926)
  • Support scope attributes and make them as identifying for Logger in go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/log. (#5925)
  • Make schema URL and scope attributes as identifying for Tracer in go.opentelemetry.io/otel/bridge/opentracing. (#5931)
  • Clear unneeded slice elements to allow GC to collect the objects in go.opentelemetry.io/otel/sdk/metric and go.opentelemetry.io/otel/sdk/trace. (#5804)

Fixed

  • Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881)
  • go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc now keeps the metadata already present in the context when WithHeaders is used. (#5892)
  • go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc now keeps the metadata already present in the context when WithHeaders is used. (#5911)
  • go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc now keeps the metadata already present in the context when WithHeaders is used. (#5915)
  • Fix go.opentelemetry.io/otel/exporters/prometheus trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc. (#5944)
  • Fix WithEndpointURL to always use a secure connection when an https URL is passed in go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp. (#5944)
  • Fix incorrect metrics generated from callbacks when multiple readers are used in go.opentelemetry.io/otel/sdk/metric. (#5900)

Removed

Commits
  • 7cfbd86 Release v1.32.0/v0.54.0/v0.8.0/v0.0.11 (#5960)
  • 2be617e fix(deps): update github.com/opentracing-contrib/go-grpc/test digest to 51a56...
  • 6db18df fix(deps): update module github.com/opentracing-contrib/go-grpc to v0.1.0 (#5...
  • ef12bf8 chore(deps): update golang.org/x (#5957)
  • 85eb76f Allow GC to collect unneeded slice elements (#5804)
  • 1492efa Fix incorrect metrics getting generated from multiple readers (#5900)
  • d2b0663 fix(deps): update module go.opentelemetry.io/build-tools/multimod to v0.15.0 ...
  • 394cbd2 chore(deps): update lycheeverse/lychee-action action to v2.1.0 (#5950)
  • 37b2537 fix(deps): update github.com/opentracing-contrib/go-grpc digest to e3cbcab (#...
  • 7f68356 fix(deps): update module go.opentelemetry.io/build-tools/semconvgen to v0.15....
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=go.opentelemetry.io/otel/exporters/stdout/stdouttrace&package-manager=go_modules&previous-version=1.31.0&new-version=1.32.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c5c62a5902..341f5b8103 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 go.opentelemetry.io/otel/sdk v1.32.0 go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/automaxprocs v1.6.0 diff --git a/go.sum b/go.sum index e1627ecb94..f465f7658a 100644 --- a/go.sum +++ b/go.sum @@ -554,8 +554,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYa go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= From 2bbd97746d6df3ab3e748b58167513db6556dcde Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev 
<24719519+KaloyanTanev@users.noreply.github.com> Date: Thu, 14 Nov 2024 09:32:06 +0100 Subject: [PATCH 78/89] cmd: charon test mev create block tests (#3378) We have had tests to test latency towards MEV relay, however, it was a simple ping. Those tests add creation of real block headers. category: feature ticket: none --- cmd/test.go | 6 + cmd/testbeacon.go | 5 - cmd/testmev.go | 359 ++++++++++++++++++++++++++++++++++- cmd/testmev_internal_test.go | 41 +++- go.mod | 7 +- go.sum | 16 +- 6 files changed, 405 insertions(+), 29 deletions(-) diff --git a/cmd/test.go b/cmd/test.go index 79d2bc9883..b048c9b985 100644 --- a/cmd/test.go +++ b/cmd/test.go @@ -39,6 +39,12 @@ const ( mevTestCategory = "mev" performanceTestCategory = "performance" allTestCategory = "all" + + committeeSizePerSlot = 64 + subCommitteeSize = 4 + slotTime = 12 * time.Second + slotsInEpoch = 32 + epochTime = slotsInEpoch * slotTime ) type testConfig struct { diff --git a/cmd/testbeacon.go b/cmd/testbeacon.go index ce75cfd274..12bd4c8fbd 100644 --- a/cmd/testbeacon.go +++ b/cmd/testbeacon.go @@ -152,11 +152,6 @@ const ( thresholdBeaconSimulationAvg = 200 * time.Millisecond thresholdBeaconSimulationPoor = 400 * time.Millisecond - committeeSizePerSlot = 64 - subCommitteeSize = 4 - slotTime = 12 * time.Second - slotsInEpoch = 32 - epochTime = slotsInEpoch * slotTime ) func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) error) *cobra.Command { diff --git a/cmd/testmev.go b/cmd/testmev.go index 0bf49e395e..b799558894 100644 --- a/cmd/testmev.go +++ b/cmd/testmev.go @@ -3,24 +3,36 @@ package cmd import ( + "bytes" "context" + "encoding/hex" + "encoding/json" "fmt" "io" "net/http" + "net/http/httptrace" + "strconv" "strings" "time" + builderspec "github.com/attestantio/go-builder-client/spec" + eth2deneb "github.com/attestantio/go-eth2-client/api/v1/deneb" + eth2a "github.com/attestantio/go-eth2-client/spec/altair" + eth2p0 "github.com/attestantio/go-eth2-client/spec/phase0" 
"github.com/spf13/cobra" "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" "github.com/obolnetwork/charon/app/errors" "github.com/obolnetwork/charon/app/log" + "github.com/obolnetwork/charon/app/z" ) type testMEVConfig struct { testConfig - Endpoints []string + Endpoints []string + BeaconNodeEndpoint string + LoadTestBlocks uint } type testCaseMEV func(context.Context, *testMEVConfig, string) testResult @@ -28,8 +40,12 @@ type testCaseMEV func(context.Context, *testMEVConfig, string) testResult const ( thresholdMEVMeasureAvg = 40 * time.Millisecond thresholdMEVMeasurePoor = 100 * time.Millisecond + thresholdMEVBlockAvg = 500 * time.Millisecond + thresholdMEVBlockPoor = 800 * time.Millisecond ) +var errStatusCodeNot200 = errors.New("status code not 200 OK") + func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) error) *cobra.Command { var config testMEVConfig @@ -53,15 +69,19 @@ func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) error } func bindTestMEVFlags(cmd *cobra.Command, config *testMEVConfig, flagsPrefix string) { - endpoints := flagsPrefix + "endpoints" - cmd.Flags().StringSliceVar(&config.Endpoints, endpoints, nil, "[REQUIRED] Comma separated list of one or more MEV relay endpoint URLs.") - mustMarkFlagRequired(cmd, endpoints) + cmd.Flags().StringSliceVar(&config.Endpoints, flagsPrefix+"endpoints", nil, "[REQUIRED] Comma separated list of one or more MEV relay endpoint URLs.") + cmd.Flags().StringVar(&config.BeaconNodeEndpoint, flagsPrefix+"beacon-node-endpoint", "", "[REQUIRED] Beacon node endpoint URL used for block creation test.") + cmd.Flags().UintVar(&config.LoadTestBlocks, flagsPrefix+"load-test-blocks", 3, "Amount of blocks the 'createMultipleBlocks' test will create.") + mustMarkFlagRequired(cmd, flagsPrefix+"endpoints") + mustMarkFlagRequired(cmd, flagsPrefix+"beacon-node-endpoint") } func supportedMEVTestCases() map[testCaseName]testCaseMEV { return map[testCaseName]testCaseMEV{ - {name: "ping", 
order: 1}: mevPingTest, - {name: "pingMeasure", order: 2}: mevPingMeasureTest, + {name: "ping", order: 1}: mevPingTest, + {name: "pingMeasure", order: 2}: mevPingMeasureTest, + {name: "createBlock", order: 3}: mevCreateBlockTest, + {name: "createMultipleBlocks", order: 4}: mevCreateMultipleBlocksTest, } } @@ -239,6 +259,116 @@ func mevPingMeasureTest(ctx context.Context, _ *testMEVConfig, target string) te return testRes } +func mevCreateBlockTest(ctx context.Context, conf *testMEVConfig, target string) testResult { + testRes := testResult{Name: "CreateBlock"} + + latestBlock, err := latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) + if err != nil { + return failedTestResult(testRes, err) + } + + // wait for beginning of next slot, as the block for current one might have already been proposed + latestBlockTSUnix, err := strconv.ParseInt(latestBlock.Body.ExecutionPayload.Timestamp, 10, 64) + if err != nil { + return failedTestResult(testRes, err) + } + latestBlockTS := time.Unix(latestBlockTSUnix, 0) + nextBlockTS := latestBlockTS.Add(slotTime) + for time.Now().Before(nextBlockTS) && ctx.Err() == nil { + sleepWithContext(ctx, time.Millisecond) + } + + latestSlot, err := strconv.ParseInt(latestBlock.Slot, 10, 64) + if err != nil { + return failedTestResult(testRes, err) + } + nextSlot := latestSlot + 1 + epoch := nextSlot / slotsInEpoch + proposerDuties, err := fetchProposersForEpoch(ctx, conf, epoch) + if err != nil { + return failedTestResult(testRes, err) + } + + log.Info(ctx, "Starting attempts for block creation", z.Any("mev_relay", target)) + rtt, err := createMEVBlock(ctx, conf, target, nextSlot, latestBlock, proposerDuties) + if err != nil { + return failedTestResult(testRes, err) + } + testRes = evaluateRTT(rtt, testRes, thresholdMEVBlockAvg, thresholdMEVBlockPoor) + + return testRes +} + +func mevCreateMultipleBlocksTest(ctx context.Context, conf *testMEVConfig, target string) testResult { + testRes := testResult{Name: "CreateMultipleBlocks"} + + 
latestBlock, err := latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) + if err != nil { + return failedTestResult(testRes, err) + } + + // wait for beginning of next slot, as the block for current one might have already been proposed + latestBlockTSUnix, err := strconv.ParseInt(latestBlock.Body.ExecutionPayload.Timestamp, 10, 64) + if err != nil { + failedTestResult(testRes, err) + } + latestBlockTS := time.Unix(latestBlockTSUnix, 0) + nextBlockTS := latestBlockTS.Add(slotTime) + for time.Now().Before(nextBlockTS) && ctx.Err() == nil { + sleepWithContext(ctx, time.Millisecond) + } + + latestSlot, err := strconv.ParseInt(latestBlock.Slot, 10, 64) + if err != nil { + return failedTestResult(testRes, err) + } + nextSlot := latestSlot + 1 + epoch := nextSlot / slotsInEpoch + proposerDuties, err := fetchProposersForEpoch(ctx, conf, epoch) + if err != nil { + return failedTestResult(testRes, err) + } + + allBlocksRTT := []time.Duration{} + log.Info(ctx, "Starting attempts for multiple block creation", z.Any("mev_relay", target), z.Any("blocks", conf.LoadTestBlocks)) + for ctx.Err() == nil { + startIteration := time.Now() + rtt, err := createMEVBlock(ctx, conf, target, nextSlot, latestBlock, proposerDuties) + if err != nil { + return failedTestResult(testRes, err) + } + allBlocksRTT = append(allBlocksRTT, rtt) + if len(allBlocksRTT) == int(conf.LoadTestBlocks) { + break + } + // wait for the next slot - time it took createMEVBlock - 1 sec + sleepWithContext(ctx, slotTime-time.Since(startIteration)%slotTime-time.Second) + startBeaconBlockFetch := time.Now() + // get the new latest block, produced during 'nextSlot' + latestBlock, err = latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) + if err != nil { + return failedTestResult(testRes, err) + } + latestSlot, err := strconv.ParseInt(latestBlock.Slot, 10, 64) + if err != nil { + return failedTestResult(testRes, err) + } + nextSlot = latestSlot + 1 + // wait 1 second - the time it took to fetch the latest block + 
sleepWithContext(ctx, time.Second-time.Since(startBeaconBlockFetch)) + } + + totalRTT := time.Duration(0) + for _, rtt := range allBlocksRTT { + totalRTT += rtt + } + averageRTT := totalRTT / time.Duration(len(allBlocksRTT)) + + testRes = evaluateRTT(averageRTT, testRes, thresholdMEVBlockAvg, thresholdMEVBlockPoor) + + return testRes +} + // helper functions // Shorten the hash of the MEV relay endpoint @@ -261,3 +391,220 @@ func formatMEVRelayName(urlString string) string { return splitScheme[0] + "://" + hashShort + "@" + hashSplit[1] } + +func getBlockHeader(ctx context.Context, target string, nextSlot int64, blockHash string, validatorPubKey string) (builderspec.VersionedSignedBuilderBid, time.Duration, error) { + var start time.Time + var firstByte time.Duration + trace := &httptrace.ClientTrace{ + GotFirstResponseByte: func() { + firstByte = time.Since(start) + }, + } + start = time.Now() + req, err := http.NewRequestWithContext( + httptrace.WithClientTrace(ctx, trace), + http.MethodGet, + fmt.Sprintf("%v/eth/v1/builder/header/%v/%v/%v", target, nextSlot, blockHash, validatorPubKey), + nil) + if err != nil { + return builderspec.VersionedSignedBuilderBid{}, 0, errors.Wrap(err, "http request") + } + + resp, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return builderspec.VersionedSignedBuilderBid{}, 0, errors.Wrap(err, "http request rtt") + } + defer resp.Body.Close() + + // the current proposer was not registered with the builder, wait for next block + if resp.StatusCode != http.StatusOK { + return builderspec.VersionedSignedBuilderBid{}, 0, errStatusCodeNot200 + } + rttGetHeader := firstByte + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return builderspec.VersionedSignedBuilderBid{}, 0, errors.Wrap(err, "http response body") + } + + var builderBid builderspec.VersionedSignedBuilderBid + err = json.Unmarshal(bodyBytes, &builderBid) + if err != nil { + return builderspec.VersionedSignedBuilderBid{}, 0, errors.Wrap(err, "http 
response json") + } + + return builderBid, rttGetHeader, nil +} + +func createMEVBlock(ctx context.Context, conf *testMEVConfig, target string, nextSlot int64, latestBlock BeaconBlockMessage, proposerDuties []ProposerDutiesData) (time.Duration, error) { + var rttGetHeader time.Duration + var builderBid builderspec.VersionedSignedBuilderBid + for ctx.Err() == nil { + startIteration := time.Now() + epoch := nextSlot / slotsInEpoch + + validatorPubKey, err := getValidatorPKForSlot(proposerDuties, nextSlot) + if err != nil { + // if no PK found, refresh the proposerDuties + proposerDuties, err = fetchProposersForEpoch(ctx, conf, epoch) + if err != nil { + return 0, err + } + validatorPubKey, err = getValidatorPKForSlot(proposerDuties, nextSlot) + if err != nil { + return 0, err + } + } + + builderBid, rttGetHeader, err = getBlockHeader(ctx, target, nextSlot, latestBlock.Body.ExecutionPayload.BlockHash, validatorPubKey) + if err != nil { + // the current proposer was not registered with the builder, wait for next block + if errors.Is(err, errStatusCodeNot200) { + sleepWithContext(ctx, slotTime-time.Since(startIteration)-time.Second) + startBeaconBlockFetch := time.Now() + latestBlock, err = latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) + if err != nil { + return 0, err + } + nextSlot++ + // wait 1 second - the time it took to fetch the latest block + sleepWithContext(ctx, time.Second-time.Since(startBeaconBlockFetch)) + + continue + } + + return 0, err + } + log.Info(ctx, "Created block headers for slot", z.Any("slot", nextSlot), z.Any("target", target)) + + break + } + + blindedBeaconBlock := eth2deneb.BlindedBeaconBlock{ + Slot: 0, + ProposerIndex: 0, + ParentRoot: eth2p0.Root{}, + StateRoot: eth2p0.Root{}, + Body: ð2deneb.BlindedBeaconBlockBody{ + RANDAOReveal: eth2p0.BLSSignature{}, + ETH1Data: ð2p0.ETH1Data{}, + Graffiti: eth2p0.Hash32{}, + ProposerSlashings: []*eth2p0.ProposerSlashing{}, + AttesterSlashings: []*eth2p0.AttesterSlashing{}, + Attestations: 
[]*eth2p0.Attestation{}, + Deposits: []*eth2p0.Deposit{}, + VoluntaryExits: []*eth2p0.SignedVoluntaryExit{}, + SyncAggregate: ð2a.SyncAggregate{}, + ExecutionPayloadHeader: builderBid.Deneb.Message.Header, + }, + } + + sig, err := hex.DecodeString("b9251a82040d4620b8c5665f328ee6c2eaa02d31d71d153f4abba31a7922a981e541e85283f0ced387d26e86aef9386d18c6982b9b5f8759882fe7f25a328180d86e146994ef19d28bc1432baf29751dec12b5f3d65dbbe224d72cf900c6831a") + if err != nil { + return 0, errors.Wrap(err, "decode signature") + } + + payload := eth2deneb.SignedBlindedBeaconBlock{ + Message: &blindedBeaconBlock, + Signature: eth2p0.BLSSignature(sig), + } + payloadJSON, err := json.Marshal(payload) + if err != nil { + return 0, errors.Wrap(err, "signed blinded beacon block json payload marshal") + } + rttSubmitBlock, err := requestRTT(ctx, target+"/eth/v1/builder/blinded_blocks", http.MethodPost, bytes.NewReader(payloadJSON), 400) + if err != nil { + return 0, err + } + + return rttGetHeader + rttSubmitBlock, nil +} + +type BeaconBlock struct { + Data BeaconBlockData `json:"data"` +} + +type BeaconBlockData struct { + Message BeaconBlockMessage `json:"message"` +} + +type BeaconBlockMessage struct { + Slot string `json:"slot"` + Body BeaconBlockBody `json:"body"` +} + +type BeaconBlockBody struct { + ExecutionPayload BeaconBlockExecPayload `json:"execution_payload"` +} + +type BeaconBlockExecPayload struct { + BlockHash string `json:"block_hash"` + Timestamp string `json:"timestamp"` +} + +func latestBeaconBlock(ctx context.Context, endpoint string) (BeaconBlockMessage, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%v/eth/v2/beacon/blocks/head", endpoint), nil) + if err != nil { + return BeaconBlockMessage{}, errors.Wrap(err, "http request") + } + resp, err := new(http.Client).Do(req) + if err != nil { + return BeaconBlockMessage{}, errors.Wrap(err, "http request do") + } + defer resp.Body.Close() + bodyBytes, err := io.ReadAll(resp.Body) + if err != 
nil { + return BeaconBlockMessage{}, errors.Wrap(err, "http response body") + } + + var beaconBlock BeaconBlock + err = json.Unmarshal(bodyBytes, &beaconBlock) + if err != nil { + return BeaconBlockMessage{}, errors.Wrap(err, "http response json") + } + + return beaconBlock.Data.Message, nil +} + +type ProposerDuties struct { + Data []ProposerDutiesData `json:"data"` +} + +type ProposerDutiesData struct { + PubKey string `json:"pubkey"` + Slot string `json:"slot"` +} + +func fetchProposersForEpoch(ctx context.Context, conf *testMEVConfig, epoch int64) ([]ProposerDutiesData, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%v/eth/v1/validator/duties/proposer/%v", conf.BeaconNodeEndpoint, epoch), nil) + if err != nil { + return nil, errors.Wrap(err, "http request") + } + resp, err := new(http.Client).Do(req) + if err != nil { + return nil, errors.Wrap(err, "http request do") + } + defer resp.Body.Close() + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "http response body") + } + + var proposerDuties ProposerDuties + err = json.Unmarshal(bodyBytes, &proposerDuties) + if err != nil { + return nil, errors.Wrap(err, "http response json") + } + + return proposerDuties.Data, nil +} + +func getValidatorPKForSlot(proposers []ProposerDutiesData, slot int64) (string, error) { + slotString := strconv.FormatInt(slot, 10) + for _, s := range proposers { + if s.Slot == slotString { + return s.PubKey, nil + } + } + + return "", errors.New("slot not found") +} diff --git a/cmd/testmev_internal_test.go b/cmd/testmev_internal_test.go index 3b62aecb44..e692712199 100644 --- a/cmd/testmev_internal_test.go +++ b/cmd/testmev_internal_test.go @@ -26,6 +26,8 @@ func TestMEVTest(t *testing.T) { endpoint1 := fmt.Sprintf("http://localhost:%v", port1) port2 := testutil.GetFreePort(t) endpoint2 := fmt.Sprintf("http://localhost:%v", port2) + port3 := testutil.GetFreePort(t) + endpoint3 := 
fmt.Sprintf("http://localhost:%v", port3) mockedMEVNode := StartHealthyMockedMEVNode(t) defer mockedMEVNode.Close() @@ -46,13 +48,16 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: time.Minute, }, - Endpoints: []string{mockedMEVNode.URL}, + Endpoints: []string{mockedMEVNode.URL}, + BeaconNodeEndpoint: endpoint3, }, expected: testCategoryResult{ Targets: map[string][]testResult{ mockedMEVNode.URL: { {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -67,17 +72,22 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, + Endpoints: []string{endpoint1, endpoint2}, + BeaconNodeEndpoint: endpoint3, }, expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, }, endpoint2: { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: 
connect: connection refused`, port2))}}, {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, }, }, }, @@ -92,7 +102,8 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: 100 * time.Nanosecond, }, - Endpoints: []string{endpoint1, endpoint2}, + Endpoints: []string{endpoint1, endpoint2}, + BeaconNodeEndpoint: endpoint3, }, expected: testCategoryResult{ Targets: map[string][]testResult{ @@ -115,17 +126,22 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, + Endpoints: []string{endpoint1, endpoint2}, + BeaconNodeEndpoint: endpoint3, }, expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, }, endpoint2: { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: 
testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, }, }, }, @@ -140,7 +156,8 @@ func TestMEVTest(t *testing.T) { TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, + Endpoints: []string{endpoint1, endpoint2}, + BeaconNodeEndpoint: endpoint3, }, expected: testCategoryResult{}, expectedErr: "test case not supported", @@ -154,7 +171,8 @@ func TestMEVTest(t *testing.T) { TestCases: []string{"ping"}, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, + Endpoints: []string{endpoint1, endpoint2}, + BeaconNodeEndpoint: endpoint3, }, expected: testCategoryResult{ Targets: map[string][]testResult{ @@ -177,17 +195,22 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, + Endpoints: []string{endpoint1, endpoint2}, + BeaconNodeEndpoint: endpoint3, }, expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: 
testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, }, endpoint2: { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, }, }, Score: categoryScoreC, @@ -247,13 +270,13 @@ func TestMEVTestFlags(t *testing.T) { }{ { name: "default scenario", - args: []string{"mev", "--endpoints=\"test.endpoint\""}, + args: []string{"mev", "--endpoints=\"test.endpoint\"", "--beacon-node-endpoint=\"test.endpoint\""}, expectedErr: "", }, { name: "no endpoints flag", args: []string{"mev"}, - expectedErr: "required flag(s) \"endpoints\" not set", + expectedErr: "required flag(s) \"beacon-node-endpoint\", \"endpoints\" not set", }, { name: "no output toml on quiet", diff --git a/go.mod b/go.mod index 341f5b8103..db19cf76e8 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,13 @@ module github.com/obolnetwork/charon go 1.22 require ( + github.com/attestantio/go-builder-client v0.5.1 github.com/attestantio/go-eth2-client v0.21.10 github.com/bufbuild/buf v1.35.1 github.com/coinbase/kryptology v1.5.6-0.20220316191335-269410e1b06b github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/ferranbt/fastssz v0.1.4 - github.com/golang/snappy v0.0.4 + 
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/gofuzz v1.2.0 github.com/gorilla/mux v1.8.1 github.com/herumi/bls-eth-go-binary v1.36.1 @@ -59,11 +60,11 @@ require ( connectrpc.com/otelconnect v0.7.0 // indirect filippo.io/edwards25519 v1.0.0-rc.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/btcsuite/btcd v0.22.3 // indirect github.com/bufbuild/protocompile v0.14.0 // indirect github.com/bufbuild/protoplugin v0.0.0-20240323223605-e2735f6c31ee // indirect diff --git a/go.sum b/go.sum index f465f7658a..bbe69d01a5 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ObolNetwork/go-eth2-client v0.21.11-0.20240822135044-f0a5b21e02c6 h1:VEBrga7Dn5SwvJQEG3i2K7IAUQQvEmUulWxXoBDimnM= github.com/ObolNetwork/go-eth2-client v0.21.11-0.20240822135044-f0a5b21e02c6/go.mod 
h1:d7ZPNrMX8jLfIgML5u7QZxFo2AukLM+5m08iMaLdqb8= github.com/ObolNetwork/kryptology v0.0.0-20231016091344-eed023b6cac8 h1:IXoKQKGzebwtIzKADtZyAjL3MIr0m3zQFxlSxxWIdCU= @@ -31,6 +31,8 @@ github.com/ObolNetwork/kryptology v0.0.0-20231016091344-eed023b6cac8/go.mod h1:q github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/attestantio/go-builder-client v0.5.1 h1:zFeiWZrKBj43HkpaGchImkAjvarWdFv0gACLkBY0Pbs= +github.com/attestantio/go-builder-client v0.5.1/go.mod h1:1/ewo8zF6++C6Fldvtq5hjhp9ZAafIK91Vp7XrmUZsE= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -38,8 +40,8 @@ github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.22.3 h1:kYNaWFvOw6xvqP0vR20RP1Zq1DVMBxEO8QN5d1/EfNg= 
github.com/btcsuite/btcd v0.22.3/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= @@ -191,8 +193,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= @@ -754,6 +756,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= From 4049c4a985a2efc562b6100b4f1ea90ba2ddd743 Mon Sep 17 00:00:00 2001 From: Kaloyan 
Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Thu, 14 Nov 2024 14:58:35 +0100 Subject: [PATCH 79/89] cmd: add version check to beacon node tests (#3379) A sanity check for the user if they are testing against a beacon node they expect. It's not a test at its original sense per se, as we always return Ok, it's more for information of the user. category: feature ticket: none --- app/log/loki/lokipb/v1/loki.pb.go | 2 +- app/peerinfo/peerinfopb/v1/peerinfo.pb.go | 2 +- app/protonil/testdata/v1/test.pb.go | 2 +- cluster/manifestpb/v1/manifest.pb.go | 2 +- cmd/testbeacon.go | 88 +++++++++++++++++------ cmd/testbeacon_internal_test.go | 6 ++ core/corepb/v1/consensus.pb.go | 2 +- core/corepb/v1/core.pb.go | 2 +- core/corepb/v1/parsigex.pb.go | 2 +- core/corepb/v1/priority.pb.go | 2 +- dkg/dkgpb/v1/bcast.pb.go | 2 +- dkg/dkgpb/v1/frost.pb.go | 2 +- dkg/dkgpb/v1/nodesigs.pb.go | 2 +- dkg/dkgpb/v1/sync.pb.go | 2 +- 14 files changed, 85 insertions(+), 33 deletions(-) diff --git a/app/log/loki/lokipb/v1/loki.pb.go b/app/log/loki/lokipb/v1/loki.pb.go index 4b6d54ab0f..5b76d1b5f5 100644 --- a/app/log/loki/lokipb/v1/loki.pb.go +++ b/app/log/loki/lokipb/v1/loki.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: app/log/loki/lokipb/v1/loki.proto diff --git a/app/peerinfo/peerinfopb/v1/peerinfo.pb.go b/app/peerinfo/peerinfopb/v1/peerinfo.pb.go index daaa604547..5524c0716f 100644 --- a/app/peerinfo/peerinfopb/v1/peerinfo.pb.go +++ b/app/peerinfo/peerinfopb/v1/peerinfo.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: app/peerinfo/peerinfopb/v1/peerinfo.proto diff --git a/app/protonil/testdata/v1/test.pb.go b/app/protonil/testdata/v1/test.pb.go index 8b50c2d1b7..c69299f731 100644 --- a/app/protonil/testdata/v1/test.pb.go +++ b/app/protonil/testdata/v1/test.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: app/protonil/testdata/v1/test.proto diff --git a/cluster/manifestpb/v1/manifest.pb.go b/cluster/manifestpb/v1/manifest.pb.go index aa6b000c12..961f5fc880 100644 --- a/cluster/manifestpb/v1/manifest.pb.go +++ b/cluster/manifestpb/v1/manifest.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: cluster/manifestpb/v1/manifest.proto diff --git a/cmd/testbeacon.go b/cmd/testbeacon.go index 12bd4c8fbd..e96b6ae8b6 100644 --- a/cmd/testbeacon.go +++ b/cmd/testbeacon.go @@ -143,13 +143,12 @@ type SimulationCluster struct { } const ( - thresholdBeaconMeasureAvg = 40 * time.Millisecond - thresholdBeaconMeasurePoor = 100 * time.Millisecond - thresholdBeaconLoadAvg = 40 * time.Millisecond - thresholdBeaconLoadPoor = 100 * time.Millisecond - thresholdBeaconPeersAvg = 50 - thresholdBeaconPeersPoor = 20 - + thresholdBeaconMeasureAvg = 40 * time.Millisecond + thresholdBeaconMeasurePoor = 100 * time.Millisecond + thresholdBeaconLoadAvg = 40 * time.Millisecond + thresholdBeaconLoadPoor = 100 * time.Millisecond + thresholdBeaconPeersAvg = 50 + thresholdBeaconPeersPoor = 20 thresholdBeaconSimulationAvg = 200 * time.Millisecond thresholdBeaconSimulationPoor = 400 * time.Millisecond ) @@ -189,15 +188,16 @@ func supportedBeaconTestCases() map[testCaseName]testCaseBeacon { return map[testCaseName]testCaseBeacon{ {name: "ping", order: 1}: beaconPingTest, {name: "pingMeasure", order: 
2}: beaconPingMeasureTest, - {name: "isSynced", order: 3}: beaconIsSyncedTest, - {name: "peerCount", order: 4}: beaconPeerCountTest, - {name: "pingLoad", order: 5}: beaconPingLoadTest, + {name: "version", order: 3}: beaconVersionTest, + {name: "isSynced", order: 4}: beaconIsSyncedTest, + {name: "peerCount", order: 5}: beaconPeerCountTest, + {name: "pingLoad", order: 6}: beaconPingLoadTest, - {name: "simulate1", order: 6}: beaconSimulation1Test, - {name: "simulate10", order: 7}: beaconSimulation10Test, - {name: "simulate100", order: 8}: beaconSimulation100Test, - {name: "simulate500", order: 9}: beaconSimulation500Test, - {name: "simulate1000", order: 10}: beaconSimulation1000Test, + {name: "simulate1", order: 7}: beaconSimulation1Test, + {name: "simulate10", order: 8}: beaconSimulation10Test, + {name: "simulate100", order: 9}: beaconSimulation100Test, + {name: "simulate500", order: 10}: beaconSimulation500Test, + {name: "simulate1000", order: 11}: beaconSimulation1000Test, } } @@ -341,13 +341,12 @@ func runBeaconTest(ctx context.Context, queuedTestCases []testCaseName, allTestC func beaconPingTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { testRes := testResult{Name: "Ping"} - client := http.Client{} targetEndpoint := fmt.Sprintf("%v/eth/v1/node/health", target) req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) if err != nil { return failedTestResult(testRes, err) } - resp, err := client.Do(req) + resp, err := new(http.Client).Do(req) if err != nil { return failedTestResult(testRes, err) } @@ -375,6 +374,55 @@ func beaconPingMeasureTest(ctx context.Context, _ *testBeaconConfig, target stri return testRes } +func beaconVersionTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "version"} + + type versionData struct { + Version string `json:"version"` + } + type versionResponse struct { + Data versionData `json:"data"` + } + + targetEndpoint := 
fmt.Sprintf("%v/eth/v1/node/version", target) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) + if err != nil { + return failedTestResult(testRes, err) + } + resp, err := new(http.Client).Do(req) + if err != nil { + return failedTestResult(testRes, err) + } + + if resp.StatusCode > 399 { + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) + } + + b, err := io.ReadAll(resp.Body) + if err != nil { + return failedTestResult(testRes, err) + } + defer resp.Body.Close() + + var versionResp versionResponse + err = json.Unmarshal(b, &versionResp) + if err != nil { + return failedTestResult(testRes, err) + } + + // keep only provider, version and platform + splitVersion := strings.Split(versionResp.Data.Version, "/") + if len(splitVersion) > 3 { + splitVersion = splitVersion[:3] + } + version := strings.Join(splitVersion, "/") + + testRes.Measurement = version + testRes.Verdict = testVerdictOk + + return testRes +} + func beaconPingLoadTest(ctx context.Context, conf *testBeaconConfig, target string) testResult { testRes := testResult{Name: "BeaconLoad"} if !conf.LoadTest { @@ -420,13 +468,12 @@ func beaconIsSyncedTest(ctx context.Context, _ *testBeaconConfig, target string) Data eth2v1.SyncState `json:"data"` } - client := http.Client{} targetEndpoint := fmt.Sprintf("%v/eth/v1/node/syncing", target) req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) if err != nil { return failedTestResult(testRes, err) } - resp, err := client.Do(req) + resp, err := new(http.Client).Do(req) if err != nil { return failedTestResult(testRes, err) } @@ -468,13 +515,12 @@ func beaconPeerCountTest(ctx context.Context, _ *testBeaconConfig, target string Meta peerCountResponseMeta `json:"meta"` } - client := http.Client{} targetEndpoint := fmt.Sprintf("%v/eth/v1/node/peers?state=connected", target) req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) if err != nil { return 
failedTestResult(testRes, err) } - resp, err := client.Do(req) + resp, err := new(http.Client).Do(req) if err != nil { return failedTestResult(testRes, err) } diff --git a/cmd/testbeacon_internal_test.go b/cmd/testbeacon_internal_test.go index e54cda3481..382b64e367 100644 --- a/cmd/testbeacon_internal_test.go +++ b/cmd/testbeacon_internal_test.go @@ -53,6 +53,7 @@ func TestBeaconTest(t *testing.T) { mockedBeaconNode.URL: { {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "version", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "isSynced", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "peerCount", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, @@ -217,6 +218,7 @@ func defaultFailingBNTests(_ *testing.T, endpoint1 string, endpoint2 string, por endpoint1: { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "version", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection 
refused`, port1))}}, {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, @@ -229,6 +231,7 @@ func defaultFailingBNTests(_ *testing.T, endpoint1 string, endpoint2 string, por endpoint2: { {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "version", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, @@ -253,6 +256,9 @@ func startHealthyMockedBeaconNode(t *testing.T) *httptest.Server { case "/eth/v1/node/peers": _, err := w.Write([]byte(`{"meta":{"count":500}}`)) require.NoError(t, err) + case "/eth/v1/node/version": + _, err := w.Write([]byte(`{"data":{"version":"BeaconNodeProvider/v1.0.0/linux_x86_64"}}`)) + require.NoError(t, err) } w.WriteHeader(http.StatusOK) })) diff --git a/core/corepb/v1/consensus.pb.go b/core/corepb/v1/consensus.pb.go index 82ceec09ae..c2258c1d4f 100644 --- a/core/corepb/v1/consensus.pb.go +++ b/core/corepb/v1/consensus.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: core/corepb/v1/consensus.proto diff --git a/core/corepb/v1/core.pb.go b/core/corepb/v1/core.pb.go index 7b472d6fd6..be16005911 100644 --- a/core/corepb/v1/core.pb.go +++ b/core/corepb/v1/core.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: core/corepb/v1/core.proto diff --git a/core/corepb/v1/parsigex.pb.go b/core/corepb/v1/parsigex.pb.go index aa767fd59a..3c62dcc4a7 100644 --- a/core/corepb/v1/parsigex.pb.go +++ b/core/corepb/v1/parsigex.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: core/corepb/v1/parsigex.proto diff --git a/core/corepb/v1/priority.pb.go b/core/corepb/v1/priority.pb.go index e40e12cc88..ac41a161b8 100644 --- a/core/corepb/v1/priority.pb.go +++ b/core/corepb/v1/priority.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: core/corepb/v1/priority.proto diff --git a/dkg/dkgpb/v1/bcast.pb.go b/dkg/dkgpb/v1/bcast.pb.go index 2b31dc35ea..e18bb72a9a 100644 --- a/dkg/dkgpb/v1/bcast.pb.go +++ b/dkg/dkgpb/v1/bcast.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: dkg/dkgpb/v1/bcast.proto diff --git a/dkg/dkgpb/v1/frost.pb.go b/dkg/dkgpb/v1/frost.pb.go index 84e3f20281..5a51cb4d6a 100644 --- a/dkg/dkgpb/v1/frost.pb.go +++ b/dkg/dkgpb/v1/frost.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: dkg/dkgpb/v1/frost.proto diff --git a/dkg/dkgpb/v1/nodesigs.pb.go b/dkg/dkgpb/v1/nodesigs.pb.go index c76d912c8b..d19659b6f3 100644 --- a/dkg/dkgpb/v1/nodesigs.pb.go +++ b/dkg/dkgpb/v1/nodesigs.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: dkg/dkgpb/v1/nodesigs.proto diff --git a/dkg/dkgpb/v1/sync.pb.go b/dkg/dkgpb/v1/sync.pb.go index 53d1a1e2f6..8e48bda752 100644 --- a/dkg/dkgpb/v1/sync.pb.go +++ b/dkg/dkgpb/v1/sync.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: dkg/dkgpb/v1/sync.proto From b3452ebc31e9d688011660f5176339ac99d14be8 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:24:52 +0100 Subject: [PATCH 80/89] cmd: rename test performance to test infra command (#3380) As we discussed previously `test performance` is a bit vague command. Renaming it to `test infra`. 
category: refactor ticket: none --- cmd/ascii.go | 14 ++--- cmd/cmd.go | 2 +- cmd/test.go | 24 ++++----- cmd/testall.go | 16 +++--- cmd/testperformance.go | 78 ++++++++++++++-------------- cmd/testperformance_internal_test.go | 42 +++++++-------- 6 files changed, 88 insertions(+), 88 deletions(-) diff --git a/cmd/ascii.go b/cmd/ascii.go index 54603a69c7..6b4d6de64b 100644 --- a/cmd/ascii.go +++ b/cmd/ascii.go @@ -48,14 +48,14 @@ func mevASCII() []string { } } -func performanceASCII() []string { +func infraASCII() []string { return []string{ - " _____ __ ", - "| __ \\ / _| ", - "| |__) |__ _ __| |_ ___ _ __ _ __ ___ __ _ _ __ ___ ___ ", - "| ___/ _ \\ '__| _/ _ \\| '__| '_ ` _ \\ / _` | '_ \\ / __/ _ \\ ", - "| | | __/ | | || (_) | | | | | | | | (_| | | | | (_| __/ ", - "|_| \\___|_| |_| \\___/|_| |_| |_| |_|\\__,_|_| |_|\\___\\___| ", + " _____ __ ", + "|_ _| / _| ", + " | | _ __ | |_ _ __ __ _ ", + " | | | '_ \\| _| '__/ _` | ", + " _| |_| | | | | | | | (_| | ", + "|_____|_| |_|_| |_| \\__,_| ", } } diff --git a/cmd/cmd.go b/cmd/cmd.go index 51dc3a7f83..387702e901 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -53,7 +53,7 @@ func New() *cobra.Command { newTestBeaconCmd(runTestBeacon), newTestValidatorCmd(runTestValidator), newTestMEVCmd(runTestMEV), - newTestPerformanceCmd(runTestPerformance), + newTestInfraCmd(runTestInfra), ), newAddValidatorsCmd(runAddValidatorsSolo), newViewClusterManifestCmd(runViewClusterManifest), diff --git a/cmd/test.go b/cmd/test.go index b048c9b985..df9c67375b 100644 --- a/cmd/test.go +++ b/cmd/test.go @@ -33,12 +33,12 @@ var ( ) const ( - peersTestCategory = "peers" - beaconTestCategory = "beacon" - validatorTestCategory = "validator" - mevTestCategory = "mev" - performanceTestCategory = "performance" - allTestCategory = "all" + peersTestCategory = "peers" + beaconTestCategory = "beacon" + validatorTestCategory = "validator" + mevTestCategory = "mev" + infraTestCategory = "infra" + allTestCategory = "all" committeeSizePerSlot = 64 
subCommitteeSize = 4 @@ -58,7 +58,7 @@ func newTestCmd(cmds ...*cobra.Command) *cobra.Command { root := &cobra.Command{ Use: "test", Short: "Test subcommands provide test suite to evaluate current cluster setup", - Long: `Test subcommands provide test suite to evaluate current cluster setup. The full validator stack can be tested - charon peers, consensus layer, validator client, MEV. Current machine's performance can be examined as well.`, + Long: `Test subcommands provide test suite to evaluate current cluster setup. The full validator stack can be tested - charon peers, consensus layer, validator client, MEV. Current machine's infra can be examined as well.`, } root.AddCommand(cmds...) @@ -92,8 +92,8 @@ func listTestCases(cmd *cobra.Command) []string { testCaseNames = maps.Keys(supportedValidatorTestCases()) case mevTestCategory: testCaseNames = maps.Keys(supportedMEVTestCases()) - case performanceTestCategory: - testCaseNames = maps.Keys(supportedPerformanceTestCases()) + case infraTestCategory: + testCaseNames = maps.Keys(supportedInfraTestCases()) case allTestCategory: testCaseNames = slices.Concat( maps.Keys(supportedPeerTestCases()), @@ -102,7 +102,7 @@ func listTestCases(cmd *cobra.Command) []string { maps.Keys(supportedBeaconTestCases()), maps.Keys(supportedValidatorTestCases()), maps.Keys(supportedMEVTestCases()), - maps.Keys(supportedPerformanceTestCases()), + maps.Keys(supportedInfraTestCases()), ) default: log.Warn(cmd.Context(), "Unknown command for listing test cases", nil, z.Str("name", cmd.Name())) @@ -239,8 +239,8 @@ func writeResultToWriter(res testCategoryResult, w io.Writer) error { lines = append(lines, validatorASCII()...) case mevTestCategory: lines = append(lines, mevASCII()...) - case performanceTestCategory: - lines = append(lines, performanceASCII()...) + case infraTestCategory: + lines = append(lines, infraASCII()...) default: lines = append(lines, categoryDefaultASCII()...) 
} diff --git a/cmd/testall.go b/cmd/testall.go index 7aab22fe93..06bc41a534 100644 --- a/cmd/testall.go +++ b/cmd/testall.go @@ -13,11 +13,11 @@ import ( type testAllConfig struct { testConfig - Peers testPeersConfig - Beacon testBeaconConfig - Validator testValidatorConfig - MEV testMEVConfig - Performance testPerformanceConfig + Peers testPeersConfig + Beacon testBeaconConfig + Validator testValidatorConfig + MEV testMEVConfig + Infra testInfraConfig } func newTestAllCmd(runFunc func(context.Context, io.Writer, testAllConfig) error) *cobra.Command { @@ -42,7 +42,7 @@ func newTestAllCmd(runFunc func(context.Context, io.Writer, testAllConfig) error bindTestBeaconFlags(cmd, &config.Beacon, "beacon-") bindTestValidatorFlags(cmd, &config.Validator, "validator-") bindTestMEVFlags(cmd, &config.MEV, "mev-") - bindTestPerformanceFlags(cmd, &config.Performance, "performance-") + bindTestInfraFlags(cmd, &config.Infra, "infra-") bindP2PFlags(cmd, &config.Peers.P2P) bindDataDirFlag(cmd.Flags(), &config.Peers.DataDir) @@ -81,8 +81,8 @@ func runTestAll(ctx context.Context, w io.Writer, cfg testAllConfig) (err error) return err } - cfg.Performance.testConfig = cfg.testConfig - err = runTestPerformance(ctx, w, cfg.Performance) + cfg.Infra.testConfig = cfg.testConfig + err = runTestInfra(ctx, w, cfg.Infra) if err != nil { return err } diff --git a/cmd/testperformance.go b/cmd/testperformance.go index 5cdc0b99f9..c2160b6764 100644 --- a/cmd/testperformance.go +++ b/cmd/testperformance.go @@ -26,7 +26,7 @@ import ( "github.com/obolnetwork/charon/app/z" ) -type testPerformanceConfig struct { +type testInfraConfig struct { testConfig DiskIOTestFileDir string DiskIOBlockSizeKb int @@ -75,13 +75,13 @@ const ( var errFioNotFound = errors.New("fio command not found, install fio from https://fio.readthedocs.io/en/latest/fio_doc.html#binary-packages or using the package manager of your choice (apt, yum, brew, etc.)") -func newTestPerformanceCmd(runFunc func(context.Context, io.Writer, 
testPerformanceConfig) error) *cobra.Command { - var config testPerformanceConfig +func newTestInfraCmd(runFunc func(context.Context, io.Writer, testInfraConfig) error) *cobra.Command { + var config testInfraConfig cmd := &cobra.Command{ - Use: "performance", - Short: "Run multiple hardware and connectivity performance tests", - Long: `Run multiple hardware and connectivity performance tests. Verify that Charon is running on host with sufficient capabilities.`, + Use: "infra", + Short: "Run multiple hardware and internet connectivity tests", + Long: `Run multiple hardware and internet connectivity tests. Verify that Charon is running on host with sufficient capabilities.`, Args: cobra.NoArgs, PreRunE: func(cmd *cobra.Command, _ []string) error { return mustOutputToFileOnQuiet(cmd) @@ -92,36 +92,36 @@ func newTestPerformanceCmd(runFunc func(context.Context, io.Writer, testPerforma } bindTestFlags(cmd, &config.testConfig) - bindTestPerformanceFlags(cmd, &config, "") + bindTestInfraFlags(cmd, &config, "") return cmd } -func bindTestPerformanceFlags(cmd *cobra.Command, config *testPerformanceConfig, flagsPrefix string) { +func bindTestInfraFlags(cmd *cobra.Command, config *testInfraConfig, flagsPrefix string) { cmd.Flags().StringVar(&config.DiskIOTestFileDir, flagsPrefix+"disk-io-test-file-dir", "", "Directory at which disk performance will be measured. If none specified, current user's home directory will be used.") cmd.Flags().IntVar(&config.DiskIOBlockSizeKb, flagsPrefix+"disk-io-block-size-kb", 4096, "The block size in kilobytes used for I/O units. Same value applies for both reads and writes.") cmd.Flags().StringSliceVar(&config.InternetTestServersOnly, flagsPrefix+"internet-test-servers-only", []string{}, "List of specific server names to be included for the internet tests, the best performing one is chosen. 
If not provided, closest and best performing servers are chosen automatically.") cmd.Flags().StringSliceVar(&config.InternetTestServersExclude, flagsPrefix+"internet-test-servers-exclude", []string{}, "List of server names to be excluded from the tests. To be specified only if you experience issues with a server that is wrongly considered best performing.") } -func supportedPerformanceTestCases() map[testCaseName]func(context.Context, *testPerformanceConfig) testResult { - return map[testCaseName]func(context.Context, *testPerformanceConfig) testResult{ - {name: "diskWriteSpeed", order: 1}: performanceDiskWriteSpeedTest, - {name: "diskWriteIOPS", order: 2}: performanceDiskWriteIOPSTest, - {name: "diskReadSpeed", order: 3}: performanceDiskReadSpeedTest, - {name: "diskReadIOPS", order: 4}: performanceDiskReadIOPSTest, - {name: "availableMemory", order: 5}: performanceAvailableMemoryTest, - {name: "totalMemory", order: 6}: performanceTotalMemoryTest, - {name: "internetLatency", order: 7}: performanceInternetLatencyTest, - {name: "internetDownloadSpeed", order: 8}: performanceInternetDownloadSpeedTest, - {name: "internetUploadSpeed", order: 9}: performanceInternetUploadSpeedTest, +func supportedInfraTestCases() map[testCaseName]func(context.Context, *testInfraConfig) testResult { + return map[testCaseName]func(context.Context, *testInfraConfig) testResult{ + {name: "diskWriteSpeed", order: 1}: infraDiskWriteSpeedTest, + {name: "diskWriteIOPS", order: 2}: infraDiskWriteIOPSTest, + {name: "diskReadSpeed", order: 3}: infraDiskReadSpeedTest, + {name: "diskReadIOPS", order: 4}: infraDiskReadIOPSTest, + {name: "availableMemory", order: 5}: infraAvailableMemoryTest, + {name: "totalMemory", order: 6}: infraTotalMemoryTest, + {name: "internetLatency", order: 7}: infraInternetLatencyTest, + {name: "internetDownloadSpeed", order: 8}: infraInternetDownloadSpeedTest, + {name: "internetUploadSpeed", order: 9}: infraInternetUploadSpeedTest, } } -func runTestPerformance(ctx 
context.Context, w io.Writer, cfg testPerformanceConfig) (err error) { - log.Info(ctx, "Starting machine performance and network connectivity test") +func runTestInfra(ctx context.Context, w io.Writer, cfg testInfraConfig) (err error) { + log.Info(ctx, "Starting hardware performance and network connectivity test") - testCases := supportedPerformanceTestCases() + testCases := supportedInfraTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { return errors.New("test case not supported") @@ -135,7 +135,7 @@ func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceCon testResults := make(map[string][]testResult) startTime := time.Now() - go testSinglePerformance(timeoutCtx, queuedTests, testCases, cfg, testResultsChan) + go testSingleInfra(timeoutCtx, queuedTests, testCases, cfg, testResultsChan) for result := range testResultsChan { maps.Copy(testResults, result) @@ -153,7 +153,7 @@ func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceCon } res := testCategoryResult{ - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, Targets: testResults, ExecutionTime: execTime, Score: score, @@ -176,14 +176,14 @@ func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceCon return nil } -// hardware and internet connectivity performance tests +// hardware and internet connectivity tests -func testSinglePerformance(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]func(context.Context, *testPerformanceConfig) testResult, cfg testPerformanceConfig, resCh chan map[string][]testResult) { +func testSingleInfra(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]func(context.Context, *testInfraConfig) testResult, cfg testInfraConfig, resCh chan map[string][]testResult) { defer close(resCh) singleTestResCh := make(chan testResult) allTestRes := []testResult{} - // run all performance tests 
for a performance client, pushing each completed test to the channel until all are complete or timeout occurs - go testPerformance(ctx, queuedTestCases, allTestCases, cfg, singleTestResCh) + // run all infra tests for a client, pushing each completed test to the channel until all are complete or timeout occurs + go testInfra(ctx, queuedTestCases, allTestCases, cfg, singleTestResCh) testCounter := 0 finished := false @@ -209,7 +209,7 @@ func testSinglePerformance(ctx context.Context, queuedTestCases []testCaseName, resCh <- map[string][]testResult{"local": allTestRes} } -func testPerformance(ctx context.Context, queuedTests []testCaseName, allTests map[testCaseName]func(context.Context, *testPerformanceConfig) testResult, cfg testPerformanceConfig, ch chan testResult) { +func testInfra(ctx context.Context, queuedTests []testCaseName, allTests map[testCaseName]func(context.Context, *testInfraConfig) testResult, cfg testInfraConfig, ch chan testResult) { defer close(ch) for _, t := range queuedTests { select { @@ -221,7 +221,7 @@ func testPerformance(ctx context.Context, queuedTests []testCaseName, allTests m } } -func performanceDiskWriteSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraDiskWriteSpeedTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "DiskWriteSpeed"} var err error @@ -273,7 +273,7 @@ func performanceDiskWriteSpeedTest(ctx context.Context, conf *testPerformanceCon return testRes } -func performanceDiskWriteIOPSTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraDiskWriteIOPSTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "DiskWriteIOPS"} var err error @@ -324,7 +324,7 @@ func performanceDiskWriteIOPSTest(ctx context.Context, conf *testPerformanceConf return testRes } -func performanceDiskReadSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraDiskReadSpeedTest(ctx 
context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "DiskReadSpeed"} var err error @@ -376,7 +376,7 @@ func performanceDiskReadSpeedTest(ctx context.Context, conf *testPerformanceConf return testRes } -func performanceDiskReadIOPSTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraDiskReadIOPSTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "DiskReadIOPS"} var err error @@ -427,7 +427,7 @@ func performanceDiskReadIOPSTest(ctx context.Context, conf *testPerformanceConfi return testRes } -func performanceAvailableMemoryTest(ctx context.Context, _ *testPerformanceConfig) testResult { +func infraAvailableMemoryTest(ctx context.Context, _ *testInfraConfig) testResult { testRes := testResult{Name: "AvailableMemory"} var availableMemory int64 @@ -462,7 +462,7 @@ func performanceAvailableMemoryTest(ctx context.Context, _ *testPerformanceConfi return testRes } -func performanceTotalMemoryTest(ctx context.Context, _ *testPerformanceConfig) testResult { +func infraTotalMemoryTest(ctx context.Context, _ *testInfraConfig) testResult { testRes := testResult{Name: "TotalMemory"} var totalMemory int64 @@ -497,7 +497,7 @@ func performanceTotalMemoryTest(ctx context.Context, _ *testPerformanceConfig) t return testRes } -func performanceInternetLatencyTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraInternetLatencyTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "InternetLatency"} server, err := fetchOoklaServer(ctx, conf) @@ -529,7 +529,7 @@ func performanceInternetLatencyTest(ctx context.Context, conf *testPerformanceCo return testRes } -func performanceInternetDownloadSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraInternetDownloadSpeedTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "InternetDownloadSpeed"} server, err := 
fetchOoklaServer(ctx, conf) @@ -561,7 +561,7 @@ func performanceInternetDownloadSpeedTest(ctx context.Context, conf *testPerform return testRes } -func performanceInternetUploadSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraInternetUploadSpeedTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "InternetUploadSpeed"} server, err := fetchOoklaServer(ctx, conf) @@ -739,7 +739,7 @@ func totalMemoryMacos(ctx context.Context) (int64, error) { return memSizeInt, nil } -func fetchOoklaServer(_ context.Context, conf *testPerformanceConfig) (speedtest.Server, error) { +func fetchOoklaServer(_ context.Context, conf *testInfraConfig) (speedtest.Server, error) { speedtestClient := speedtest.New() serverList, err := speedtestClient.FetchServers() diff --git a/cmd/testperformance_internal_test.go b/cmd/testperformance_internal_test.go index 80487491f9..fe98692f38 100644 --- a/cmd/testperformance_internal_test.go +++ b/cmd/testperformance_internal_test.go @@ -17,19 +17,19 @@ import ( "github.com/obolnetwork/charon/app/errors" ) -//go:generate go test . -run=TestPerformanceTest -update +//go:generate go test . 
-run=TestInfraTest -update -func TestPerformanceTest(t *testing.T) { +func TestInfraTest(t *testing.T) { tests := []struct { name string - config testPerformanceConfig + config testInfraConfig expected testCategoryResult expectedErr string cleanup func(*testing.T, string) }{ { name: "default scenario", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ OutputToml: "", Quiet: false, @@ -47,13 +47,13 @@ func TestPerformanceTest(t *testing.T) { }, }, Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", }, { name: "timeout", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ OutputToml: "", Quiet: false, @@ -69,13 +69,13 @@ func TestPerformanceTest(t *testing.T) { }, }, Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", }, { name: "quiet", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ OutputToml: "", Quiet: true, @@ -93,13 +93,13 @@ func TestPerformanceTest(t *testing.T) { }, }, Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", }, { name: "unsupported test", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ OutputToml: "", Quiet: false, @@ -110,13 +110,13 @@ func TestPerformanceTest(t *testing.T) { }, expected: testCategoryResult{ Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "test case not supported", }, { name: "custom test cases", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ OutputToml: "", Quiet: false, @@ -132,13 +132,13 @@ func TestPerformanceTest(t *testing.T) { }, }, Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", }, { name: "write to file", - config: 
testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ OutputToml: "./write-to-file-test.toml.tmp", Quiet: false, @@ -156,7 +156,7 @@ func TestPerformanceTest(t *testing.T) { }, }, Score: categoryScoreA, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", cleanup: func(t *testing.T, p string) { @@ -170,7 +170,7 @@ func TestPerformanceTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestPerformance(ctx, &buf, test.config) + err := runTestInfra(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -196,7 +196,7 @@ func TestPerformanceTest(t *testing.T) { } } -func StartHealthyPerformanceClient(t *testing.T, port int, ready chan bool) error { +func StartHealthyInfraClient(t *testing.T, port int, ready chan bool) error { t.Helper() defer close(ready) @@ -215,7 +215,7 @@ func StartHealthyPerformanceClient(t *testing.T, port int, ready chan bool) erro } } -func TestPerformanceTestFlags(t *testing.T) { +func TestInfraTestFlags(t *testing.T) { tests := []struct { name string args []string @@ -223,19 +223,19 @@ func TestPerformanceTestFlags(t *testing.T) { }{ { name: "default scenario", - args: []string{"performance", "--disk-io-block-size-kb=1"}, + args: []string{"infra", "--disk-io-block-size-kb=1"}, expectedErr: "", }, { name: "no output toml on quiet", - args: []string{"performance", "--disk-io-block-size-kb=1", "--quiet"}, + args: []string{"infra", "--disk-io-block-size-kb=1", "--quiet"}, expectedErr: "on --quiet, an --output-toml is required", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestPerformanceCmd(func(context.Context, io.Writer, testPerformanceConfig) error { return nil })) + cmd := newAlphaCmd(newTestInfraCmd(func(context.Context, io.Writer, testInfraConfig) error { return nil })) cmd.SetArgs(test.args) err := 
cmd.Execute() if test.expectedErr != "" { From b2b9d2b45dbee6eaaeb46d90563bd93e9e0528dc Mon Sep 17 00:00:00 2001 From: Anthony PHAM Date: Thu, 14 Nov 2024 16:29:00 +0100 Subject: [PATCH 81/89] *: fix trigger dispatch repo (#3381) fix trigger dispatch repo category: feature ticket: none --- .github/workflows/build-push-deploy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-push-deploy.yml b/.github/workflows/build-push-deploy.yml index e4207501d1..7e779a349d 100644 --- a/.github/workflows/build-push-deploy.yml +++ b/.github/workflows/build-push-deploy.yml @@ -92,6 +92,6 @@ jobs: uses: peter-evans/repository-dispatch@v2 with: token: ${{ secrets.CHARON_K8S_REPO_ACCESS_TOKEN }} - repository: ObolNetwork/charon-k8s + repository: ObolNetwork/obol-infrastructure event-type: charon-package-published client-payload: '{"sha": "${{ github.sha }}"}' From fac5b691630cf1c9fe994db80eb8fac062ed6833 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Nov 2024 08:48:22 +0000 Subject: [PATCH 82/89] build(deps): Bump google.golang.org/protobuf from 1.34.2 to 1.35.2 (#3382) Bumps google.golang.org/protobuf from 1.34.2 to 1.35.2. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=google.golang.org/protobuf&package-manager=go_modules&previous-version=1.34.2&new-version=1.35.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index db19cf76e8..1dc48e8e55 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( golang.org/x/term v0.26.0 golang.org/x/time v0.8.0 golang.org/x/tools v0.27.0 - google.golang.org/protobuf v1.34.2 + google.golang.org/protobuf v1.35.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) diff --git a/go.sum b/go.sum index bbe69d01a5..1b357c59da 100644 --- a/go.sum +++ b/go.sum @@ -732,8 +732,8 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/Knetic/govaluate.v3 v3.0.0 h1:18mUyIt4ZlRlFZAAfVetz4/rzlJs9yhN+U02F4u1AOc= gopkg.in/Knetic/govaluate.v3 v3.0.0/go.mod h1:csKLBORsPbafmSCGTEh3U7Ozmsuq8ZSIlKk1bcqph0E= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= From 529d4cf285e20865b404e090359aa17a96654b8f Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Fri, 15 Nov 2024 14:45:48 +0100 Subject: [PATCH 83/89] cmd: test command file output improvements (#3384) Couple of small improvements over the test command: - Swap TOML with JSON file on output - Change the file format to include all 5 categories, so commands like `test all` can successfully write all results to the file - Safe write to the file (using temp file) - Put the MEV create block tests under 
a `--load-test` flag, so we don't put external infra under excess in the default case An some small fixes that have been missed: - Rename the testperformance file to testinfra - Add the beacon node simulations file directory for results as a flag category: refactor ticket: none --- cmd/test.go | 98 +++++++++++++++---- cmd/testbeacon.go | 8 +- cmd/testbeacon_internal_test.go | 24 ++--- cmd/{testperformance.go => testinfra.go} | 4 +- ...nal_test.go => testinfra_internal_test.go} | 22 ++--- cmd/testmev.go | 18 +++- cmd/testmev_internal_test.go | 33 ++++--- cmd/testpeers.go | 4 +- cmd/testpeers_internal_test.go | 50 ++++++---- cmd/testvalidator.go | 4 +- cmd/testvalidator_internal_test.go | 22 ++--- go.mod | 2 +- go.sum | 5 +- 13 files changed, 193 insertions(+), 101 deletions(-) rename cmd/{testperformance.go => testinfra.go} (99%) rename cmd/{testperformance_internal_test.go => testinfra_internal_test.go} (93%) diff --git a/cmd/test.go b/cmd/test.go index df9c67375b..dfc00901cb 100644 --- a/cmd/test.go +++ b/cmd/test.go @@ -4,12 +4,14 @@ package cmd import ( "context" + "encoding/json" "fmt" "io" "net/http" "net/http/httptrace" "os" "os/signal" + "path/filepath" "slices" "sort" "strings" @@ -17,7 +19,6 @@ import ( "time" "unicode/utf8" - "github.com/pelletier/go-toml/v2" "github.com/spf13/cobra" "github.com/spf13/pflag" "golang.org/x/exp/maps" @@ -48,7 +49,7 @@ const ( ) type testConfig struct { - OutputToml string + OutputJSON string Quiet bool TestCases []string Timeout time.Duration @@ -67,7 +68,7 @@ func newTestCmd(cmds ...*cobra.Command) *cobra.Command { } func bindTestFlags(cmd *cobra.Command, config *testConfig) { - cmd.Flags().StringVar(&config.OutputToml, "output-toml", "", "File path to which output can be written in TOML format.") + cmd.Flags().StringVar(&config.OutputJSON, "output-json", "", "File path to which output can be written in JSON format.") cmd.Flags().StringSliceVar(&config.TestCases, "test-cases", nil, fmt.Sprintf("List of comma separated 
names of tests to be exeucted. Available tests are: %v", listTestCases(cmd))) cmd.Flags().DurationVar(&config.Timeout, "timeout", time.Hour, "Execution timeout for all tests.") cmd.Flags().BoolVar(&config.Quiet, "quiet", false, "Do not print test results to stdout.") @@ -117,8 +118,8 @@ func listTestCases(cmd *cobra.Command) []string { } func mustOutputToFileOnQuiet(cmd *cobra.Command) error { - if cmd.Flag("quiet").Changed && !cmd.Flag("output-toml").Changed { - return errors.New("on --quiet, an --output-toml is required") + if cmd.Flag("quiet").Changed && !cmd.Flag("output-json").Changed { + return errors.New("on --quiet, an --output-json is required") } return nil @@ -150,16 +151,15 @@ const ( categoryScoreC categoryScore = "C" ) -// toml fails on marshaling errors to string, so we wrap the errors and add custom marshal type testResultError struct{ error } type testResult struct { - Name string - Verdict testVerdict - Measurement string - Suggestion string - Error testResultError - IsAcceptable bool + Name string `json:"name"` + Verdict testVerdict `json:"verdict"` + Measurement string `json:"measurement,omitempty"` + Suggestion string `json:"suggestion,omitempty"` + Error testResultError `json:"error,omitempty"` + IsAcceptable bool `json:"-"` } func failedTestResult(testRes testResult, err error) testResult { @@ -198,10 +198,10 @@ type testCaseName struct { } type testCategoryResult struct { - CategoryName string - Targets map[string][]testResult - ExecutionTime Duration - Score categoryScore + CategoryName string `json:"category_name,omitempty"` + Targets map[string][]testResult `json:"targets,omitempty"` + ExecutionTime Duration `json:"execution_time,omitempty"` + Score categoryScore `json:"score,omitempty"` } func appendScore(cat []string, score []string) []string { @@ -213,15 +213,73 @@ func appendScore(cat []string, score []string) []string { return res } +type fileResult struct { + Peers testCategoryResult `json:"charon_peers,omitempty"` + Beacon 
testCategoryResult `json:"beacon_node,omitempty"` + Validator testCategoryResult `json:"validator_client,omitempty"` + MEV testCategoryResult `json:"mev,omitempty"` + Infra testCategoryResult `json:"infra,omitempty"` +} + func writeResultToFile(res testCategoryResult, path string) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o444) + // open or create a file + existingFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644) if err != nil { return errors.Wrap(err, "create/open file") } - defer f.Close() - err = toml.NewEncoder(f).Encode(res) + defer existingFile.Close() + stat, err := existingFile.Stat() + if err != nil { + return errors.Wrap(err, "get file stat") + } + // read file contents or default to empty structure + var file fileResult + if stat.Size() == 0 { + file = fileResult{} + } else { + err = json.NewDecoder(existingFile).Decode(&file) + if err != nil { + return errors.Wrap(err, "decode fileResult from JSON") + } + } + + switch res.CategoryName { + case peersTestCategory: + file.Peers = res + case beaconTestCategory: + file.Beacon = res + case validatorTestCategory: + file.Validator = res + case mevTestCategory: + file.MEV = res + case infraTestCategory: + file.Infra = res + } + + // write data to temp file + tmpFile, err := os.CreateTemp(filepath.Dir(path), fmt.Sprintf("%v-tmp-*.json", filepath.Base(path))) + if err != nil { + return errors.Wrap(err, "create temp file") + } + defer tmpFile.Close() + err = tmpFile.Chmod(0o644) + if err != nil { + return errors.Wrap(err, "chmod temp file") + } + + fileContentJSON, err := json.Marshal(file) + if err != nil { + return errors.Wrap(err, "marshal fileResult to JSON") + } + + _, err = tmpFile.Write(fileContentJSON) + if err != nil { + return errors.Wrap(err, "write json to file") + } + + err = os.Rename(tmpFile.Name(), path) if err != nil { - return errors.Wrap(err, "encode testCategoryResult to TOML") + return errors.Wrap(err, "rename temp file") } return nil diff --git 
a/cmd/testbeacon.go b/cmd/testbeacon.go index e96b6ae8b6..b5cb34231c 100644 --- a/cmd/testbeacon.go +++ b/cmd/testbeacon.go @@ -180,6 +180,7 @@ func bindTestBeaconFlags(cmd *cobra.Command, config *testBeaconConfig, flagsPref cmd.Flags().BoolVar(&config.LoadTest, flagsPrefix+"load-test", false, "Enable load test, not advisable when testing towards external beacon nodes.") cmd.Flags().DurationVar(&config.LoadTestDuration, flagsPrefix+"load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned.") cmd.Flags().IntVar(&config.SimulationDuration, flagsPrefix+"simulation-duration-in-slots", slotsInEpoch, "Time to keep running the simulation in slots.") + cmd.Flags().StringVar(&config.SimulationFileDir, flagsPrefix+"simulation-file-dir", "./", "Time to keep running the simulation in slots.") cmd.Flags().BoolVar(&config.SimulationVerbose, flagsPrefix+"simulation-verbose", false, "Show results for each request and each validator.") mustMarkFlagRequired(cmd, flagsPrefix+"endpoints") } @@ -250,8 +251,8 @@ func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err } } - if cfg.OutputToml != "" { - err = writeResultToFile(res, cfg.OutputToml) + if cfg.OutputJSON != "" { + err = writeResultToFile(res, cfg.OutputJSON) if err != nil { return err } @@ -270,9 +271,8 @@ func testAllBeacons(ctx context.Context, queuedTestCases []testCaseName, allTest group, _ := errgroup.WithContext(ctx) for _, endpoint := range conf.Endpoints { - currEndpoint := endpoint // TODO: can be removed after go1.22 version bump group.Go(func() error { - return testSingleBeacon(ctx, queuedTestCases, allTestCases, conf, currEndpoint, singleBeaconResCh) + return testSingleBeacon(ctx, queuedTestCases, allTestCases, conf, endpoint, singleBeaconResCh) }) } diff --git a/cmd/testbeacon_internal_test.go b/cmd/testbeacon_internal_test.go index 382b64e367..0c0897c9df 100644 --- a/cmd/testbeacon_internal_test.go +++ 
b/cmd/testbeacon_internal_test.go @@ -41,7 +41,7 @@ func TestBeaconTest(t *testing.T) { name: "default scenario", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -71,7 +71,7 @@ func TestBeaconTest(t *testing.T) { name: "connection refused", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -87,7 +87,7 @@ func TestBeaconTest(t *testing.T) { name: "timeout", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 100 * time.Nanosecond, @@ -110,7 +110,7 @@ func TestBeaconTest(t *testing.T) { name: "quiet", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, TestCases: nil, Timeout: time.Minute, @@ -126,7 +126,7 @@ func TestBeaconTest(t *testing.T) { name: "unsupported test", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, @@ -140,7 +140,7 @@ func TestBeaconTest(t *testing.T) { name: "custom test cases", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"ping"}, Timeout: time.Minute, @@ -163,7 +163,7 @@ func TestBeaconTest(t *testing.T) { name: "write to file", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -196,7 +196,7 @@ func TestBeaconTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, test.config.OutputToml) + test.cleanup(t, test.config.OutputJSON) } }() @@ -206,8 +206,8 @@ func TestBeaconTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, 
test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } @@ -281,9 +281,9 @@ func TestBeaconTestFlags(t *testing.T) { expectedErr: "required flag(s) \"endpoints\" not set", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"beacon", "--endpoints=\"test.endpoint\"", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", }, } diff --git a/cmd/testperformance.go b/cmd/testinfra.go similarity index 99% rename from cmd/testperformance.go rename to cmd/testinfra.go index c2160b6764..cb0b2c945f 100644 --- a/cmd/testperformance.go +++ b/cmd/testinfra.go @@ -166,8 +166,8 @@ func runTestInfra(ctx context.Context, w io.Writer, cfg testInfraConfig) (err er } } - if cfg.OutputToml != "" { - err = writeResultToFile(res, cfg.OutputToml) + if cfg.OutputJSON != "" { + err = writeResultToFile(res, cfg.OutputJSON) if err != nil { return err } diff --git a/cmd/testperformance_internal_test.go b/cmd/testinfra_internal_test.go similarity index 93% rename from cmd/testperformance_internal_test.go rename to cmd/testinfra_internal_test.go index fe98692f38..46ee70d72b 100644 --- a/cmd/testperformance_internal_test.go +++ b/cmd/testinfra_internal_test.go @@ -31,7 +31,7 @@ func TestInfraTest(t *testing.T) { name: "default scenario", config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, Timeout: time.Minute, @@ -55,7 +55,7 @@ func TestInfraTest(t *testing.T) { name: "timeout", config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 100 * time.Nanosecond, @@ -77,7 +77,7 @@ func TestInfraTest(t *testing.T) { name: "quiet", config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: 
true, TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, Timeout: time.Minute, @@ -101,7 +101,7 @@ func TestInfraTest(t *testing.T) { name: "unsupported test", config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, @@ -118,7 +118,7 @@ func TestInfraTest(t *testing.T) { name: "custom test cases", config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"totalMemory"}, Timeout: time.Minute, @@ -140,7 +140,7 @@ func TestInfraTest(t *testing.T) { name: "write to file", config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, Timeout: time.Minute, @@ -179,7 +179,7 @@ func TestInfraTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, test.config.OutputToml) + test.cleanup(t, test.config.OutputJSON) } }() @@ -189,8 +189,8 @@ func TestInfraTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } @@ -227,9 +227,9 @@ func TestInfraTestFlags(t *testing.T) { expectedErr: "", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"infra", "--disk-io-block-size-kb=1", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", }, } diff --git a/cmd/testmev.go b/cmd/testmev.go index b799558894..f640b85054 100644 --- a/cmd/testmev.go +++ b/cmd/testmev.go @@ -32,6 +32,7 @@ type testMEVConfig struct { testConfig Endpoints []string BeaconNodeEndpoint string + LoadTest bool LoadTestBlocks uint } @@ 
-65,15 +66,26 @@ func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) error bindTestFlags(cmd, &config.testConfig) bindTestMEVFlags(cmd, &config, "") + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + loadTest := cmd.Flags().Lookup("load-test").Value.String() + beaconNodeEndpoint := cmd.Flags().Lookup("beacon-node-endpoint").Value.String() + + if loadTest == "true" && beaconNodeEndpoint == "" { + return errors.New("beacon-node-endpoint should be specified when load-test is") + } + + return nil + }) + return cmd } func bindTestMEVFlags(cmd *cobra.Command, config *testMEVConfig, flagsPrefix string) { cmd.Flags().StringSliceVar(&config.Endpoints, flagsPrefix+"endpoints", nil, "[REQUIRED] Comma separated list of one or more MEV relay endpoint URLs.") cmd.Flags().StringVar(&config.BeaconNodeEndpoint, flagsPrefix+"beacon-node-endpoint", "", "[REQUIRED] Beacon node endpoint URL used for block creation test.") + cmd.Flags().BoolVar(&config.LoadTest, flagsPrefix+"load-test", false, "Enable load test.") cmd.Flags().UintVar(&config.LoadTestBlocks, flagsPrefix+"load-test-blocks", 3, "Amount of blocks the 'createMultipleBlocks' test will create.") mustMarkFlagRequired(cmd, flagsPrefix+"endpoints") - mustMarkFlagRequired(cmd, flagsPrefix+"beacon-node-endpoint") } func supportedMEVTestCases() map[testCaseName]testCaseMEV { @@ -134,8 +146,8 @@ func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (err error) } } - if cfg.OutputToml != "" { - err = writeResultToFile(res, cfg.OutputToml) + if cfg.OutputJSON != "" { + err = writeResultToFile(res, cfg.OutputJSON) if err != nil { return err } diff --git a/cmd/testmev_internal_test.go b/cmd/testmev_internal_test.go index e692712199..5519904b9f 100644 --- a/cmd/testmev_internal_test.go +++ b/cmd/testmev_internal_test.go @@ -43,7 +43,7 @@ func TestMEVTest(t *testing.T) { name: "default scenario", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: 
false, TestCases: nil, Timeout: time.Minute, @@ -67,7 +67,7 @@ func TestMEVTest(t *testing.T) { name: "connection refused", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -97,7 +97,7 @@ func TestMEVTest(t *testing.T) { name: "timeout", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 100 * time.Nanosecond, @@ -121,7 +121,7 @@ func TestMEVTest(t *testing.T) { name: "quiet", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, TestCases: nil, Timeout: time.Minute, @@ -151,7 +151,7 @@ func TestMEVTest(t *testing.T) { name: "unsupported test", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, @@ -166,7 +166,7 @@ func TestMEVTest(t *testing.T) { name: "custom test cases", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"ping"}, Timeout: time.Minute, @@ -190,7 +190,7 @@ func TestMEVTest(t *testing.T) { name: "write to file", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -237,7 +237,7 @@ func TestMEVTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, test.config.OutputToml) + test.cleanup(t, test.config.OutputJSON) } }() @@ -247,8 +247,8 @@ func TestMEVTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } @@ -270,18 +270,23 @@ func TestMEVTestFlags(t *testing.T) { }{ { name: "default scenario", - args: []string{"mev", 
"--endpoints=\"test.endpoint\"", "--beacon-node-endpoint=\"test.endpoint\""}, + args: []string{"mev", "--endpoints=\"test.endpoint\""}, expectedErr: "", }, { name: "no endpoints flag", args: []string{"mev"}, - expectedErr: "required flag(s) \"beacon-node-endpoint\", \"endpoints\" not set", + expectedErr: "required flag(s) \"endpoints\" not set", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"mev", "--endpoints=\"test.endpoint\"", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", + }, + { + name: "no beacon node endpoint flag on load test", + args: []string{"mev", "--endpoints=\"test.endpoint\"", "--load-test"}, + expectedErr: "beacon-node-endpoint should be specified when load-test is", }, } diff --git a/cmd/testpeers.go b/cmd/testpeers.go index 987c448c58..a800768e56 100644 --- a/cmd/testpeers.go +++ b/cmd/testpeers.go @@ -228,8 +228,8 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error } } - if conf.OutputToml != "" { - err = writeResultToFile(res, conf.OutputToml) + if conf.OutputJSON != "" { + err = writeResultToFile(res, conf.OutputJSON) if err != nil { return err } diff --git a/cmd/testpeers_internal_test.go b/cmd/testpeers_internal_test.go index 0d766c0f36..56d6d611f0 100644 --- a/cmd/testpeers_internal_test.go +++ b/cmd/testpeers_internal_test.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "encoding/base64" + "encoding/json" "fmt" "io" "net/http" @@ -17,7 +18,6 @@ import ( k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/libp2p/go-libp2p/core/peer" - "github.com/pelletier/go-toml/v2" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -50,7 +50,7 @@ func TestPeersTest(t *testing.T) { name: "default scenario", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 10 * time.Second, @@ -118,7 +118,7 @@ func 
TestPeersTest(t *testing.T) { name: "quiet", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, TestCases: nil, Timeout: 3 * time.Second, @@ -162,7 +162,7 @@ func TestPeersTest(t *testing.T) { name: "unsupported test", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: 200 * time.Millisecond, @@ -185,7 +185,7 @@ func TestPeersTest(t *testing.T) { name: "custom test cases", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"ping"}, Timeout: 200 * time.Millisecond, @@ -222,7 +222,7 @@ func TestPeersTest(t *testing.T) { name: "write to file", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, Timeout: 3 * time.Second, }, @@ -289,7 +289,7 @@ func TestPeersTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, conf.OutputToml) + test.cleanup(t, conf.OutputJSON) } }() @@ -299,8 +299,8 @@ func TestPeersTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } @@ -323,9 +323,9 @@ func TestPeersTestFlags(t *testing.T) { expectedErr: "--enrs, --cluster-lock-file-path or --cluster-definition-file-path must be specified.", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"peers", "--enrs=\"test.endpoint\"", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", }, } @@ -379,14 +379,30 @@ func testWriteFile(t *testing.T, expectedRes testCategoryResult, path string) { t.Helper() file, err := os.ReadFile(path) require.NoError(t, err) 
- var res testCategoryResult - err = toml.Unmarshal(file, &res) + var res fileResult + err = json.Unmarshal(file, &res) require.NoError(t, err) - require.Equal(t, expectedRes.CategoryName, res.CategoryName) - require.Equal(t, len(expectedRes.Targets), len(res.Targets)) + var actualRes testCategoryResult + switch expectedRes.CategoryName { + case peersTestCategory: + actualRes = res.Peers + case beaconTestCategory: + actualRes = res.Beacon + case validatorTestCategory: + actualRes = res.Validator + case mevTestCategory: + actualRes = res.MEV + case infraTestCategory: + actualRes = res.Infra + default: + t.Error("unknown category") + } + + require.Equal(t, expectedRes.CategoryName, actualRes.CategoryName) + require.Equal(t, len(expectedRes.Targets), len(actualRes.Targets)) checkFinalScore := true - for targetName, testResults := range res.Targets { + for targetName, testResults := range actualRes.Targets { for idx, testRes := range testResults { // do not test verdicts based on measurements if expectedRes.Targets[targetName][idx].Verdict == testVerdictOk || expectedRes.Targets[targetName][idx].Verdict == testVerdictFail { @@ -406,7 +422,7 @@ func testWriteFile(t *testing.T, expectedRes testCategoryResult, path string) { } // check final score only if there are no tests based on actual measurement if checkFinalScore { - require.Equal(t, expectedRes.Score, res.Score) + require.Equal(t, expectedRes.Score, actualRes.Score) } } diff --git a/cmd/testvalidator.go b/cmd/testvalidator.go index 39d134e6ed..57139a839f 100644 --- a/cmd/testvalidator.go +++ b/cmd/testvalidator.go @@ -116,8 +116,8 @@ func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) } } - if cfg.OutputToml != "" { - err = writeResultToFile(res, cfg.OutputToml) + if cfg.OutputJSON != "" { + err = writeResultToFile(res, cfg.OutputJSON) if err != nil { return err } diff --git a/cmd/testvalidator_internal_test.go b/cmd/testvalidator_internal_test.go index 88ff1a8fb9..9f4ab003f6 100644 
--- a/cmd/testvalidator_internal_test.go +++ b/cmd/testvalidator_internal_test.go @@ -42,7 +42,7 @@ func TestValidatorTest(t *testing.T) { name: "default scenario", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -66,7 +66,7 @@ func TestValidatorTest(t *testing.T) { name: "timeout", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 100 * time.Nanosecond, @@ -88,7 +88,7 @@ func TestValidatorTest(t *testing.T) { name: "quiet", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, TestCases: nil, Timeout: time.Minute, @@ -112,7 +112,7 @@ func TestValidatorTest(t *testing.T) { name: "unsupported test", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, @@ -129,7 +129,7 @@ func TestValidatorTest(t *testing.T) { name: "custom test cases", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"ping"}, Timeout: time.Minute, @@ -151,7 +151,7 @@ func TestValidatorTest(t *testing.T) { name: "write to file", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -190,7 +190,7 @@ func TestValidatorTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, test.config.OutputToml) + test.cleanup(t, test.config.OutputJSON) } }() @@ -200,8 +200,8 @@ func TestValidatorTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } 
@@ -238,9 +238,9 @@ func TestValidatorTestFlags(t *testing.T) { expectedErr: "", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"validator", "--validator-api-address=\"test.endpoint\"", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", }, } diff --git a/go.mod b/go.mod index 1dc48e8e55..8ea8e29e6d 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.14.0 - github.com/pelletier/go-toml/v2 v2.2.3 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 github.com/protolambda/eth2-shuffle v1.1.0 @@ -160,6 +159,7 @@ require ( github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pk910/dynamic-ssz v0.0.3 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/go.sum b/go.sum index 1b357c59da..1c61887630 100644 --- a/go.sum +++ b/go.sum @@ -407,8 +407,8 @@ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= -github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pelletier/go-toml/v2 v2.2.2 
h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pk910/dynamic-ssz v0.0.3 h1:fCWzFowq9P6SYCc7NtJMkZcIHk+r5hSVD+32zVi6Aio= github.com/pk910/dynamic-ssz v0.0.3/go.mod h1:b6CrLaB2X7pYA+OSEEbkgXDEcRnjLOZIxZTsMuO/Y9c= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -526,6 +526,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= From 94f7f505b5b18a1975d323f762dc4cb8a541f405 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Mon, 18 Nov 2024 12:32:56 +0100 Subject: [PATCH 84/89] cmd: add custom number of validators beacon simulation test (#3385) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add a custom number of validators beacon simulation test, so users can test for their specific scenario. 
- Fix some test names inconsistencies - Improve the `charon test all` UX - print results to terminal after all tests complete - Add missed MEV load test check - Move the test command out of alpha state 🎉 category: feature ticket: none --- cmd/cmd.go | 16 ++-- cmd/testall.go | 30 ++++++-- cmd/testbeacon.go | 99 +++++++++++++++++-------- cmd/testbeacon_internal_test.go | 85 +++++++++++---------- cmd/testinfra.go | 37 +++++----- cmd/testinfra_internal_test.go | 36 ++++----- cmd/testmev.go | 37 ++++++---- cmd/testmev_internal_test.go | 115 +++++++++++++++++------------ cmd/testpeers.go | 38 +++++----- cmd/testpeers_internal_test.go | 68 ++++++++--------- cmd/testvalidator.go | 28 +++---- cmd/testvalidator_internal_test.go | 34 ++++----- 12 files changed, 359 insertions(+), 264 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index 387702e901..0b152c01d0 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -47,14 +47,6 @@ func New() *cobra.Command { ), newCombineCmd(newCombineFunc), newAlphaCmd( - newTestCmd( - newTestAllCmd(runTestAll), - newTestPeersCmd(runTestPeers), - newTestBeaconCmd(runTestBeacon), - newTestValidatorCmd(runTestValidator), - newTestMEVCmd(runTestMEV), - newTestInfraCmd(runTestInfra), - ), newAddValidatorsCmd(runAddValidatorsSolo), newViewClusterManifestCmd(runViewClusterManifest), ), @@ -65,6 +57,14 @@ func New() *cobra.Command { newFetchExitCmd(runFetchExit), ), newUnsafeCmd(newRunCmd(app.Run, true)), + newTestCmd( + newTestAllCmd(runTestAll), + newTestPeersCmd(runTestPeers), + newTestBeaconCmd(runTestBeacon), + newTestValidatorCmd(runTestValidator), + newTestMEVCmd(runTestMEV), + newTestInfraCmd(runTestInfra), + ), ) } diff --git a/cmd/testall.go b/cmd/testall.go index 06bc41a534..995cc99cbc 100644 --- a/cmd/testall.go +++ b/cmd/testall.go @@ -64,34 +64,54 @@ func newTestAllCmd(runFunc func(context.Context, io.Writer, testAllConfig) error func runTestAll(ctx context.Context, w io.Writer, cfg testAllConfig) (err error) { cfg.Beacon.testConfig = 
cfg.testConfig - err = runTestBeacon(ctx, w, cfg.Beacon) + cfg.Beacon.Quiet = true + var results []testCategoryResult + beaconRes, err := runTestBeacon(ctx, w, cfg.Beacon) if err != nil { return err } + results = append(results, beaconRes) cfg.Validator.testConfig = cfg.testConfig - err = runTestValidator(ctx, w, cfg.Validator) + cfg.Validator.Quiet = true + validatorRes, err := runTestValidator(ctx, w, cfg.Validator) if err != nil { return err } + results = append(results, validatorRes) cfg.MEV.testConfig = cfg.testConfig - err = runTestMEV(ctx, w, cfg.MEV) + cfg.MEV.Quiet = true + mevRes, err := runTestMEV(ctx, w, cfg.MEV) if err != nil { return err } + results = append(results, mevRes) cfg.Infra.testConfig = cfg.testConfig - err = runTestInfra(ctx, w, cfg.Infra) + cfg.Infra.Quiet = true + infraRes, err := runTestInfra(ctx, w, cfg.Infra) if err != nil { return err } + results = append(results, infraRes) cfg.Peers.testConfig = cfg.testConfig - err = runTestPeers(ctx, w, cfg.Peers) + cfg.Peers.Quiet = true + peersRes, err := runTestPeers(ctx, w, cfg.Peers) if err != nil { return err } + results = append(results, peersRes) + + if !cfg.Quiet { + for _, res := range results { + err = writeResultToWriter(res, w) + if err != nil { + return err + } + } + } return nil } diff --git a/cmd/testbeacon.go b/cmd/testbeacon.go index b5cb34231c..70b240961a 100644 --- a/cmd/testbeacon.go +++ b/cmd/testbeacon.go @@ -37,6 +37,7 @@ type testBeaconConfig struct { SimulationFileDir string SimulationDuration int SimulationVerbose bool + SimulationCustom int } type testCaseBeacon func(context.Context, *testBeaconConfig, string) testResult @@ -153,7 +154,7 @@ const ( thresholdBeaconSimulationPoor = 400 * time.Millisecond ) -func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) error) *cobra.Command { +func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) (testCategoryResult, error)) *cobra.Command { var config testBeaconConfig cmd := 
&cobra.Command{ @@ -165,7 +166,8 @@ func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } @@ -182,33 +184,35 @@ func bindTestBeaconFlags(cmd *cobra.Command, config *testBeaconConfig, flagsPref cmd.Flags().IntVar(&config.SimulationDuration, flagsPrefix+"simulation-duration-in-slots", slotsInEpoch, "Time to keep running the simulation in slots.") cmd.Flags().StringVar(&config.SimulationFileDir, flagsPrefix+"simulation-file-dir", "./", "Time to keep running the simulation in slots.") cmd.Flags().BoolVar(&config.SimulationVerbose, flagsPrefix+"simulation-verbose", false, "Show results for each request and each validator.") + cmd.Flags().IntVar(&config.SimulationCustom, flagsPrefix+"simulation-custom", 0, "Run custom simulation with the specified amount of validators.") mustMarkFlagRequired(cmd, flagsPrefix+"endpoints") } func supportedBeaconTestCases() map[testCaseName]testCaseBeacon { return map[testCaseName]testCaseBeacon{ - {name: "ping", order: 1}: beaconPingTest, - {name: "pingMeasure", order: 2}: beaconPingMeasureTest, - {name: "version", order: 3}: beaconVersionTest, - {name: "isSynced", order: 4}: beaconIsSyncedTest, - {name: "peerCount", order: 5}: beaconPeerCountTest, - {name: "pingLoad", order: 6}: beaconPingLoadTest, + {name: "Ping", order: 1}: beaconPingTest, + {name: "PingMeasure", order: 2}: beaconPingMeasureTest, + {name: "Version", order: 3}: beaconVersionTest, + {name: "Synced", order: 4}: beaconIsSyncedTest, + {name: "PeerCount", order: 5}: beaconPeerCountTest, + {name: "PingLoad", order: 6}: beaconPingLoadTest, - {name: "simulate1", order: 7}: beaconSimulation1Test, - {name: "simulate10", order: 8}: beaconSimulation10Test, - {name: "simulate100", order: 9}: beaconSimulation100Test, - {name: 
"simulate500", order: 10}: beaconSimulation500Test, - {name: "simulate1000", order: 11}: beaconSimulation1000Test, + {name: "Simulate1", order: 7}: beaconSimulation1Test, + {name: "Simulate10", order: 8}: beaconSimulation10Test, + {name: "Simulate100", order: 9}: beaconSimulation100Test, + {name: "Simulate500", order: 10}: beaconSimulation500Test, + {name: "Simulate1000", order: 11}: beaconSimulation1000Test, + {name: "SimulateCustom", order: 12}: beaconSimulationCustomTest, } } -func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err error) { +func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (res testCategoryResult, err error) { log.Info(ctx, "Starting beacon node test") testCases := supportedBeaconTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { - return errors.New("test case not supported") + return res, errors.New("test case not supported") } sortTests(queuedTests) @@ -237,7 +241,7 @@ func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: beaconTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -247,18 +251,18 @@ func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err if !cfg.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } if cfg.OutputJSON != "" { err = writeResultToFile(res, cfg.OutputJSON) if err != nil { - return err + return res, err } } - return nil + return res, nil } // beacon node tests @@ -314,9 +318,7 @@ func testSingleBeacon(ctx context.Context, queuedTestCases []testCaseName, allTe finished = true break } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } @@ -375,7 +377,7 @@ func beaconPingMeasureTest(ctx context.Context, _ *testBeaconConfig, target stri } func beaconVersionTest(ctx 
context.Context, _ *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "version"} + testRes := testResult{Name: "Version"} type versionData struct { Version string `json:"version"` @@ -424,7 +426,7 @@ func beaconVersionTest(ctx context.Context, _ *testBeaconConfig, target string) } func beaconPingLoadTest(ctx context.Context, conf *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "BeaconLoad"} + testRes := testResult{Name: "PingLoad"} if !conf.LoadTest { testRes.Verdict = testVerdictSkipped return testRes @@ -462,7 +464,7 @@ func beaconPingLoadTest(ctx context.Context, conf *testBeaconConfig, target stri } func beaconIsSyncedTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "isSynced"} + testRes := testResult{Name: "Synced"} type isSyncedResponse struct { Data eth2v1.SyncState `json:"data"` @@ -505,7 +507,7 @@ func beaconIsSyncedTest(ctx context.Context, _ *testBeaconConfig, target string) } func beaconPeerCountTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "peerCount"} + testRes := testResult{Name: "PeerCount"} type peerCountResponseMeta struct { Count int `json:"count"` @@ -579,7 +581,7 @@ func pingBeaconContinuously(ctx context.Context, target string, resCh chan<- tim // beacon simulation tests func beaconSimulation1Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "BeaconSimulation1Validator"} + testRes := testResult{Name: "Simulate1"} if !conf.LoadTest { testRes.Verdict = testVerdictSkipped return testRes @@ -604,7 +606,7 @@ func beaconSimulation1Test(ctx context.Context, conf *testBeaconConfig, target s } func beaconSimulation10Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "BeaconSimulation10Validators"} + testRes := testResult{Name: "Simulate10"} if !conf.LoadTest { 
testRes.Verdict = testVerdictSkipped return testRes @@ -629,7 +631,7 @@ func beaconSimulation10Test(ctx context.Context, conf *testBeaconConfig, target } func beaconSimulation100Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "BeaconSimulation100Validators"} + testRes := testResult{Name: "Simulate100"} if !conf.LoadTest { testRes.Verdict = testVerdictSkipped return testRes @@ -654,7 +656,7 @@ func beaconSimulation100Test(ctx context.Context, conf *testBeaconConfig, target } func beaconSimulation500Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "BeaconSimulation500Validators"} + testRes := testResult{Name: "Simulate500"} if !conf.LoadTest { testRes.Verdict = testVerdictSkipped return testRes @@ -679,7 +681,7 @@ func beaconSimulation500Test(ctx context.Context, conf *testBeaconConfig, target } func beaconSimulation1000Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "BeaconSimulation1000Validators"} + testRes := testResult{Name: "Simulate1000"} if !conf.LoadTest { testRes.Verdict = testVerdictSkipped return testRes @@ -703,6 +705,43 @@ func beaconSimulation1000Test(ctx context.Context, conf *testBeaconConfig, targe return beaconSimulationTest(ctx, conf, target, testRes, params) } +func beaconSimulationCustomTest(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "SimulateCustom"} + if conf.SimulationCustom < 1 { + testRes.Verdict = testVerdictSkipped + return testRes + } + testRes.Name = fmt.Sprintf("Simulate%v", conf.SimulationCustom) + + total := conf.SimulationCustom + syncCommittees := total / 100 + if syncCommittees == 0 { + syncCommittees++ + } + proposals := total / 15 + if proposals == 0 && (total-syncCommittees != 0) { + proposals++ + } + attestations := total - syncCommittees - proposals + + params := simParams{ + 
TotalValidatorsCount: total, + AttestationValidatorsCount: attestations, + ProposalValidatorsCount: proposals, + SyncCommitteeValidatorsCount: syncCommittees, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + func beaconSimulationTest(ctx context.Context, conf *testBeaconConfig, target string, testRes testResult, params simParams) testResult { duration := time.Duration(conf.SimulationDuration)*slotTime + time.Second var wg sync.WaitGroup diff --git a/cmd/testbeacon_internal_test.go b/cmd/testbeacon_internal_test.go index 0c0897c9df..38ec77e2bc 100644 --- a/cmd/testbeacon_internal_test.go +++ b/cmd/testbeacon_internal_test.go @@ -51,17 +51,18 @@ func TestBeaconTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ mockedBeaconNode.URL: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "version", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "isSynced", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "peerCount", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", 
Error: testResultError{}}, - {Name: "simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Version", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Synced", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PeerCount", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "SimulateCustom", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -97,10 +98,10 @@ func TestBeaconTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: 
"", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, }, @@ -142,7 +143,7 @@ func TestBeaconTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: false, - TestCases: []string{"ping"}, + TestCases: []string{"Ping"}, Timeout: time.Minute, }, Endpoints: []string{endpoint1, endpoint2}, @@ -150,10 +151,10 @@ func TestBeaconTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, }, }, }, @@ -187,7 +188,7 @@ func TestBeaconTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestBeacon(ctx, &buf, test.config) + _, err := runTestBeacon(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -216,30 +217,32 @@ func TestBeaconTest(t *testing.T) { func defaultFailingBNTests(_ *testing.T, endpoint1 string, endpoint2 string, port1 int, port2 int) map[string][]testResult { return map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: 
testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "version", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Version", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Synced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection 
refused`, port1))}}, + {Name: "PeerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "SimulateCustom", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "version", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate1", Verdict: 
testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Version", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Synced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PeerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1000", Verdict: 
testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "SimulateCustom", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, } } @@ -289,7 +292,9 @@ func TestBeaconTestFlags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestBeaconCmd(func(context.Context, io.Writer, testBeaconConfig) error { return nil })) + cmd := newAlphaCmd(newTestBeaconCmd(func(context.Context, io.Writer, testBeaconConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { diff --git a/cmd/testinfra.go b/cmd/testinfra.go index cb0b2c945f..4eae88f396 100644 --- a/cmd/testinfra.go +++ b/cmd/testinfra.go @@ -75,7 +75,7 @@ const ( var errFioNotFound = errors.New("fio command not found, install fio from https://fio.readthedocs.io/en/latest/fio_doc.html#binary-packages or using the package manager of your choice (apt, yum, brew, etc.)") -func newTestInfraCmd(runFunc func(context.Context, io.Writer, testInfraConfig) error) *cobra.Command { +func newTestInfraCmd(runFunc func(context.Context, io.Writer, testInfraConfig) (res testCategoryResult, err error)) *cobra.Command { var config testInfraConfig cmd := &cobra.Command{ @@ -87,7 +87,8 @@ func newTestInfraCmd(runFunc func(context.Context, io.Writer, testInfraConfig) e return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } @@ -106,25 +107,25 @@ func bindTestInfraFlags(cmd *cobra.Command, config *testInfraConfig, flagsPrefix func supportedInfraTestCases() map[testCaseName]func(context.Context, *testInfraConfig) testResult { return map[testCaseName]func(context.Context, *testInfraConfig) testResult{ - {name: "diskWriteSpeed", order: 1}: infraDiskWriteSpeedTest, 
- {name: "diskWriteIOPS", order: 2}: infraDiskWriteIOPSTest, - {name: "diskReadSpeed", order: 3}: infraDiskReadSpeedTest, - {name: "diskReadIOPS", order: 4}: infraDiskReadIOPSTest, - {name: "availableMemory", order: 5}: infraAvailableMemoryTest, - {name: "totalMemory", order: 6}: infraTotalMemoryTest, - {name: "internetLatency", order: 7}: infraInternetLatencyTest, - {name: "internetDownloadSpeed", order: 8}: infraInternetDownloadSpeedTest, - {name: "internetUploadSpeed", order: 9}: infraInternetUploadSpeedTest, + {name: "DiskWriteSpeed", order: 1}: infraDiskWriteSpeedTest, + {name: "DiskWriteIOPS", order: 2}: infraDiskWriteIOPSTest, + {name: "DiskReadSpeed", order: 3}: infraDiskReadSpeedTest, + {name: "DiskReadIOPS", order: 4}: infraDiskReadIOPSTest, + {name: "AvailableMemory", order: 5}: infraAvailableMemoryTest, + {name: "TotalMemory", order: 6}: infraTotalMemoryTest, + {name: "InternetLatency", order: 7}: infraInternetLatencyTest, + {name: "InternetDownloadSpeed", order: 8}: infraInternetDownloadSpeedTest, + {name: "InternetUploadSpeed", order: 9}: infraInternetUploadSpeedTest, } } -func runTestInfra(ctx context.Context, w io.Writer, cfg testInfraConfig) (err error) { +func runTestInfra(ctx context.Context, w io.Writer, cfg testInfraConfig) (res testCategoryResult, err error) { log.Info(ctx, "Starting hardware performance and network connectivity test") testCases := supportedInfraTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { - return errors.New("test case not supported") + return res, errors.New("test case not supported") } sortTests(queuedTests) @@ -152,7 +153,7 @@ func runTestInfra(ctx context.Context, w io.Writer, cfg testInfraConfig) (err er } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: infraTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -162,18 +163,18 @@ func runTestInfra(ctx context.Context, w io.Writer, cfg testInfraConfig) (err er if !cfg.Quiet { err 
= writeResultToWriter(res, w) if err != nil { - return err + return res, err } } if cfg.OutputJSON != "" { err = writeResultToFile(res, cfg.OutputJSON) if err != nil { - return err + return res, err } } - return nil + return res, nil } // hardware and internet connectivity tests @@ -199,9 +200,7 @@ func testSingleInfra(ctx context.Context, queuedTestCases []testCaseName, allTes finished = true break } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } diff --git a/cmd/testinfra_internal_test.go b/cmd/testinfra_internal_test.go index 46ee70d72b..ffa732581c 100644 --- a/cmd/testinfra_internal_test.go +++ b/cmd/testinfra_internal_test.go @@ -33,7 +33,7 @@ func TestInfraTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: false, - TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, + TestCases: []string{"AvailableMemory", "TotalMemory", "InternetLatency"}, Timeout: time.Minute, }, DiskIOBlockSizeKb: 1, @@ -41,9 +41,9 @@ func TestInfraTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "availableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "totalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "internetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "AvailableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "TotalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "InternetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, @@ -65,7 +65,7 @@ func TestInfraTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: 
"diskWriteSpeed", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "DiskWriteSpeed", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ -79,7 +79,7 @@ func TestInfraTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: true, - TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, + TestCases: []string{"AvailableMemory", "TotalMemory", "InternetLatency"}, Timeout: time.Minute, }, DiskIOBlockSizeKb: 1, @@ -87,9 +87,9 @@ func TestInfraTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "availableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "totalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "internetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "AvailableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "TotalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "InternetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, @@ -120,7 +120,7 @@ func TestInfraTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: false, - TestCases: []string{"totalMemory"}, + TestCases: []string{"TotalMemory"}, Timeout: time.Minute, }, DiskIOBlockSizeKb: 1, @@ -128,7 +128,7 @@ func TestInfraTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "totalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "TotalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: 
categoryScoreC, @@ -142,7 +142,7 @@ func TestInfraTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, - TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, + TestCases: []string{"AvailableMemory", "TotalMemory", "InternetLatency"}, Timeout: time.Minute, }, DiskIOBlockSizeKb: 1, @@ -150,9 +150,9 @@ func TestInfraTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "availableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "totalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "internetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "AvailableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "TotalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "InternetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -170,7 +170,7 @@ func TestInfraTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestInfra(ctx, &buf, test.config) + _, err := runTestInfra(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -235,7 +235,9 @@ func TestInfraTestFlags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestInfraCmd(func(context.Context, io.Writer, testInfraConfig) error { return nil })) + cmd := newAlphaCmd(newTestInfraCmd(func(context.Context, io.Writer, testInfraConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { diff --git 
a/cmd/testmev.go b/cmd/testmev.go index f640b85054..248f0646bf 100644 --- a/cmd/testmev.go +++ b/cmd/testmev.go @@ -47,7 +47,7 @@ const ( var errStatusCodeNot200 = errors.New("status code not 200 OK") -func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) error) *cobra.Command { +func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) (testCategoryResult, error)) *cobra.Command { var config testMEVConfig cmd := &cobra.Command{ @@ -59,7 +59,8 @@ func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) error return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } @@ -90,20 +91,20 @@ func bindTestMEVFlags(cmd *cobra.Command, config *testMEVConfig, flagsPrefix str func supportedMEVTestCases() map[testCaseName]testCaseMEV { return map[testCaseName]testCaseMEV{ - {name: "ping", order: 1}: mevPingTest, - {name: "pingMeasure", order: 2}: mevPingMeasureTest, - {name: "createBlock", order: 3}: mevCreateBlockTest, - {name: "createMultipleBlocks", order: 4}: mevCreateMultipleBlocksTest, + {name: "Ping", order: 1}: mevPingTest, + {name: "PingMeasure", order: 2}: mevPingMeasureTest, + {name: "CreateBlock", order: 3}: mevCreateBlockTest, + {name: "CreateMultipleBlocks", order: 4}: mevCreateMultipleBlocksTest, } } -func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (err error) { +func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (res testCategoryResult, err error) { log.Info(ctx, "Starting MEV relays test") testCases := supportedMEVTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { - return errors.New("test case not supported") + return res, errors.New("test case not supported") } sortTests(queuedTests) @@ -132,7 +133,7 @@ func runTestMEV(ctx context.Context, w 
io.Writer, cfg testMEVConfig) (err error) } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: mevTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -142,18 +143,18 @@ func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (err error) if !cfg.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } if cfg.OutputJSON != "" { err = writeResultToFile(res, cfg.OutputJSON) if err != nil { - return err + return res, err } } - return nil + return res, nil } // mev relays tests @@ -209,9 +210,7 @@ func testSingleMEV(ctx context.Context, queuedTestCases []testCaseName, allTestC finished = true break } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } @@ -274,6 +273,11 @@ func mevPingMeasureTest(ctx context.Context, _ *testMEVConfig, target string) te func mevCreateBlockTest(ctx context.Context, conf *testMEVConfig, target string) testResult { testRes := testResult{Name: "CreateBlock"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + latestBlock, err := latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) if err != nil { return failedTestResult(testRes, err) @@ -314,6 +318,11 @@ func mevCreateBlockTest(ctx context.Context, conf *testMEVConfig, target string) func mevCreateMultipleBlocksTest(ctx context.Context, conf *testMEVConfig, target string) testResult { testRes := testResult{Name: "CreateMultipleBlocks"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + latestBlock, err := latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) if err != nil { return failedTestResult(testRes, err) diff --git a/cmd/testmev_internal_test.go b/cmd/testmev_internal_test.go index 5519904b9f..5032d87006 100644 --- a/cmd/testmev_internal_test.go +++ b/cmd/testmev_internal_test.go @@ -54,17 +54,17 @@ func TestMEVTest(t *testing.T) { expected: testCategoryResult{ Targets: 
map[string][]testResult{ mockedMEVNode.URL: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, expectedErr: "", }, { - name: "connection refused", + name: "default load scenario", config: testMEVConfig{ testConfig: testConfig{ OutputJSON: "", @@ -72,22 +72,46 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, + Endpoints: []string{mockedMEVNode.URL}, + LoadTest: true, BeaconNodeEndpoint: endpoint3, }, + expected: testCategoryResult{ + Targets: map[string][]testResult{ + mockedMEVNode.URL: { + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{}}, + }, + }, + }, + expectedErr: "", + }, + { + name: "connection refused", + config: testMEVConfig{ + testConfig: testConfig{ + OutputJSON: "", + 
Quiet: false, + TestCases: nil, + Timeout: time.Minute, + }, + Endpoints: []string{endpoint1, endpoint2}, + }, expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, - {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, 
port3))}}, - {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -102,16 +126,15 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: 100 * time.Nanosecond, }, - Endpoints: []string{endpoint1, endpoint2}, - BeaconNodeEndpoint: endpoint3, + Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, }, @@ -126,22 +149,21 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, - BeaconNodeEndpoint: endpoint3, + Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, 
port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, - {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, - {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, 
port2))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -156,8 +178,7 @@ func TestMEVTest(t *testing.T) { TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, - BeaconNodeEndpoint: endpoint3, + Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{}, expectedErr: "test case not supported", @@ -168,19 +189,18 @@ func TestMEVTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: false, - TestCases: []string{"ping"}, + TestCases: []string{"Ping"}, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, - BeaconNodeEndpoint: endpoint3, + Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, }, }, }, @@ -195,22 +215,21 @@ func TestMEVTest(t *testing.T) { TestCases: nil, Timeout: time.Minute, }, - Endpoints: []string{endpoint1, endpoint2}, - 
BeaconNodeEndpoint: endpoint3, + Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, - {Name: "createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "createBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, - {Name: 
"createMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port3))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, @@ -228,7 +247,7 @@ func TestMEVTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestMEV(ctx, &buf, test.config) + _, err := runTestMEV(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -292,7 +311,9 @@ func TestMEVTestFlags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestMEVCmd(func(context.Context, io.Writer, testMEVConfig) error { return nil })) + cmd := newAlphaCmd(newTestMEVCmd(func(context.Context, io.Writer, testMEVConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { diff --git a/cmd/testpeers.go b/cmd/testpeers.go index a800768e56..3843cfdd83 100644 --- a/cmd/testpeers.go +++ b/cmd/testpeers.go @@ -64,7 +64,7 @@ const ( thresholdRelayMeasurePoor = 240 * time.Millisecond ) -func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) error) *cobra.Command { +func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) (testCategoryResult, error)) *cobra.Command { var 
config testPeersConfig cmd := &cobra.Command{ @@ -76,7 +76,8 @@ func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) e return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } @@ -125,27 +126,27 @@ func bindTestPeersFlags(cmd *cobra.Command, config *testPeersConfig, flagsPrefix func supportedPeerTestCases() map[testCaseName]testCasePeer { return map[testCaseName]testCasePeer{ - {name: "ping", order: 1}: peerPingTest, - {name: "pingMeasure", order: 2}: peerPingMeasureTest, - {name: "pingLoad", order: 3}: peerPingLoadTest, - {name: "directConn", order: 4}: peerDirectConnTest, + {name: "Ping", order: 1}: peerPingTest, + {name: "PingMeasure", order: 2}: peerPingMeasureTest, + {name: "PingLoad", order: 3}: peerPingLoadTest, + {name: "DirectConn", order: 4}: peerDirectConnTest, } } func supportedRelayTestCases() map[testCaseName]testCaseRelay { return map[testCaseName]testCaseRelay{ - {name: "pingRelay", order: 1}: relayPingTest, - {name: "pingMeasureRelay", order: 2}: relayPingMeasureTest, + {name: "PingRelay", order: 1}: relayPingTest, + {name: "PingMeasureRelay", order: 2}: relayPingMeasureTest, } } func supportedSelfTestCases() map[testCaseName]testCasePeerSelf { return map[testCaseName]testCasePeerSelf{ - {name: "libp2pTCPPortOpenTest", order: 1}: libp2pTCPPortOpenTest, + {name: "Libp2pTCPPortOpen", order: 1}: libp2pTCPPortOpenTest, } } -func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error { +func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) (res testCategoryResult, err error) { log.Info(ctx, "Starting charon peers and relays test") relayTestCases := supportedRelayTestCases() @@ -161,7 +162,8 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error sortTests(queuedTestsSelf) if 
len(queuedTestsPeer) == 0 && len(queuedTestsSelf) == 0 { - return errors.New("test case not supported") + err = errors.New("test case not supported") + return res, err } timeoutCtx, cancel := context.WithTimeout(ctx, conf.Timeout) @@ -172,7 +174,7 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error tcpNode, shutdown, err := startTCPNode(ctx, conf) if err != nil { - return err + return res, err } defer shutdown() @@ -200,7 +202,7 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error err = group.Wait() execTime := Duration{time.Since(startTime)} if err != nil { - return errors.Wrap(err, "peers test errgroup") + return res, errors.Wrap(err, "peers test errgroup") } close(testResultsChan) <-doneReading @@ -214,7 +216,7 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: peersTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -224,21 +226,21 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error if !conf.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } if conf.OutputJSON != "" { err = writeResultToFile(res, conf.OutputJSON) if err != nil { - return err + return res, err } } log.Info(ctx, "Keeping TCP node alive for peers until keep-alive time is reached...") blockAndWait(ctx, conf.KeepAlive) - return nil + return res, nil } // charon peers tests @@ -318,9 +320,7 @@ func testSinglePeer(ctx context.Context, queuedTestCases []testCaseName, allTest finished = true continue } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } diff --git a/cmd/testpeers_internal_test.go b/cmd/testpeers_internal_test.go index 56d6d611f0..9622c2a7d8 100644 --- a/cmd/testpeers_internal_test.go +++ b/cmd/testpeers_internal_test.go @@ -72,29 +72,29 @@ func TestPeersTest(t 
*testing.T) { CategoryName: peersTestCategory, Targets: map[string][]testResult{ "self": { - {Name: "libp2pTCPPortOpenTest", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Libp2pTCPPortOpen", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, fmt.Sprintf("relay %v", relayAddr): { - {Name: "pingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "directConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "DirectConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, "peer anxious-pencil enr:-HW4QDwUF...vKDw": { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: 
testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "directConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "DirectConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, "peer important-pen enr:-HW4QPSBg...wbr0": { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "directConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "DirectConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, @@ -138,20 +138,20 @@ func TestPeersTest(t *testing.T) { CategoryName: peersTestCategory, Targets: map[string][]testResult{ "self": { - {Name: "libp2pTCPPortOpenTest", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Libp2pTCPPortOpen", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, fmt.Sprintf("relay %v", relayAddr): { - {Name: "pingRelay", Verdict: testVerdictOk, 
Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, "peer anxious-pencil enr:-HW4QDwUF...vKDw": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, "peer important-pen enr:-HW4QPSBg...wbr0": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ -187,7 +187,7 @@ func TestPeersTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: false, - TestCases: []string{"ping"}, + TestCases: []string{"Ping"}, Timeout: 200 * time.Millisecond, }, ENRs: []string{ @@ -205,13 +205,13 @@ func TestPeersTest(t *testing.T) { CategoryName: peersTestCategory, Targets: map[string][]testResult{ "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, "peer anxious-pencil enr:-HW4QDwUF...vKDw": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", 
Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, "peer important-pen enr:-HW4QPSBg...wbr0": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ -241,20 +241,20 @@ func TestPeersTest(t *testing.T) { CategoryName: peersTestCategory, Targets: map[string][]testResult{ "self": { - {Name: "libp2pTCPPortOpenTest", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Libp2pTCPPortOpen", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, fmt.Sprintf("relay %v", relayAddr): { - {Name: "pingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, "peer anxious-pencil enr:-HW4QDwUF...vKDw": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, "peer important-pen enr:-HW4QPSBg...wbr0": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: 
errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ -280,7 +280,7 @@ func TestPeersTest(t *testing.T) { } var buf bytes.Buffer - err = runTestPeers(ctx, &buf, conf) + _, err = runTestPeers(ctx, &buf, conf) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -331,7 +331,9 @@ func TestPeersTestFlags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestPeersCmd(func(context.Context, io.Writer, testPeersConfig) error { return nil })) + cmd := newAlphaCmd(newTestPeersCmd(func(context.Context, io.Writer, testPeersConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { diff --git a/cmd/testvalidator.go b/cmd/testvalidator.go index 57139a839f..42c7342232 100644 --- a/cmd/testvalidator.go +++ b/cmd/testvalidator.go @@ -32,7 +32,7 @@ const ( thresholdValidatorLoadPoor = 240 * time.Millisecond ) -func newTestValidatorCmd(runFunc func(context.Context, io.Writer, testValidatorConfig) error) *cobra.Command { +func newTestValidatorCmd(runFunc func(context.Context, io.Writer, testValidatorConfig) (testCategoryResult, error)) *cobra.Command { var config testValidatorConfig cmd := &cobra.Command{ @@ -44,7 +44,8 @@ func newTestValidatorCmd(runFunc func(context.Context, io.Writer, testValidatorC return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } @@ -61,19 +62,20 @@ func bindTestValidatorFlags(cmd *cobra.Command, config *testValidatorConfig, fla func supportedValidatorTestCases() map[testCaseName]func(context.Context, *testValidatorConfig) testResult { return map[testCaseName]func(context.Context, *testValidatorConfig) testResult{ - {name: "ping", order: 1}: validatorPingTest, - {name: "pingMeasure", order: 2}: 
validatorPingMeasureTest, - {name: "pingLoad", order: 3}: validatorPingLoadTest, + {name: "Ping", order: 1}: validatorPingTest, + {name: "PingMeasure", order: 2}: validatorPingMeasureTest, + {name: "PingLoad", order: 3}: validatorPingLoadTest, } } -func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) (err error) { +func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) (res testCategoryResult, err error) { log.Info(ctx, "Starting validator client test") testCases := supportedValidatorTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { - return errors.New("test case not supported") + err = errors.New("test case not supported") + return res, err } sortTests(queuedTests) @@ -102,7 +104,7 @@ func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: validatorTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -112,18 +114,18 @@ func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) if !cfg.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } if cfg.OutputJSON != "" { err = writeResultToFile(res, cfg.OutputJSON) if err != nil { - return err + return res, err } } - return nil + return res, nil } // validator client tests @@ -149,9 +151,7 @@ func testSingleValidator(ctx context.Context, queuedTestCases []testCaseName, al finished = true break } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } @@ -208,7 +208,7 @@ func validatorPingLoadTest(ctx context.Context, conf *testValidatorConfig) testR z.Any("duration", conf.LoadTestDuration), z.Any("target", conf.APIAddress), ) - testRes := testResult{Name: "ValidatorLoad"} + testRes := testResult{Name: "PingLoad"} testResCh := make(chan time.Duration, math.MaxInt16) pingCtx, 
cancel := context.WithTimeout(ctx, conf.LoadTestDuration) diff --git a/cmd/testvalidator_internal_test.go b/cmd/testvalidator_internal_test.go index 9f4ab003f6..578dcaac99 100644 --- a/cmd/testvalidator_internal_test.go +++ b/cmd/testvalidator_internal_test.go @@ -44,7 +44,6 @@ func TestValidatorTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: false, - TestCases: nil, Timeout: time.Minute, }, APIAddress: validatorAPIAddress, @@ -52,9 +51,9 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -68,7 +67,6 @@ func TestValidatorTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: false, - TestCases: nil, Timeout: 100 * time.Nanosecond, }, APIAddress: validatorAPIAddress, @@ -76,7 +74,7 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ -90,7 +88,6 @@ func TestValidatorTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: true, - TestCases: nil, Timeout: time.Minute, }, 
APIAddress: validatorAPIAddress, @@ -98,9 +95,9 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -131,7 +128,7 @@ func TestValidatorTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "", Quiet: false, - TestCases: []string{"ping"}, + TestCases: []string{"Ping"}, Timeout: time.Minute, }, APIAddress: validatorAPIAddress, @@ -139,7 +136,7 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -153,7 +150,6 @@ func TestValidatorTest(t *testing.T) { testConfig: testConfig{ OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, - TestCases: nil, Timeout: time.Minute, }, APIAddress: validatorAPIAddress, @@ -161,9 +157,9 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", 
Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -181,7 +177,7 @@ func TestValidatorTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestValidator(ctx, &buf, test.config) + _, err := runTestValidator(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -246,7 +242,9 @@ func TestValidatorTestFlags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestValidatorCmd(func(context.Context, io.Writer, testValidatorConfig) error { return nil })) + cmd := newAlphaCmd(newTestValidatorCmd(func(context.Context, io.Writer, testValidatorConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { From b28d6c4f82aa42ff5f5934c7185ef27c21be53ad Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev <24719519+KaloyanTanev@users.noreply.github.com> Date: Wed, 4 Sep 2024 11:06:01 +0200 Subject: [PATCH 85/89] *: bump go to 1.23 (#3250) Bump go version to 1.23. 
category: misc ticket: #3251 --- .github/workflows/codeql-analysis.yml | 2 +- .pre-commit-config.yaml | 2 +- go.mod | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9c6ba92dcc..a05d7464cf 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -21,7 +21,7 @@ on: - cron: '18 19 * * 6' env: - GOLANG_VERSION: '1.22' + GOLANG_VERSION: '1.23' jobs: analyze: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 82f26597fb..beb5019b99 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: rev: v0.0.3 hooks: - id: check-go-version - args: [ -v=go1.22 ] # Only check minor version locally + args: [ -v=go1.23 ] # Only check minor version locally pass_filenames: false additional_dependencies: [ packaging ] - id: check-licence-header diff --git a/go.mod b/go.mod index 8ea8e29e6d..2d23396f7e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/obolnetwork/charon -go 1.22 +go 1.23 require ( github.com/attestantio/go-builder-client v0.5.1 From 956ccf399cd98a109fd0f91ebd4eeed6f42362f8 Mon Sep 17 00:00:00 2001 From: Andrei Smirnov Date: Wed, 16 Oct 2024 16:58:34 +0300 Subject: [PATCH 86/89] core/consensus: logging leader index (#3334) Added `core_consensus_decided_leader_index` gauge to reflect the leader index of QBFT decision round. Also, logging essential decision round data as a debug message. 
category: feature ticket: none --- core/consensus/component.go | 24 ++++++++++++++++++++---- core/consensus/metrics.go | 11 ++++++++++- docs/metrics.md | 1 + 3 files changed, 31 insertions(+), 5 deletions(-) diff --git a/core/consensus/component.go b/core/consensus/component.go index fe5ffb1dde..c98f07f896 100644 --- a/core/consensus/component.go +++ b/core/consensus/component.go @@ -432,15 +432,31 @@ func (c *Component) runInstance(ctx context.Context, duty core.Duty) (err error) } // Instrument consensus instance. - var decided bool + var ( + decided bool + nodes = len(c.peers) + ) + decideCallback := func(qcommit []qbft.Msg[core.Duty, [32]byte]) { + round := qcommit[0].Round() decided = true - decidedRoundsGauge.WithLabelValues(duty.Type.String(), string(roundTimer.Type())).Set(float64(qcommit[0].Round())) + decidedRoundsGauge.WithLabelValues(duty.Type.String(), string(roundTimer.Type())).Set(float64(round)) inst.decidedAtCh <- time.Now() + + leaderIndex := leader(duty, round, nodes) + leaderName := c.peers[leaderIndex].Name + log.Debug(ctx, "QBFT consensus decided", + z.Str("duty", duty.Type.String()), + z.U64("slot", duty.Slot), + z.I64("round", round), + z.I64("leader_index", leaderIndex), + z.Str("leader_name", leaderName)) + + decidedLeaderGauge.WithLabelValues(duty.Type.String()).Set(float64(leaderIndex)) } // Create a new qbft definition for this instance. - def := newDefinition(len(c.peers), c.subscribers, roundTimer, decideCallback) + def := newDefinition(nodes, c.subscribers, roundTimer, decideCallback) // Create a new transport that handles sending and receiving for this instance. t := transport{ @@ -466,7 +482,7 @@ func (c *Component) runInstance(ctx context.Context, duty core.Duty) (err error) } // Run the algo, blocking until the context is cancelled. 
- err = qbft.Run[core.Duty, [32]byte](ctx, def, qt, duty, peerIdx, inst.hashCh) + err = qbft.Run(ctx, def, qt, duty, peerIdx, inst.hashCh) if err != nil && !isContextErr(err) { consensusError.Inc() return err // Only return non-context errors. diff --git a/core/consensus/metrics.go b/core/consensus/metrics.go index 0c9b986487..8a6eee9963 100644 --- a/core/consensus/metrics.go +++ b/core/consensus/metrics.go @@ -9,12 +9,21 @@ import ( ) var ( + // Using gauge since the value changes slowly, once per slot. decidedRoundsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "core", Subsystem: "consensus", Name: "decided_rounds", Help: "Number of rounds it took to decide consensus instances by duty and timer type.", - }, []string{"duty", "timer"}) // Using gauge since the value changes slowly, once per slot. + }, []string{"duty", "timer"}) + + // Using gauge since the value changes slowly, once per slot. + decidedLeaderGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "core", + Subsystem: "consensus", + Name: "decided_leader_index", + Help: "Leader node index of the decision round by duty.", + }, []string{"duty"}) consensusDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "core", diff --git a/docs/metrics.md b/docs/metrics.md index 7d95d640bb..6ad754a901 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -43,6 +43,7 @@ when storing metrics from multiple nodes or clusters in one Prometheus instance. | `core_bcast_recast_errors_total` | Counter | The total count of failed recasted registrations by source; `pregen` vs `downstream` | `source` | | `core_bcast_recast_registration_total` | Counter | The total number of unique validator registration stored in recaster per pubkey | `pubkey` | | `core_bcast_recast_total` | Counter | The total count of recasted registrations by source; `pregen` vs `downstream` | `source` | +| `core_consensus_decided_leader_index` | Gauge | Leader node index of the decision round by duty. 
| `duty` | | `core_consensus_decided_rounds` | Gauge | Number of rounds it took to decide consensus instances by duty and timer type. | `duty, timer` | | `core_consensus_duration_seconds` | Histogram | Duration of a consensus instance in seconds by duty and timer type. | `duty, timer` | | `core_consensus_error_total` | Counter | Total count of consensus errors | | From 7e47fe81dc9be5ef849876dcbe100ec3f0fdd948 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Tue, 19 Nov 2024 21:56:04 +0200 Subject: [PATCH 87/89] *: removing mutable config (#3307) --- testutil/integration/nightly_dkg_test.go | 34 ++++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/testutil/integration/nightly_dkg_test.go b/testutil/integration/nightly_dkg_test.go index ed333f3f04..ab0ad6b108 100644 --- a/testutil/integration/nightly_dkg_test.go +++ b/testutil/integration/nightly_dkg_test.go @@ -148,8 +148,7 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w t.Helper() var ( - ctx context.Context - cancelFunc context.CancelFunc + stopNode context.CancelFunc firstNode bool // True if node index is 0 allStarted bool // True if all nodes have started DKG firstTime = true // True if the node is starting for the first time @@ -158,13 +157,9 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w firstNode = nodeIdx == 0 // runDKG runs a new instance of DKG. If a DKG is already running, it stops it before starting a new one. 
- runDKG := func() { - // If there's an instance already running, stop it - if ctx != nil { - cancelFunc() - } + runDKG := func() context.CancelFunc { + ctx, cancelFunc := context.WithCancel(parentCtx) - ctx, cancelFunc = context.WithCancel(parentCtx) log.Debug(ctx, "Starting DKG node", z.Int("node", nodeIdx), z.Bool("first_time", firstTime)) errCh := make(chan error, 1) @@ -178,6 +173,8 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w }(ctx) err := <-errCh require.ErrorContains(t, err, ctxCanceledErr) + + return cancelFunc } for { @@ -186,12 +183,15 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w allStarted = true case <-newWindowStarted: if firstNode && !firstTime { // Node 0 never restarts (is always up) - log.Debug(ctx, "Not restarting node", z.Int("node", nodeIdx)) + log.Debug(parentCtx, "Not restarting node", z.Int("node", nodeIdx)) continue } // Start the node - runDKG() + if stopNode != nil { + stopNode() + } + stopNode = runDKG() firstTime = false if firstNode { continue @@ -199,7 +199,7 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w // Wait for some random duration before stopping the node stopDelay := calcStopDelay(t, window, nodeDownPeriod) - log.Debug(ctx, "Stopping node after delay", z.Int("node", nodeIdx), z.Str("delay", stopDelay.String())) + log.Debug(parentCtx, "Stopping node after delay", z.Int("node", nodeIdx), z.Str("delay", stopDelay.String())) select { case <-time.After(stopDelay): case <-allNodesStarted: @@ -207,8 +207,8 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w } // Stop the node - cancelFunc() - log.Debug(ctx, "Node stopped", z.Int("node", nodeIdx)) + stopNode() + log.Debug(parentCtx, "Node stopped", z.Int("node", nodeIdx)) // If all nodes have started, there's no point in restarting the node if allStarted { @@ -216,10 +216,10 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, 
dkgConf dkg.Config, w } // Wait nodeDownPeriod before restarting the node - log.Debug(ctx, "Waiting before restarting node", z.Int("node", nodeIdx), z.Str("delay", nodeDownPeriod.String())) + log.Debug(parentCtx, "Waiting before restarting node", z.Int("node", nodeIdx), z.Str("delay", nodeDownPeriod.String())) select { case <-time.After(nodeDownPeriod): - runDKG() + stopNode = runDKG() case <-allNodesStarted: allStarted = true } @@ -231,8 +231,8 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w } // Stop any existing running DKG and run the final DKG since all nodes are up now - if ctx != nil { - cancelFunc() + if stopNode != nil { + stopNode() } log.Debug(parentCtx, "Running final DKG", z.Int("node", nodeIdx)) From c8a0c643fb316df643ef6f7a259b3a7944258628 Mon Sep 17 00:00:00 2001 From: Andrei Smirnov Date: Thu, 10 Oct 2024 10:51:06 +0300 Subject: [PATCH 88/89] *: bumped protoc-gen-go (#3331) The new protoc-gen-go v1.35.1 needs our proto files to be regenerarted, otherwise pre-commit is failing. 
category: fixbuild ticket: none --- app/log/loki/lokipb/v1/loki.pb.go | 68 ++------ app/peerinfo/peerinfopb/v1/peerinfo.pb.go | 24 +-- app/protonil/testdata/v1/test.pb.go | 134 +++------------ cluster/manifestpb/v1/manifest.pb.go | 200 ++++------------------ core/corepb/v1/consensus.pb.go | 112 +++--------- core/corepb/v1/core.pb.go | 90 ++-------- core/corepb/v1/parsigex.pb.go | 24 +-- core/corepb/v1/priority.pb.go | 112 +++--------- dkg/dkgpb/v1/bcast.pb.go | 68 ++------ dkg/dkgpb/v1/frost.pb.go | 156 +++-------------- dkg/dkgpb/v1/nodesigs.pb.go | 24 +-- dkg/dkgpb/v1/sync.pb.go | 46 +---- 12 files changed, 188 insertions(+), 870 deletions(-) diff --git a/app/log/loki/lokipb/v1/loki.pb.go b/app/log/loki/lokipb/v1/loki.pb.go index 5b76d1b5f5..e1f71c092c 100644 --- a/app/log/loki/lokipb/v1/loki.pb.go +++ b/app/log/loki/lokipb/v1/loki.pb.go @@ -31,11 +31,9 @@ type PushRequest struct { func (x *PushRequest) Reset() { *x = PushRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PushRequest) String() string { @@ -46,7 +44,7 @@ func (*PushRequest) ProtoMessage() {} func (x *PushRequest) ProtoReflect() protoreflect.Message { mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -80,11 +78,9 @@ type Stream struct { func (x *Stream) Reset() { *x = Stream{} - if protoimpl.UnsafeEnabled { - mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[1] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Stream) String() string { @@ -95,7 +91,7 @@ func (*Stream) ProtoMessage() {} func (x *Stream) ProtoReflect() protoreflect.Message { mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -142,11 +138,9 @@ type Entry struct { func (x *Entry) Reset() { *x = Entry{} - if protoimpl.UnsafeEnabled { - mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Entry) String() string { @@ -157,7 +151,7 @@ func (*Entry) ProtoMessage() {} func (x *Entry) ProtoReflect() protoreflect.Message { mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -253,44 +247,6 @@ func file_app_log_loki_lokipb_v1_loki_proto_init() { if File_app_log_loki_lokipb_v1_loki_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_app_log_loki_lokipb_v1_loki_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*PushRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_log_loki_lokipb_v1_loki_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Stream); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_log_loki_lokipb_v1_loki_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := 
v.(*Entry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/app/peerinfo/peerinfopb/v1/peerinfo.pb.go b/app/peerinfo/peerinfopb/v1/peerinfo.pb.go index 5524c0716f..8663f44084 100644 --- a/app/peerinfo/peerinfopb/v1/peerinfo.pb.go +++ b/app/peerinfo/peerinfopb/v1/peerinfo.pb.go @@ -36,11 +36,9 @@ type PeerInfo struct { func (x *PeerInfo) Reset() { *x = PeerInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PeerInfo) String() string { @@ -51,7 +49,7 @@ func (*PeerInfo) ProtoMessage() {} func (x *PeerInfo) ProtoReflect() protoreflect.Message { mi := &file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -175,20 +173,6 @@ func file_app_peerinfo_peerinfopb_v1_peerinfo_proto_init() { if File_app_peerinfo_peerinfopb_v1_peerinfo_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*PeerInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ diff --git a/app/protonil/testdata/v1/test.pb.go b/app/protonil/testdata/v1/test.pb.go index c69299f731..19a41ae255 100644 --- 
a/app/protonil/testdata/v1/test.pb.go +++ b/app/protonil/testdata/v1/test.pb.go @@ -32,11 +32,9 @@ type M1 struct { func (x *M1) Reset() { *x = M1{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *M1) String() string { @@ -47,7 +45,7 @@ func (*M1) ProtoMessage() {} func (x *M1) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -95,11 +93,9 @@ type M2 struct { func (x *M2) Reset() { *x = M2{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *M2) String() string { @@ -110,7 +106,7 @@ func (*M2) ProtoMessage() {} func (x *M2) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -156,11 +152,9 @@ type M3 struct { func (x *M3) Reset() { *x = M3{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x 
*M3) String() string { @@ -171,7 +165,7 @@ func (*M3) ProtoMessage() {} func (x *M3) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -205,11 +199,9 @@ type M4 struct { func (x *M4) Reset() { *x = M4{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *M4) String() string { @@ -220,7 +212,7 @@ func (*M4) ProtoMessage() {} func (x *M4) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -267,11 +259,9 @@ type MaxIndex struct { func (x *MaxIndex) Reset() { *x = MaxIndex{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MaxIndex) String() string { @@ -282,7 +272,7 @@ func (*MaxIndex) ProtoMessage() {} func (x *MaxIndex) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -321,11 +311,9 @@ type Attack struct { func (x *Attack) Reset() { *x = Attack{} - if 
protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Attack) String() string { @@ -336,7 +324,7 @@ func (*Attack) ProtoMessage() {} func (x *Attack) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -502,80 +490,6 @@ func file_app_protonil_testdata_v1_test_proto_init() { if File_app_protonil_testdata_v1_test_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_app_protonil_testdata_v1_test_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*M1); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*M2); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*M3); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*M4); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*MaxIndex); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Attack); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_app_protonil_testdata_v1_test_proto_msgTypes[0].OneofWrappers = []any{} file_app_protonil_testdata_v1_test_proto_msgTypes[1].OneofWrappers = []any{} file_app_protonil_testdata_v1_test_proto_msgTypes[5].OneofWrappers = []any{} diff --git a/cluster/manifestpb/v1/manifest.pb.go b/cluster/manifestpb/v1/manifest.pb.go index 961f5fc880..6b7a68bb45 100644 --- a/cluster/manifestpb/v1/manifest.pb.go +++ b/cluster/manifestpb/v1/manifest.pb.go @@ -39,11 +39,9 @@ type Cluster struct { func (x *Cluster) Reset() { *x = Cluster{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Cluster) String() string { @@ -54,7 +52,7 @@ func (*Cluster) ProtoMessage() {} func (x *Cluster) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -138,11 +136,9 @@ type Mutation struct { func (x *Mutation) Reset() { *x = Mutation{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *Mutation) String() string { @@ -153,7 +149,7 @@ func (*Mutation) ProtoMessage() {} func (x *Mutation) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -202,11 +198,9 @@ type SignedMutation struct { func (x *SignedMutation) Reset() { *x = SignedMutation{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SignedMutation) String() string { @@ -217,7 +211,7 @@ func (*SignedMutation) ProtoMessage() {} func (x *SignedMutation) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -264,11 +258,9 @@ type SignedMutationList struct { func (x *SignedMutationList) Reset() { *x = SignedMutationList{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SignedMutationList) String() string { @@ -279,7 +271,7 @@ func (*SignedMutationList) ProtoMessage() {} func (x *SignedMutationList) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -313,11 +305,9 @@ type Operator struct { func (x *Operator) Reset() { *x = Operator{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Operator) String() string { @@ -328,7 +318,7 @@ func (*Operator) ProtoMessage() {} func (x *Operator) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -372,11 +362,9 @@ type Validator struct { func (x *Validator) Reset() { *x = Validator{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Validator) String() string { @@ -387,7 +375,7 @@ func (*Validator) ProtoMessage() {} func (x *Validator) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -448,11 +436,9 @@ type ValidatorList struct { func (x *ValidatorList) Reset() { *x = ValidatorList{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi 
:= &file_cluster_manifestpb_v1_manifest_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidatorList) String() string { @@ -463,7 +449,7 @@ func (*ValidatorList) ProtoMessage() {} func (x *ValidatorList) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -496,11 +482,9 @@ type LegacyLock struct { func (x *LegacyLock) Reset() { *x = LegacyLock{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LegacyLock) String() string { @@ -511,7 +495,7 @@ func (*LegacyLock) ProtoMessage() {} func (x *LegacyLock) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -542,11 +526,9 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -557,7 +539,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != 
nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -700,116 +682,6 @@ func file_cluster_manifestpb_v1_manifest_proto_init() { if File_cluster_manifestpb_v1_manifest_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_cluster_manifestpb_v1_manifest_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Cluster); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Mutation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*SignedMutation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*SignedMutationList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Operator); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Validator); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ValidatorList); i { - case 0: - return &v.state 
- case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*LegacyLock); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/core/corepb/v1/consensus.pb.go b/core/corepb/v1/consensus.pb.go index c2258c1d4f..0bbf9708f9 100644 --- a/core/corepb/v1/consensus.pb.go +++ b/core/corepb/v1/consensus.pb.go @@ -39,11 +39,9 @@ type QBFTMsg struct { func (x *QBFTMsg) Reset() { *x = QBFTMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QBFTMsg) String() string { @@ -54,7 +52,7 @@ func (*QBFTMsg) ProtoMessage() {} func (x *QBFTMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -137,11 +135,9 @@ type ConsensusMsg struct { func (x *ConsensusMsg) Reset() { *x = ConsensusMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[1] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ConsensusMsg) String() string { @@ -152,7 +148,7 @@ func (*ConsensusMsg) ProtoMessage() {} func (x *ConsensusMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -199,11 +195,9 @@ type SniffedConsensusMsg struct { func (x *SniffedConsensusMsg) Reset() { *x = SniffedConsensusMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SniffedConsensusMsg) String() string { @@ -214,7 +208,7 @@ func (*SniffedConsensusMsg) ProtoMessage() {} func (x *SniffedConsensusMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -256,11 +250,9 @@ type SniffedConsensusInstance struct { func (x *SniffedConsensusInstance) Reset() { *x = SniffedConsensusInstance{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SniffedConsensusInstance) String() string { @@ -271,7 +263,7 @@ func (*SniffedConsensusInstance) ProtoMessage() {} func (x *SniffedConsensusInstance) ProtoReflect() protoreflect.Message { mi := 
&file_core_corepb_v1_consensus_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -325,11 +317,9 @@ type SniffedConsensusInstances struct { func (x *SniffedConsensusInstances) Reset() { *x = SniffedConsensusInstances{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SniffedConsensusInstances) String() string { @@ -340,7 +330,7 @@ func (*SniffedConsensusInstances) ProtoMessage() {} func (x *SniffedConsensusInstances) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -490,68 +480,6 @@ func file_core_corepb_v1_consensus_proto_init() { return } file_core_corepb_v1_core_proto_init() - if !protoimpl.UnsafeEnabled { - file_core_corepb_v1_consensus_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*QBFTMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_consensus_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ConsensusMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_consensus_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*SniffedConsensusMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil 
- } - } - file_core_corepb_v1_consensus_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*SniffedConsensusInstance); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_consensus_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*SniffedConsensusInstances); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/core/corepb/v1/core.pb.go b/core/corepb/v1/core.pb.go index be16005911..baeb54a46b 100644 --- a/core/corepb/v1/core.pb.go +++ b/core/corepb/v1/core.pb.go @@ -31,11 +31,9 @@ type Duty struct { func (x *Duty) Reset() { *x = Duty{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_core_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_core_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duty) String() string { @@ -46,7 +44,7 @@ func (*Duty) ProtoMessage() {} func (x *Duty) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_core_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -85,11 +83,9 @@ type UnsignedDataSet struct { func (x *UnsignedDataSet) Reset() { *x = UnsignedDataSet{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_core_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_core_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UnsignedDataSet) String() string { @@ -100,7 +96,7 @@ 
func (*UnsignedDataSet) ProtoMessage() {} func (x *UnsignedDataSet) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_core_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -132,11 +128,9 @@ type ParSignedDataSet struct { func (x *ParSignedDataSet) Reset() { *x = ParSignedDataSet{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_core_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_core_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParSignedDataSet) String() string { @@ -147,7 +141,7 @@ func (*ParSignedDataSet) ProtoMessage() {} func (x *ParSignedDataSet) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_core_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -181,11 +175,9 @@ type ParSignedData struct { func (x *ParSignedData) Reset() { *x = ParSignedData{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_core_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_core_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParSignedData) String() string { @@ -196,7 +188,7 @@ func (*ParSignedData) ProtoMessage() {} func (x *ParSignedData) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_core_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -309,56 +301,6 @@ func 
file_core_corepb_v1_core_proto_init() { if File_core_corepb_v1_core_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_core_corepb_v1_core_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_core_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*UnsignedDataSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_core_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ParSignedDataSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_core_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ParSignedData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/core/corepb/v1/parsigex.pb.go b/core/corepb/v1/parsigex.pb.go index 3c62dcc4a7..aabb6d5ae4 100644 --- a/core/corepb/v1/parsigex.pb.go +++ b/core/corepb/v1/parsigex.pb.go @@ -31,11 +31,9 @@ type ParSigExMsg struct { func (x *ParSigExMsg) Reset() { *x = ParSigExMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_parsigex_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_parsigex_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParSigExMsg) String() string { @@ -46,7 +44,7 @@ func (*ParSigExMsg) ProtoMessage() {} func (x *ParSigExMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_parsigex_proto_msgTypes[0] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -130,20 +128,6 @@ func file_core_corepb_v1_parsigex_proto_init() { return } file_core_corepb_v1_core_proto_init() - if !protoimpl.UnsafeEnabled { - file_core_corepb_v1_parsigex_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ParSigExMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/core/corepb/v1/priority.pb.go b/core/corepb/v1/priority.pb.go index ac41a161b8..1db8a47889 100644 --- a/core/corepb/v1/priority.pb.go +++ b/core/corepb/v1/priority.pb.go @@ -33,11 +33,9 @@ type PriorityResult struct { func (x *PriorityResult) Reset() { *x = PriorityResult{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityResult) String() string { @@ -48,7 +46,7 @@ func (*PriorityResult) ProtoMessage() {} func (x *PriorityResult) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -91,11 +89,9 @@ type PriorityMsg struct { func (x *PriorityMsg) Reset() { *x = PriorityMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[1] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityMsg) String() string { @@ -106,7 +102,7 @@ func (*PriorityMsg) ProtoMessage() {} func (x *PriorityMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -161,11 +157,9 @@ type PriorityTopicProposal struct { func (x *PriorityTopicProposal) Reset() { *x = PriorityTopicProposal{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityTopicProposal) String() string { @@ -176,7 +170,7 @@ func (*PriorityTopicProposal) ProtoMessage() {} func (x *PriorityTopicProposal) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -219,11 +213,9 @@ type PriorityTopicResult struct { func (x *PriorityTopicResult) Reset() { *x = PriorityTopicResult{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityTopicResult) String() string { @@ -234,7 +226,7 @@ func (*PriorityTopicResult) ProtoMessage() {} func (x *PriorityTopicResult) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[3] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -275,11 +267,9 @@ type PriorityScoredResult struct { func (x *PriorityScoredResult) Reset() { *x = PriorityScoredResult{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityScoredResult) String() string { @@ -290,7 +280,7 @@ func (*PriorityScoredResult) ProtoMessage() {} func (x *PriorityScoredResult) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -421,68 +411,6 @@ func file_core_corepb_v1_priority_proto_init() { return } file_core_corepb_v1_core_proto_init() - if !protoimpl.UnsafeEnabled { - file_core_corepb_v1_priority_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*PriorityResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_priority_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*PriorityMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_priority_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*PriorityTopicProposal); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_priority_proto_msgTypes[3].Exporter = func(v any, i 
int) any { - switch v := v.(*PriorityTopicResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_priority_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*PriorityScoredResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/dkg/dkgpb/v1/bcast.pb.go b/dkg/dkgpb/v1/bcast.pb.go index e18bb72a9a..1f8063c3e1 100644 --- a/dkg/dkgpb/v1/bcast.pb.go +++ b/dkg/dkgpb/v1/bcast.pb.go @@ -32,11 +32,9 @@ type BCastSigRequest struct { func (x *BCastSigRequest) Reset() { *x = BCastSigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BCastSigRequest) String() string { @@ -47,7 +45,7 @@ func (*BCastSigRequest) ProtoMessage() {} func (x *BCastSigRequest) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -87,11 +85,9 @@ type BCastSigResponse struct { func (x *BCastSigResponse) Reset() { *x = BCastSigResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BCastSigResponse) String() string { @@ -102,7 +98,7 @@ func (*BCastSigResponse) 
ProtoMessage() {} func (x *BCastSigResponse) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -143,11 +139,9 @@ type BCastMessage struct { func (x *BCastMessage) Reset() { *x = BCastMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BCastMessage) String() string { @@ -158,7 +152,7 @@ func (*BCastMessage) ProtoMessage() {} func (x *BCastMessage) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -258,44 +252,6 @@ func file_dkg_dkgpb_v1_bcast_proto_init() { if File_dkg_dkgpb_v1_bcast_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_dkg_dkgpb_v1_bcast_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*BCastSigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_bcast_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*BCastSigResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_bcast_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*BCastMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} 
out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/dkg/dkgpb/v1/frost.pb.go b/dkg/dkgpb/v1/frost.pb.go index 5a51cb4d6a..3a5f6d52e5 100644 --- a/dkg/dkgpb/v1/frost.pb.go +++ b/dkg/dkgpb/v1/frost.pb.go @@ -32,11 +32,9 @@ type FrostMsgKey struct { func (x *FrostMsgKey) Reset() { *x = FrostMsgKey{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostMsgKey) String() string { @@ -47,7 +45,7 @@ func (*FrostMsgKey) ProtoMessage() {} func (x *FrostMsgKey) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -93,11 +91,9 @@ type FrostRound1Casts struct { func (x *FrostRound1Casts) Reset() { *x = FrostRound1Casts{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound1Casts) String() string { @@ -108,7 +104,7 @@ func (*FrostRound1Casts) ProtoMessage() {} func (x *FrostRound1Casts) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -143,11 +139,9 @@ type FrostRound1Cast struct { func (x *FrostRound1Cast) Reset() { *x = FrostRound1Cast{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[2] 
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound1Cast) String() string { @@ -158,7 +152,7 @@ func (*FrostRound1Cast) ProtoMessage() {} func (x *FrostRound1Cast) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -211,11 +205,9 @@ type FrostRound1P2P struct { func (x *FrostRound1P2P) Reset() { *x = FrostRound1P2P{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound1P2P) String() string { @@ -226,7 +218,7 @@ func (*FrostRound1P2P) ProtoMessage() {} func (x *FrostRound1P2P) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -260,11 +252,9 @@ type FrostRound1ShamirShare struct { func (x *FrostRound1ShamirShare) Reset() { *x = FrostRound1ShamirShare{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound1ShamirShare) String() string { @@ -275,7 +265,7 @@ func (*FrostRound1ShamirShare) ProtoMessage() {} func (x 
*FrostRound1ShamirShare) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -321,11 +311,9 @@ type FrostRound2Casts struct { func (x *FrostRound2Casts) Reset() { *x = FrostRound2Casts{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound2Casts) String() string { @@ -336,7 +324,7 @@ func (*FrostRound2Casts) ProtoMessage() {} func (x *FrostRound2Casts) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -370,11 +358,9 @@ type FrostRound2Cast struct { func (x *FrostRound2Cast) Reset() { *x = FrostRound2Cast{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound2Cast) String() string { @@ -385,7 +371,7 @@ func (*FrostRound2Cast) ProtoMessage() {} func (x *FrostRound2Cast) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,92 +503,6 @@ func file_dkg_dkgpb_v1_frost_proto_init() { if File_dkg_dkgpb_v1_frost_proto != 
nil { return } - if !protoimpl.UnsafeEnabled { - file_dkg_dkgpb_v1_frost_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FrostMsgKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound1Casts); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound1Cast); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound1P2P); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound1ShamirShare); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound2Casts); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound2Cast); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/dkg/dkgpb/v1/nodesigs.pb.go b/dkg/dkgpb/v1/nodesigs.pb.go index d19659b6f3..f6ac5a1e65 100644 --- 
a/dkg/dkgpb/v1/nodesigs.pb.go +++ b/dkg/dkgpb/v1/nodesigs.pb.go @@ -31,11 +31,9 @@ type MsgNodeSig struct { func (x *MsgNodeSig) Reset() { *x = MsgNodeSig{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_nodesigs_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_nodesigs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MsgNodeSig) String() string { @@ -46,7 +44,7 @@ func (*MsgNodeSig) ProtoMessage() {} func (x *MsgNodeSig) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_nodesigs_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -120,20 +118,6 @@ func file_dkg_dkgpb_v1_nodesigs_proto_init() { if File_dkg_dkgpb_v1_nodesigs_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_dkg_dkgpb_v1_nodesigs_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*MsgNodeSig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/dkg/dkgpb/v1/sync.pb.go b/dkg/dkgpb/v1/sync.pb.go index 8e48bda752..e6218b0509 100644 --- a/dkg/dkgpb/v1/sync.pb.go +++ b/dkg/dkgpb/v1/sync.pb.go @@ -35,11 +35,9 @@ type MsgSync struct { func (x *MsgSync) Reset() { *x = MsgSync{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MsgSync) String() string { @@ -50,7 +48,7 @@ func (*MsgSync) ProtoMessage() {} func (x *MsgSync) ProtoReflect() 
protoreflect.Message { mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -111,11 +109,9 @@ type MsgSyncResponse struct { func (x *MsgSyncResponse) Reset() { *x = MsgSyncResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MsgSyncResponse) String() string { @@ -126,7 +122,7 @@ func (*MsgSyncResponse) ProtoMessage() {} func (x *MsgSyncResponse) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -220,32 +216,6 @@ func file_dkg_dkgpb_v1_sync_proto_init() { if File_dkg_dkgpb_v1_sync_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_dkg_dkgpb_v1_sync_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*MsgSync); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_sync_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*MsgSyncResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ From 044adbd995bc8364c0448c44afdd05465e1acf4a Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Wed, 20 Nov 2024 12:52:11 +0200 Subject: [PATCH 89/89] Bump version --- app/version/version.go | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/app/version/version.go b/app/version/version.go index 8fdc5c5409..a2cc30ceee 100644 --- a/app/version/version.go +++ b/app/version/version.go @@ -15,7 +15,7 @@ import ( ) // version a string since it is overwritten at build-time with the git tag for official releases. -var version = "v1.1-rc" +var version = "v1.2-rc" // Version is the branch version of the codebase. // - Main branch: v0.X-dev @@ -25,6 +25,7 @@ var Version, _ = Parse(version) // Error is caught in tests. // Supported returns the supported minor versions in order of precedence. func Supported() []SemVer { return []SemVer{ + {major: 1, minor: 2}, {major: 1, minor: 1}, {major: 1, minor: 0}, }